* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_SEC
#include <libcfs/libcfs.h>
-#ifndef __KERNEL__
-#include <liblustre.h>
-#include <libcfs/list.h>
-#else
-#include <linux/crypto.h>
-#endif
#include <obd.h>
#include <obd_cksum.h>
#include "ptlrpc_internal.h"
+static int mult = 20 - PAGE_CACHE_SHIFT;
+static int enc_pool_max_memory_mb;
+CFS_MODULE_PARM(enc_pool_max_memory_mb, "i", int, 0644,
+ "Encoding pool max memory (MB), 1/8 of total physical memory by default");
+
/****************************************
* bulk encryption page pools *
****************************************/
-#ifdef __KERNEL__
#define PTRS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *))
#define PAGES_PER_POOL (PTRS_PER_PAGE)
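+/* each pool is a single page filled with page pointers, so one pool
+ * tracks PAGES_PER_POOL pages */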
unsigned long epp_max_pages; /* maximum pages can hold, const */
unsigned int epp_max_pools; /* number of pools, const */
- /*
- * wait queue in case of not enough free pages.
- */
- cfs_waitq_t epp_waitq; /* waiting threads */
- unsigned int epp_waitqlen; /* wait queue length */
- unsigned long epp_pages_short; /* # of pages wanted of in-q users */
- unsigned int epp_growing:1; /* during adding pages */
+ /*
+ * wait queue in case of not enough free pages.
+ */
+ wait_queue_head_t epp_waitq; /* waiting threads */
+ unsigned int epp_waitqlen; /* wait queue length */
+ unsigned long epp_pages_short; /* # of pages wanted by in-q users */
+ unsigned int epp_growing:1; /* during adding pages */
/*
* indicating how idle the pools are, from 0 to MAX_IDLE_IDX
unsigned long epp_st_lowfree; /* lowest free pages reached */
unsigned int epp_st_max_wqlen; /* highest waitqueue length */
cfs_time_t epp_st_max_wait; /* in jiffies */
+ unsigned long epp_st_outofmem; /* # of out of mem requests */
/*
* pointers to pools
*/
/*
* memory shrinker
*/
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
+static const int pools_shrinker_seeks = DEFAULT_SEEKS;
static struct shrinker *pools_shrinker;
/*
* /proc/fs/lustre/sptlrpc/encrypt_page_pools
*/
-int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
- int *eof, void *data)
+int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
{
int rc;
spin_lock(&page_pools.epp_lock);
- rc = snprintf(page, count,
+ rc = seq_printf(m,
"physical pages: %lu\n"
"pages per pool: %lu\n"
"max pages: %lu\n"
"cache missing: %lu\n"
"low free mark: %lu\n"
"max waitqueue depth: %u\n"
- "max wait time: "CFS_TIME_T"/%u\n"
+ "max wait time: "CFS_TIME_T"/%lu\n"
+ "out of mem: %lu\n"
,
- num_physpages,
+ totalram_pages,
PAGES_PER_POOL,
page_pools.epp_max_pages,
page_pools.epp_max_pools,
page_pools.epp_st_missings,
page_pools.epp_st_lowfree,
page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait, HZ
+ page_pools.epp_st_max_wait,
+ msecs_to_jiffies(MSEC_PER_SEC),
+ page_pools.epp_st_outofmem
);
spin_unlock(&page_pools.epp_lock);
p_idx++;
g_idx = 0;
}
- };
+ }
/* free unused pools */
while (p_idx_max1 < p_idx_max2) {
}
/*
- * could be called frequently for query (@nr_to_scan == 0).
* we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
*/
-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long enc_pools_shrink_count(struct shrinker *s,
+ struct shrink_control *sc)
{
- if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
+ /*
+ * if no pool access for a long time, we consider it's fully idle.
+ * a little race here is fine.
+ */
+ if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+ CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
- shrink_param(sc, nr_to_scan) = min_t(unsigned long,
- shrink_param(sc, nr_to_scan),
- page_pools.epp_free_pages -
- PTLRPC_MAX_BRW_PAGES);
- if (shrink_param(sc, nr_to_scan) > 0) {
- enc_pools_release_free_pages(shrink_param(sc,
- nr_to_scan));
- CDEBUG(D_SEC, "released %ld pages, %ld left\n",
- (long)shrink_param(sc, nr_to_scan),
- page_pools.epp_free_pages);
-
- page_pools.epp_st_shrinks++;
- page_pools.epp_last_shrink = cfs_time_current_sec();
- }
+ page_pools.epp_idle_idx = IDLE_IDX_MAX;
spin_unlock(&page_pools.epp_lock);
}
+ LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
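+ /* scale the reclaimable count by idleness: the busier the pool
+ * (the lower the idle index), the fewer pages we offer */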
+ return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+ (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+}
+
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+ struct shrink_control *sc)
+{
+ spin_lock(&page_pools.epp_lock);
+ sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
+ page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
+ if (sc->nr_to_scan > 0) {
+ enc_pools_release_free_pages(sc->nr_to_scan);
+ CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+ (long)sc->nr_to_scan, page_pools.epp_free_pages);
+
+ page_pools.epp_st_shrinks++;
+ page_pools.epp_last_shrink = cfs_time_current_sec();
+ }
+ spin_unlock(&page_pools.epp_lock);
+
/*
* if no pool access for a long time, we consider it's fully idle.
* a little race here is fine.
*/
if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
CACHE_QUIESCENT_PERIOD)) {
spin_lock(&page_pools.epp_lock);
page_pools.epp_idle_idx = IDLE_IDX_MAX;
spin_unlock(&page_pools.epp_lock);
}
LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
- return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
- (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+ return sc->nr_to_scan;
}
+#ifndef HAVE_SHRINKER_COUNT
+/*
+ * could be called frequently for query (@nr_to_scan == 0).
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+{
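+ /* repack the legacy shrinker arguments into a shrink_control so
+ * the shared count/scan helpers above can be reused */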
+ struct shrink_control scv = {
+ .nr_to_scan = shrink_param(sc, nr_to_scan),
+ .gfp_mask = shrink_param(sc, gfp_mask)
+ };
+#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
+ struct shrinker *shrinker = NULL;
+#endif
+
+ enc_pools_shrink_scan(shrinker, &scv);
+
+ return enc_pools_shrink_count(shrinker, &scv);
+}
+
+#endif /* HAVE_SHRINKER_COUNT */
+
static inline
int npages_to_npools(unsigned long npages)
{
goto out_pools;
for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
- pools[i][j] = alloc_page(__GFP_IO |
- __GFP_HIGHMEM);
+ pools[i][j] = alloc_page(GFP_NOFS |
+ __GFP_HIGHMEM);
if (pools[i][j] == NULL)
goto out_pools;
static inline void enc_pools_wakeup(void)
{
- LASSERT(spin_is_locked(&page_pools.epp_lock));
- LASSERT(page_pools.epp_waitqlen >= 0);
+ assert_spin_locked(&page_pools.epp_lock);
if (unlikely(page_pools.epp_waitqlen)) {
- LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
- cfs_waitq_broadcast(&page_pools.epp_waitq);
+ LASSERT(waitqueue_active(&page_pools.epp_waitq));
+ wake_up_all(&page_pools.epp_waitq);
}
}
}
/*
+ * Export the number of free pages in the pool
+ */
+int get_free_pages_in_pool(void)
+{
+ return page_pools.epp_free_pages;
+}
+EXPORT_SYMBOL(get_free_pages_in_pool);
+
+/*
+ * Let outside world know if enc_pool full capacity is reached
+ */
+int pool_is_at_full_capacity(void)
+{
+ return (page_pools.epp_total_pages == page_pools.epp_max_pages);
+}
+EXPORT_SYMBOL(pool_is_at_full_capacity);
+
+/*
* we allocate the requested pages atomically.
*/
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
- cfs_waitlink_t waitlink;
- unsigned long this_idle = -1;
- cfs_time_t tick = 0;
- long now;
- int p_idx, g_idx;
- int i;
-
- LASSERT(desc->bd_iov_count > 0);
- LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
-
- /* resent bulk, enc iov might have been allocated previously */
- if (desc->bd_enc_iov != NULL)
- return 0;
-
- OBD_ALLOC(desc->bd_enc_iov,
- desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
- if (desc->bd_enc_iov == NULL)
- return -ENOMEM;
+ wait_queue_t waitlink;
+ unsigned long this_idle = -1;
+ cfs_time_t tick = 0;
+ long now;
+ int p_idx, g_idx;
+ int i;
+
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
+ LASSERT(desc->bd_iov_count > 0);
+ LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
+
+ /* resent bulk, enc iov might have been allocated previously */
+ if (GET_ENC_KIOV(desc) != NULL)
+ return 0;
+
+ OBD_ALLOC(GET_ENC_KIOV(desc),
+ desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
+ if (GET_ENC_KIOV(desc) == NULL)
+ return -ENOMEM;
spin_lock(&page_pools.epp_lock);
- page_pools.epp_st_access++;
+ page_pools.epp_st_access++;
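+ /* threads return here to retry after growing the pools or after
+ * being woken up by a page-returning peer */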
again:
- if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
- if (tick == 0)
- tick = cfs_time_current();
+ if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
+ if (tick == 0)
+ tick = cfs_time_current();
- now = cfs_time_current_sec();
+ now = cfs_time_current_sec();
- page_pools.epp_st_missings++;
- page_pools.epp_pages_short += desc->bd_iov_count;
+ page_pools.epp_st_missings++;
+ page_pools.epp_pages_short += desc->bd_iov_count;
- if (enc_pools_should_grow(desc->bd_iov_count, now)) {
- page_pools.epp_growing = 1;
+ if (enc_pools_should_grow(desc->bd_iov_count, now)) {
+ page_pools.epp_growing = 1;
spin_unlock(&page_pools.epp_lock);
enc_pools_add_pages(page_pools.epp_pages_short / 2);
spin_lock(&page_pools.epp_lock);
- page_pools.epp_growing = 0;
-
- enc_pools_wakeup();
- } else {
- if (++page_pools.epp_waitqlen >
- page_pools.epp_st_max_wqlen)
- page_pools.epp_st_max_wqlen =
- page_pools.epp_waitqlen;
-
- cfs_set_current_state(CFS_TASK_UNINT);
- cfs_waitlink_init(&waitlink);
- cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
-
- spin_unlock(&page_pools.epp_lock);
- cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
- cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
- LASSERT(page_pools.epp_waitqlen > 0);
- spin_lock(&page_pools.epp_lock);
- page_pools.epp_waitqlen--;
- }
+ page_pools.epp_growing = 0;
+
+ enc_pools_wakeup();
+ } else {
+ if (page_pools.epp_growing) {
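+ /* another thread is adding pages; sleep until
+ * it finishes and wakes us */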
+ if (++page_pools.epp_waitqlen >
+ page_pools.epp_st_max_wqlen)
+ page_pools.epp_st_max_wqlen =
+ page_pools.epp_waitqlen;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ init_waitqueue_entry(&waitlink, current);
+ add_wait_queue(&page_pools.epp_waitq,
+ &waitlink);
+
+ spin_unlock(&page_pools.epp_lock);
+ schedule();
+ remove_wait_queue(&page_pools.epp_waitq,
+ &waitlink);
+ LASSERT(page_pools.epp_waitqlen > 0);
+ spin_lock(&page_pools.epp_lock);
+ page_pools.epp_waitqlen--;
+ } else {
+ /* ptlrpcd thread should not sleep in that case,
+ * or deadlock may occur!
+ * Instead, return -ENOMEM so that upper layers
+ * will put request back in queue. */
+ page_pools.epp_st_outofmem++;
+ spin_unlock(&page_pools.epp_lock);
+ OBD_FREE(GET_ENC_KIOV(desc),
+ desc->bd_iov_count *
+ sizeof(*GET_ENC_KIOV(desc)));
+ GET_ENC_KIOV(desc) = NULL;
+ return -ENOMEM;
+ }
+ }
- LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
- page_pools.epp_pages_short -= desc->bd_iov_count;
+ LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
+ page_pools.epp_pages_short -= desc->bd_iov_count;
- this_idle = 0;
- goto again;
- }
+ this_idle = 0;
+ goto again;
+ }
/* record max wait time */
if (unlikely(tick != 0)) {
p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
- for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
- desc->bd_enc_iov[i].kiov_page =
- page_pools.epp_pools[p_idx][g_idx];
- page_pools.epp_pools[p_idx][g_idx] = NULL;
-
- if (++g_idx == PAGES_PER_POOL) {
- p_idx++;
- g_idx = 0;
- }
- }
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
+ BD_GET_ENC_KIOV(desc, i).kiov_page =
+ page_pools.epp_pools[p_idx][g_idx];
+ page_pools.epp_pools[p_idx][g_idx] = NULL;
+
+ if (++g_idx == PAGES_PER_POOL) {
+ p_idx++;
+ g_idx = 0;
+ }
+ }
if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
page_pools.epp_st_lowfree = page_pools.epp_free_pages;
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
- int p_idx, g_idx;
- int i;
+ int p_idx, g_idx;
+ int i;
+
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
- if (desc->bd_enc_iov == NULL)
- return;
+ if (GET_ENC_KIOV(desc) == NULL)
+ return;
- LASSERT(desc->bd_iov_count > 0);
+ LASSERT(desc->bd_iov_count > 0);
spin_lock(&page_pools.epp_lock);
- p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
- g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
+ p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
+ g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
- LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
- page_pools.epp_total_pages);
- LASSERT(page_pools.epp_pools[p_idx]);
+ LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
+ page_pools.epp_total_pages);
+ LASSERT(page_pools.epp_pools[p_idx]);
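+ /* (p_idx, g_idx) is the first empty slot past the currently free
+ * pages; the returned pages are stored from there onwards */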
- for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
- LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
- LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
+ for (i = 0; i < desc->bd_iov_count; i++) {
+ LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page != NULL);
+ LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
+ LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
- page_pools.epp_pools[p_idx][g_idx] =
- desc->bd_enc_iov[i].kiov_page;
+ page_pools.epp_pools[p_idx][g_idx] =
+ BD_GET_ENC_KIOV(desc, i).kiov_page;
- if (++g_idx == PAGES_PER_POOL) {
- p_idx++;
- g_idx = 0;
- }
- }
+ if (++g_idx == PAGES_PER_POOL) {
+ p_idx++;
+ g_idx = 0;
+ }
+ }
- page_pools.epp_free_pages += desc->bd_iov_count;
+ page_pools.epp_free_pages += desc->bd_iov_count;
- enc_pools_wakeup();
+ enc_pools_wakeup();
spin_unlock(&page_pools.epp_lock);
- OBD_FREE(desc->bd_enc_iov,
- desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
- desc->bd_enc_iov = NULL;
+ OBD_FREE(GET_ENC_KIOV(desc),
+ desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
+ GET_ENC_KIOV(desc) = NULL;
}
-EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
/*
* we don't do much stuff for add_user/del_user anymore, except adding some
int sptlrpc_enc_pool_init(void)
{
- /*
- * maximum capacity is 1/8 of total physical memory.
- * is the 1/8 a good number?
- */
- page_pools.epp_max_pages = num_physpages / 8;
- page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
+ DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
+ enc_pools_shrink_count, enc_pools_shrink_scan);
+
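+ /* default cap: the pool may consume at most 1/8 of physical memory;
+ * a valid enc_pool_max_memory_mb setting overrides this default */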
+ page_pools.epp_max_pages = totalram_pages / 8;
+ if (enc_pool_max_memory_mb > 0 &&
+ enc_pool_max_memory_mb <= (totalram_pages >> mult))
+ page_pools.epp_max_pages = enc_pool_max_memory_mb << mult;
+
+ page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
- cfs_waitq_init(&page_pools.epp_waitq);
- page_pools.epp_waitqlen = 0;
- page_pools.epp_pages_short = 0;
+ init_waitqueue_head(&page_pools.epp_waitq);
+ page_pools.epp_waitqlen = 0;
+ page_pools.epp_pages_short = 0;
page_pools.epp_growing = 0;
page_pools.epp_st_lowfree = 0;
page_pools.epp_st_max_wqlen = 0;
page_pools.epp_st_max_wait = 0;
+ page_pools.epp_st_outofmem = 0;
enc_pools_alloc();
if (page_pools.epp_pools == NULL)
return -ENOMEM;
- pools_shrinker = set_shrinker(pools_shrinker_seeks,
- enc_pools_shrink);
+ pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
if (pools_shrinker == NULL) {
enc_pools_free();
return -ENOMEM;
enc_pools_free();
- if (page_pools.epp_st_access > 0) {
- CDEBUG(D_SEC,
- "max pages %lu, grows %u, grow fails %u, shrinks %u, "
- "access %lu, missing %lu, max qlen %u, max wait "
- CFS_TIME_T"/%d\n",
- page_pools.epp_st_max_pages, page_pools.epp_st_grows,
- page_pools.epp_st_grow_fails,
+ if (page_pools.epp_st_access > 0) {
+ CDEBUG(D_SEC,
+ "max pages %lu, grows %u, grow fails %u, shrinks %u, "
+ "access %lu, missing %lu, max qlen %u, max wait "
+ CFS_TIME_T"/%lu, out of mem %lu\n",
+ page_pools.epp_st_max_pages, page_pools.epp_st_grows,
+ page_pools.epp_st_grow_fails,
page_pools.epp_st_shrinks, page_pools.epp_st_access,
page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
- page_pools.epp_st_max_wait, HZ);
+ page_pools.epp_st_max_wait,
+ msecs_to_jiffies(MSEC_PER_SEC),
+ page_pools.epp_st_outofmem);
}
}
-#else /* !__KERNEL__ */
-
-int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
-{
- return 0;
-}
-
-void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
-{
-}
-
-int sptlrpc_enc_pool_init(void)
-{
- return 0;
-}
-
-void sptlrpc_enc_pool_fini(void)
-{
-}
-#endif
static int cfs_hash_alg_id[] = {
[BULK_HASH_ALG_NULL] = CFS_HASH_ALG_NULL,
{
return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
}
-EXPORT_SYMBOL(sptlrpc_get_hash_name);
__u8 sptlrpc_get_hash_alg(const char *algname)
{
return cfs_crypto_hash_alg(algname);
}
-EXPORT_SYMBOL(sptlrpc_get_hash_alg);
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
{
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);
+/*
+ * Compute the checksum of an RPC buffer payload. If the result buffer
+ * \a buf of size \a buflen is not large enough to hold the whole digest,
+ * truncate the result to fit, so that it is possible to use a hash
+ * function with a large hash space but only use part of the resulting hash.
+ */
int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
void *buf, int buflen)
{
struct cfs_crypto_hash_desc *hdesc;
int hashsize;
- char hashbuf[64];
unsigned int bufsize;
int i, err;
+ LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
LASSERT(buflen >= 4);
hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
for (i = 0; i < desc->bd_iov_count; i++) {
-#ifdef __KERNEL__
- cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
- desc->bd_iov[i].kiov_len);
-#else
- cfs_crypto_hash_update(hdesc, desc->bd_iov[i].iov_base,
- desc->bd_iov[i].iov_len);
-#endif
+ cfs_crypto_hash_update_page(hdesc,
+ BD_GET_KIOV(desc, i).kiov_page,
+ BD_GET_KIOV(desc, i).kiov_offset &
+ ~PAGE_MASK,
+ BD_GET_KIOV(desc, i).kiov_len);
}
+
if (hashsize > buflen) {
+ unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+
bufsize = sizeof(hashbuf);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
- &bufsize);
+ LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
+ bufsize, hashsize);
+ err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
memcpy(buf, hashbuf, buflen);
} else {
bufsize = buflen;
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)buf,
- &bufsize);
+ err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
}
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
return err;
}
-EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
-
-