LU-7091 mdd: refresh nlink after update linkea
[fs/lustre-release.git] / lustre / ptlrpc / sec_bulk.c
index c4f45aa..8067bd1 100644
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 #define DEBUG_SUBSYSTEM S_SEC
 
 #include <libcfs/libcfs.h>
-#ifndef __KERNEL__
-#include <liblustre.h>
-#include <libcfs/list.h>
-#else
-#include <linux/crypto.h>
-#endif
 
 #include <obd.h>
 #include <obd_cksum.h>
 
 #include "ptlrpc_internal.h"
 
+static int mult = 20 - PAGE_CACHE_SHIFT;
+static int enc_pool_max_memory_mb;
+CFS_MODULE_PARM(enc_pool_max_memory_mb, "i", int, 0644,
+               "Encoding pool max memory (MB), 1/8 of total physical memory by default");
+
+
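
Note: mult converts megabytes to pages: 1 MB is 2^20 bytes and a page is
2^PAGE_CACHE_SHIFT bytes, so an MB count is shifted left by
(20 - PAGE_CACHE_SHIFT). A sketch, assuming 4 KiB pages
(PAGE_CACHE_SHIFT == 12, hence mult == 8); the helper name is illustrative,
not part of this patch:

    /* illustrative only: convert an MB cap to a page count */
    static unsigned long mb_to_pages(unsigned long mb)
    {
            return mb << (20 - 12);    /* e.g. 512 MB -> 131072 pages */
    }
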
 /****************************************
  * bulk encryption page pools           *
  ****************************************/
 
-#ifdef __KERNEL__
 
 #define PTRS_PER_PAGE   (PAGE_CACHE_SIZE / sizeof(void *))
 #define PAGES_PER_POOL  (PTRS_PER_PAGE)
@@ -120,6 +119,7 @@ static struct ptlrpc_enc_page_pool {
         unsigned long    epp_st_lowfree;        /* lowest free pages reached */
         unsigned int     epp_st_max_wqlen;      /* highest waitqueue length */
        cfs_time_t       epp_st_max_wait;       /* in jiffies */
+       unsigned long    epp_st_outofmem;       /* # of out of mem requests */
        /*
         * pointers to pools
         */
@@ -129,21 +129,20 @@ static struct ptlrpc_enc_page_pool {
 /*
  * memory shrinker
  */
-const int pools_shrinker_seeks = DEFAULT_SEEKS;
+static const int pools_shrinker_seeks = DEFAULT_SEEKS;
 static struct shrinker *pools_shrinker;
 
 
 /*
  * /proc/fs/lustre/sptlrpc/encrypt_page_pools
  */
-int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
-                               int *eof, void *data)
+int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
 {
         int     rc;
 
        spin_lock(&page_pools.epp_lock);
 
-        rc = snprintf(page, count,
+       rc = seq_printf(m,
                       "physical pages:          %lu\n"
                       "pages per pool:          %lu\n"
                       "max pages:               %lu\n"
@@ -161,9 +160,10 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                       "cache missing:           %lu\n"
                       "low free mark:           %lu\n"
                       "max waitqueue depth:     %u\n"
-                      "max wait time:           "CFS_TIME_T"/%u\n"
+                     "max wait time:           "CFS_TIME_T"/%lu\n"
+                     "out of mem:             %lu\n"
                       ,
-                     num_physpages,
+                     totalram_pages,
                       PAGES_PER_POOL,
                       page_pools.epp_max_pages,
                       page_pools.epp_max_pools,
@@ -180,7 +180,9 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                      page_pools.epp_st_missings,
                      page_pools.epp_st_lowfree,
                      page_pools.epp_st_max_wqlen,
-                     page_pools.epp_st_max_wait, HZ
+                     page_pools.epp_st_max_wait,
+                     msecs_to_jiffies(MSEC_PER_SEC),
+                     page_pools.epp_st_outofmem
                     );
 
        spin_unlock(&page_pools.epp_lock);
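
Note: the handler above is the seq_file replacement for the old procfs
read callback. A minimal sketch of how such a show routine is typically
wired up via single_open(); the open function name here is an assumption,
not part of this patch:

    static int enc_pool_proc_open(struct inode *inode, struct file *file)
    {
            /* single-shot seq_file: one show call renders all output */
            return single_open(file, sptlrpc_proc_enc_pool_seq_show, NULL);
    }
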
@@ -221,7 +223,7 @@ static void enc_pools_release_free_pages(long npages)
                         p_idx++;
                         g_idx = 0;
                 }
-        };
+       }
 
         /* free unused pools */
         while (p_idx_max1 < p_idx_max2) {
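
Note: the p_idx/g_idx pair used above implements a two-level page array,
so free page number n lives at
epp_pools[n / PAGES_PER_POOL][n % PAGES_PER_POOL]. A hypothetical helper
(not in the patch) making the mapping explicit:

    static inline struct page *enc_pool_page_at(unsigned long n)
    {
            return page_pools.epp_pools[n / PAGES_PER_POOL]
                                       [n % PAGES_PER_POOL];
    }
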
@@ -233,30 +235,46 @@ static void enc_pools_release_free_pages(long npages)
 }
 
 /*
- * could be called frequently for query (@nr_to_scan == 0).
  * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
  */
-static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+static unsigned long enc_pools_shrink_count(struct shrinker *s,
+                                           struct shrink_control *sc)
 {
-       if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
+       /*
+        * if no pool access for a long time, we consider it's fully idle.
+        * a little race here is fine.
+        */
+       if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+                    CACHE_QUIESCENT_PERIOD)) {
                spin_lock(&page_pools.epp_lock);
-                shrink_param(sc, nr_to_scan) = min_t(unsigned long,
-                                                   shrink_param(sc, nr_to_scan),
-                                                   page_pools.epp_free_pages -
-                                                   PTLRPC_MAX_BRW_PAGES);
-                if (shrink_param(sc, nr_to_scan) > 0) {
-                        enc_pools_release_free_pages(shrink_param(sc,
-                                                                  nr_to_scan));
-                        CDEBUG(D_SEC, "released %ld pages, %ld left\n",
-                               (long)shrink_param(sc, nr_to_scan),
-                               page_pools.epp_free_pages);
-
-                        page_pools.epp_st_shrinks++;
-                        page_pools.epp_last_shrink = cfs_time_current_sec();
-                }
+               page_pools.epp_idle_idx = IDLE_IDX_MAX;
                spin_unlock(&page_pools.epp_lock);
        }
 
+       LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+       return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+               (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+}
+
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+                                          struct shrink_control *sc)
+{
+       spin_lock(&page_pools.epp_lock);
+       sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
+                             page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
+       if (sc->nr_to_scan > 0) {
+               enc_pools_release_free_pages(sc->nr_to_scan);
+               CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+                      (long)sc->nr_to_scan, page_pools.epp_free_pages);
+
+               page_pools.epp_st_shrinks++;
+               page_pools.epp_last_shrink = cfs_time_current_sec();
+       }
+       spin_unlock(&page_pools.epp_lock);
+
        /*
         * if no pool access for a long time, we consider it's fully idle.
         * a little race here is fine.
@@ -269,10 +287,31 @@ static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
        }
 
        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
-       return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
-               (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+       return sc->nr_to_scan;
+}
+
+#ifndef HAVE_SHRINKER_COUNT
+/*
+ * could be called frequently for query (@nr_to_scan == 0).
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+{
+       struct shrink_control scv = {
+               .nr_to_scan = shrink_param(sc, nr_to_scan),
+               .gfp_mask   = shrink_param(sc, gfp_mask)
+       };
+#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
+       struct shrinker* shrinker = NULL;
+#endif
+
+       enc_pools_shrink_scan(shrinker, &scv);
+
+       return enc_pools_shrink_count(shrinker, &scv);
 }
 
+#endif /* HAVE_SHRINKER_COUNT */
+
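Note: this splits the old single shrink callback into the
count_objects/scan_objects pair introduced in Linux 3.12: the count
callback cheaply reports how many pages are reclaimable (scaled down by
epp_idle_idx as the pool gets busier), and the scan callback does the
actual freeing. On kernels with HAVE_SHRINKER_COUNT, DEF_SHRINKER_VAR
presumably produces a registration along these lines (a sketch; the
variable name is illustrative, the field names are the upstream kernel's):

    static struct shrinker enc_pool_shrinker = {
            .count_objects = enc_pools_shrink_count,
            .scan_objects  = enc_pools_shrink_scan,
            .seeks         = DEFAULT_SEEKS,
    };
    /* registered with register_shrinker(&enc_pool_shrinker); */
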
 static inline
 int npages_to_npools(unsigned long npages)
 {
@@ -420,8 +459,8 @@ static int enc_pools_add_pages(int npages)
                        goto out_pools;
 
                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
-                       pools[i][j] = alloc_page(__GFP_IO |
-                                                    __GFP_HIGHMEM);
+                       pools[i][j] = alloc_page(GFP_NOFS |
+                                                __GFP_HIGHMEM);
                        if (pools[i][j] == NULL)
                                goto out_pools;
 
@@ -449,7 +488,7 @@ out:
 
 static inline void enc_pools_wakeup(void)
 {
-       LASSERT(spin_is_locked(&page_pools.epp_lock));
+       assert_spin_locked(&page_pools.epp_lock);
 
        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(waitqueue_active(&page_pools.epp_waitq));
@@ -490,6 +529,24 @@ static int enc_pools_should_grow(int page_needed, long now)
 }
 
 /*
+ * Export the number of free pages in the pool
+ */
+int get_free_pages_in_pool(void)
+{
+       return page_pools.epp_free_pages;
+}
+EXPORT_SYMBOL(get_free_pages_in_pool);
+
+/*
+ * Let outside world know if enc_pool full capacity is reached
+ */
+int pool_is_at_full_capacity(void)
+{
+       return (page_pools.epp_total_pages == page_pools.epp_max_pages);
+}
+EXPORT_SYMBOL(pool_is_at_full_capacity);
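
Note: these two exports let other modules probe pool pressure before
committing to pool pages. A hypothetical caller-side check, not part of
this patch:

    if (npages > get_free_pages_in_pool() && pool_is_at_full_capacity())
            return -ENOMEM;    /* pool exhausted and cannot grow */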
+
+/*
  * we allocate the requested pages atomically.
  */
 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
@@ -501,16 +558,17 @@ int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
        int             p_idx, g_idx;
        int             i;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(desc->bd_iov_count > 0);
        LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
 
        /* resent bulk, enc iov might have been allocated previously */
-       if (desc->bd_enc_iov != NULL)
+       if (GET_ENC_KIOV(desc) != NULL)
                return 0;
 
-       OBD_ALLOC(desc->bd_enc_iov,
-                 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
-       if (desc->bd_enc_iov == NULL)
+       OBD_ALLOC(GET_ENC_KIOV(desc),
+                 desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
+       if (GET_ENC_KIOV(desc) == NULL)
                return -ENOMEM;
 
        spin_lock(&page_pools.epp_lock);
@@ -537,21 +595,37 @@ again:
 
                        enc_pools_wakeup();
                } else {
-                       if (++page_pools.epp_waitqlen >
-                           page_pools.epp_st_max_wqlen)
-                               page_pools.epp_st_max_wqlen =
-                                               page_pools.epp_waitqlen;
-
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       init_waitqueue_entry_current(&waitlink);
-                       add_wait_queue(&page_pools.epp_waitq, &waitlink);
-
-                       spin_unlock(&page_pools.epp_lock);
-                       waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
-                       remove_wait_queue(&page_pools.epp_waitq, &waitlink);
-                       LASSERT(page_pools.epp_waitqlen > 0);
-                       spin_lock(&page_pools.epp_lock);
-                       page_pools.epp_waitqlen--;
+                       if (page_pools.epp_growing) {
+                               if (++page_pools.epp_waitqlen >
+                                   page_pools.epp_st_max_wqlen)
+                                       page_pools.epp_st_max_wqlen =
+                                                       page_pools.epp_waitqlen;
+
+                               set_current_state(TASK_UNINTERRUPTIBLE);
+                               init_waitqueue_entry(&waitlink, current);
+                               add_wait_queue(&page_pools.epp_waitq,
+                                              &waitlink);
+
+                               spin_unlock(&page_pools.epp_lock);
+                               schedule();
+                               remove_wait_queue(&page_pools.epp_waitq,
+                                                 &waitlink);
+                               LASSERT(page_pools.epp_waitqlen > 0);
+                               spin_lock(&page_pools.epp_lock);
+                               page_pools.epp_waitqlen--;
+                       } else {
+                               /* ptlrpcd thread should not sleep in that case,
+                                * or deadlock may occur!
+                                * Instead, return -ENOMEM so that upper layers
+                                * will put request back in queue. */
+                               page_pools.epp_st_outofmem++;
+                               spin_unlock(&page_pools.epp_lock);
+                               OBD_FREE(GET_ENC_KIOV(desc),
+                                        desc->bd_iov_count *
+                                               sizeof(*GET_ENC_KIOV(desc)));
+                               GET_ENC_KIOV(desc) = NULL;
+                               return -ENOMEM;
+                       }
                }
 
                LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
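
Note: a ptlrpcd thread sleeping here could deadlock, because the same
thread may be needed to progress the very I/O that would return pages to
the pool; hence the non-growing case now fails fast with -ENOMEM. A sketch
of the resulting caller contract (illustrative, not from this patch):

    rc = sptlrpc_enc_pool_get_pages(desc);
    if (rc == -ENOMEM) {
            /* in ptlrpcd context: do not block; put the request back
             * on its queue and retry once the pool has pages again */
            return rc;
    }
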
@@ -574,17 +648,17 @@ again:
         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
         g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
 
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
-                desc->bd_enc_iov[i].kiov_page =
-                                        page_pools.epp_pools[p_idx][g_idx];
-                page_pools.epp_pools[p_idx][g_idx] = NULL;
-
-                if (++g_idx == PAGES_PER_POOL) {
-                        p_idx++;
-                        g_idx = 0;
-                }
-        }
+       for (i = 0; i < desc->bd_iov_count; i++) {
+               LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
+               BD_GET_ENC_KIOV(desc, i).kiov_page =
+                      page_pools.epp_pools[p_idx][g_idx];
+               page_pools.epp_pools[p_idx][g_idx] = NULL;
+
+               if (++g_idx == PAGES_PER_POOL) {
+                       p_idx++;
+                       g_idx = 0;
+               }
+       }
 
         if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                 page_pools.epp_st_lowfree = page_pools.epp_free_pages;
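
Note: desc->bd_enc_iov is gone because bulk descriptors now keep their
buffers in a kiov/iovec union, hidden behind accessor macros. A sketch of
what the accessors presumably expand to (the authoritative definitions
live in lustre/include/lustre_net.h):

    #define GET_ENC_KIOV(desc)       ((desc)->bd_u.bd_kiov.bd_enc_vec)
    #define BD_GET_ENC_KIOV(desc, i) ((desc)->bd_u.bd_kiov.bd_enc_vec[i])
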
@@ -609,48 +683,49 @@ EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
 
 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
 {
-        int     p_idx, g_idx;
-        int     i;
+       int     p_idx, g_idx;
+       int     i;
+
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
 
-        if (desc->bd_enc_iov == NULL)
-                return;
+       if (GET_ENC_KIOV(desc) == NULL)
+               return;
 
-        LASSERT(desc->bd_iov_count > 0);
+       LASSERT(desc->bd_iov_count > 0);
 
        spin_lock(&page_pools.epp_lock);
 
-        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
-        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
+       p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
+       g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
 
-        LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
-                page_pools.epp_total_pages);
-        LASSERT(page_pools.epp_pools[p_idx]);
+       LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
+               page_pools.epp_total_pages);
+       LASSERT(page_pools.epp_pools[p_idx]);
 
-        for (i = 0; i < desc->bd_iov_count; i++) {
-                LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
-                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
-                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
+       for (i = 0; i < desc->bd_iov_count; i++) {
+               LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page != NULL);
+               LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
+               LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
 
-                page_pools.epp_pools[p_idx][g_idx] =
-                                        desc->bd_enc_iov[i].kiov_page;
+               page_pools.epp_pools[p_idx][g_idx] =
+                       BD_GET_ENC_KIOV(desc, i).kiov_page;
 
-                if (++g_idx == PAGES_PER_POOL) {
-                        p_idx++;
-                        g_idx = 0;
-                }
-        }
+               if (++g_idx == PAGES_PER_POOL) {
+                       p_idx++;
+                       g_idx = 0;
+               }
+       }
 
-        page_pools.epp_free_pages += desc->bd_iov_count;
+       page_pools.epp_free_pages += desc->bd_iov_count;
 
-        enc_pools_wakeup();
+       enc_pools_wakeup();
 
        spin_unlock(&page_pools.epp_lock);
 
-       OBD_FREE(desc->bd_enc_iov,
-                desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
-       desc->bd_enc_iov = NULL;
+       OBD_FREE(GET_ENC_KIOV(desc),
+                desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
+       GET_ENC_KIOV(desc) = NULL;
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
 
 /*
  * we don't do much stuff for add_user/del_user anymore, except adding some
@@ -707,11 +782,14 @@ static inline void enc_pools_free(void)
 
 int sptlrpc_enc_pool_init(void)
 {
-       /*
-        * maximum capacity is 1/8 of total physical memory.
-        * is the 1/8 a good number?
-        */
-       page_pools.epp_max_pages = num_physpages / 8;
+       DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
+                        enc_pools_shrink_count, enc_pools_shrink_scan);
+
+       page_pools.epp_max_pages = totalram_pages / 8;
+       if (enc_pool_max_memory_mb > 0 &&
+           enc_pool_max_memory_mb <= (totalram_pages >> mult))
+               page_pools.epp_max_pages = enc_pool_max_memory_mb << mult;
+
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
 
        init_waitqueue_head(&page_pools.epp_waitq);
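
Note: a worked example of the sizing above, with illustrative numbers
(16 GiB of RAM, 4 KiB pages, so mult == 8):

    totalram_pages                  = 4194304 pages
    default cap  (totalram / 8)     =  524288 pages  (2 GiB)
    enc_pool_max_memory_mb = 1024  ->  1024 << 8 = 262144 pages  (1 GiB)

The module parameter overrides the 1/8 default only while it does not
exceed total physical memory (totalram_pages >> mult is total RAM in MB).
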
@@ -737,13 +815,13 @@ int sptlrpc_enc_pool_init(void)
         page_pools.epp_st_lowfree = 0;
         page_pools.epp_st_max_wqlen = 0;
         page_pools.epp_st_max_wait = 0;
+       page_pools.epp_st_outofmem = 0;
 
         enc_pools_alloc();
         if (page_pools.epp_pools == NULL)
                 return -ENOMEM;
 
-       pools_shrinker = set_shrinker(pools_shrinker_seeks,
-                                          enc_pools_shrink);
+       pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
         if (pools_shrinker == NULL) {
                 enc_pools_free();
                 return -ENOMEM;
@@ -768,39 +846,21 @@ void sptlrpc_enc_pool_fini(void)
 
         enc_pools_free();
 
-        if (page_pools.epp_st_access > 0) {
-                CDEBUG(D_SEC,
-                       "max pages %lu, grows %u, grow fails %u, shrinks %u, "
-                       "access %lu, missing %lu, max qlen %u, max wait "
-                       CFS_TIME_T"/%d\n",
-                       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
-                       page_pools.epp_st_grow_fails,
+       if (page_pools.epp_st_access > 0) {
+               CDEBUG(D_SEC,
+                      "max pages %lu, grows %u, grow fails %u, shrinks %u, "
+                      "access %lu, missing %lu, max qlen %u, max wait "
+                      CFS_TIME_T"/%lu, out of mem %lu\n",
+                      page_pools.epp_st_max_pages, page_pools.epp_st_grows,
+                      page_pools.epp_st_grow_fails,
                       page_pools.epp_st_shrinks, page_pools.epp_st_access,
                       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
-                      page_pools.epp_st_max_wait, HZ);
+                      page_pools.epp_st_max_wait,
+                      msecs_to_jiffies(MSEC_PER_SEC),
+                      page_pools.epp_st_outofmem);
        }
 }
 
-#else /* !__KERNEL__ */
-
-int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
-{
-        return 0;
-}
-
-void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
-{
-}
-
-int sptlrpc_enc_pool_init(void)
-{
-        return 0;
-}
-
-void sptlrpc_enc_pool_fini(void)
-{
-}
-#endif
 
 static int cfs_hash_alg_id[] = {
        [BULK_HASH_ALG_NULL]    = CFS_HASH_ALG_NULL,
@@ -816,13 +876,11 @@ const char * sptlrpc_get_hash_name(__u8 hash_alg)
 {
        return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
 }
-EXPORT_SYMBOL(sptlrpc_get_hash_name);
 
 __u8 sptlrpc_get_hash_alg(const char *algname)
 {
        return cfs_crypto_hash_alg(algname);
 }
-EXPORT_SYMBOL(sptlrpc_get_hash_alg);
 
 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
 {
@@ -862,15 +920,21 @@ int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
 }
 EXPORT_SYMBOL(bulk_sec_desc_unpack);
 
+/*
+ * Compute the checksum of an RPC buffer payload.  If the return buffer
+ * \a buf (of size \a buflen) is not large enough, truncate the result to
+ * fit, so that a hash function with a large hash space can be used while
+ * keeping only part of the resulting hash.
+ */
 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
                              void *buf, int buflen)
 {
        struct cfs_crypto_hash_desc     *hdesc;
        int                             hashsize;
-       char                            hashbuf[64];
        unsigned int                    bufsize;
        int                             i, err;
 
+       LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
        LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
        LASSERT(buflen >= 4);
 
@@ -884,30 +948,25 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
        hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);
 
        for (i = 0; i < desc->bd_iov_count; i++) {
-#ifdef __KERNEL__
-               cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
-                                 desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
-                                 desc->bd_iov[i].kiov_len);
-#else
-               cfs_crypto_hash_update(hdesc, desc->bd_iov[i].iov_base,
-                                 desc->bd_iov[i].iov_len);
-#endif
+               cfs_crypto_hash_update_page(hdesc,
+                                 BD_GET_KIOV(desc, i).kiov_page,
+                                 BD_GET_KIOV(desc, i).kiov_offset &
+                                             ~PAGE_MASK,
+                                 BD_GET_KIOV(desc, i).kiov_len);
        }
+
        if (hashsize > buflen) {
+               unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+
                bufsize = sizeof(hashbuf);
-               err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
-                                           &bufsize);
+               LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
+                        bufsize, hashsize);
+               err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
                memcpy(buf, hashbuf, buflen);
        } else {
                bufsize = buflen;
-               err = cfs_crypto_hash_final(hdesc, (unsigned char *)buf,
-                                           &bufsize);
+               err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
        }
 
-       if (err)
-               cfs_crypto_hash_final(hdesc, NULL, NULL);
        return err;
 }
-EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
-
-
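
Note: the truncation path in sptlrpc_get_bulk_checksum() computes the full
digest into a stack buffer sized for the largest supported digest, then
copies only the leading buflen bytes out. A minimal sketch of the same
idea, assuming a 64-byte worst-case digest:

    unsigned char digest[64];          /* >= any supported digest size */
    unsigned int  dlen = sizeof(digest);

    err = cfs_crypto_hash_final(hdesc, digest, &dlen);
    if (err == 0)
            memcpy(buf, digest, buflen);   /* keep leading buflen bytes */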