X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fsec_bulk.c;h=473943cc126b072bb7dfbfbb0e81ea1c8f5a2c9d;hb=f6995cf04407dff15d6ca79ca44cfa97dc6eb014;hp=a9eaccb3df850a9367cb2245bdc0235924381c3b;hpb=84a3fd67356c8073a917ea6abd63928055e38156;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index a9eaccb..473943c 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, Whamcloud, Inc.
+ * Copyright (c) 2011, 2014, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -41,12 +41,6 @@
 #define DEBUG_SUBSYSTEM S_SEC

 #include <libcfs/libcfs.h>
-#ifndef __KERNEL__
-#include <liblustre.h>
-#include <libcfs/list.h>
-#else
-#include <linux/crypto.h>
-#endif

 #include <obd.h>
 #include <obd_class.h>
@@ -63,9 +57,8 @@
  * bulk encryption page pools
  ****************************************/

-#ifdef __KERNEL__
-
-#define PTRS_PER_PAGE	(CFS_PAGE_SIZE / sizeof(void *))
+#define PTRS_PER_PAGE	(PAGE_CACHE_SIZE / sizeof(void *))
 #define PAGES_PER_POOL	(PTRS_PER_PAGE)

 #define IDLE_IDX_MAX	(100)
@@ -80,13 +73,13 @@ static struct ptlrpc_enc_page_pool {
 	unsigned long	 epp_max_pages;   /* maximum pages can hold, const */
 	unsigned int	 epp_max_pools;   /* number of pools, const */

-	/*
-	 * wait queue in case of not enough free pages.
-	 */
-	cfs_waitq_t	 epp_waitq;       /* waiting threads */
-	unsigned int	 epp_waitqlen;    /* wait queue length */
-	unsigned long	 epp_pages_short; /* # of pages wanted of in-q users */
-	unsigned int	 epp_growing:1;   /* during adding pages */
+	/*
+	 * wait queue in case of not enough free pages.
+	 */
+	wait_queue_head_t epp_waitq;      /* waiting threads */
+	unsigned int	 epp_waitqlen;    /* wait queue length */
+	unsigned long	 epp_pages_short; /* # of pages wanted of in-q users */
+	unsigned int	 epp_growing:1;   /* during adding pages */

 	/*
 	 * indicating how idle the pools are, from 0 to MAX_IDLE_IDX
@@ -104,7 +97,7 @@ static struct ptlrpc_enc_page_pool {
 	/*
 	 * in-pool pages bookkeeping
 	 */
-	cfs_spinlock_t	 epp_lock;        /* protect following fields */
+	spinlock_t	 epp_lock;        /* protect following fields */
 	unsigned long	 epp_total_pages; /* total pages in pools */
 	unsigned long	 epp_free_pages;  /* current pages available */
@@ -120,30 +113,29 @@
 	unsigned long	 epp_st_lowfree;  /* lowest free pages reached */
 	unsigned int	 epp_st_max_wqlen; /* highest waitqueue length */
 	cfs_time_t	 epp_st_max_wait; /* in jeffies */
-	/*
-	 * pointers to pools
-	 */
-	cfs_page_t	 ***epp_pools;
+	/*
+	 * pointers to pools
+	 */
+	struct page	 ***epp_pools;
 } page_pools;

 /*
  * memory shrinker
  */
-const int pools_shrinker_seeks = CFS_DEFAULT_SEEKS;
-static struct cfs_shrinker *pools_shrinker = NULL;
+static const int pools_shrinker_seeks = DEFAULT_SEEKS;
+static struct shrinker *pools_shrinker;

 /*
  * /proc/fs/lustre/sptlrpc/encrypt_page_pools
  */
-int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
-			       int *eof, void *data)
+int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
 {
 	int rc;

-	cfs_spin_lock(&page_pools.epp_lock);
+	spin_lock(&page_pools.epp_lock);

-	rc = snprintf(page, count,
+	rc = seq_printf(m,
 		      "physical pages: %lu\n"
 		      "pages per pool: %lu\n"
 		      "max pages: %lu\n"
@@ -161,9 +153,9 @@ int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
 		      "cache missing: %lu\n"
 		      "low free mark: %lu\n"
 		      "max waitqueue depth: %u\n"
-		      "max wait time: "CFS_TIME_T"/%u\n"
+		      "max wait time: "CFS_TIME_T"/%lu\n"
 		      ,
-		      cfs_num_physpages,
+		      totalram_pages,
 		      PAGES_PER_POOL,
 		      page_pools.epp_max_pages,
 		      page_pools.epp_max_pools,
@@ -177,14 +169,15 @@
 		      page_pools.epp_st_grow_fails,
 		      page_pools.epp_st_shrinks,
 		      page_pools.epp_st_access,
-		      page_pools.epp_st_missings,
-		      page_pools.epp_st_lowfree,
-		      page_pools.epp_st_max_wqlen,
-		      page_pools.epp_st_max_wait, CFS_HZ
-	);
-
-	cfs_spin_unlock(&page_pools.epp_lock);
-	return rc;
+		      page_pools.epp_st_missings,
+		      page_pools.epp_st_lowfree,
+		      page_pools.epp_st_max_wqlen,
+		      page_pools.epp_st_max_wait,
+		      msecs_to_jiffies(MSEC_PER_SEC)
+	);
+
+	spin_unlock(&page_pools.epp_lock);
+	return rc;
 }

 static void enc_pools_release_free_pages(long npages)
@@ -214,65 +207,102 @@
 		LASSERT(page_pools.epp_pools[p_idx]);
 		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);

-		cfs_free_page(page_pools.epp_pools[p_idx][g_idx]);
+		__free_page(page_pools.epp_pools[p_idx][g_idx]);
 		page_pools.epp_pools[p_idx][g_idx] = NULL;

 		if (++g_idx == PAGES_PER_POOL) {
 			p_idx++;
 			g_idx = 0;
 		}
-	};
+	}

 	/* free unused pools */
 	while (p_idx_max1 < p_idx_max2) {
 		LASSERT(page_pools.epp_pools[p_idx_max2]);
-		OBD_FREE(page_pools.epp_pools[p_idx_max2], CFS_PAGE_SIZE);
+		OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE);
 		page_pools.epp_pools[p_idx_max2] = NULL;
 		p_idx_max2--;
 	}
 }

 /*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_count(struct shrinker *s,
+					    struct shrink_control *sc)
+{
+	/*
+	 * if no pool access for a long time, we consider it's fully idle.
+	 * a little race here is fine.
+	 */
+	if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+		     CACHE_QUIESCENT_PERIOD)) {
+		spin_lock(&page_pools.epp_lock);
+		page_pools.epp_idle_idx = IDLE_IDX_MAX;
+		spin_unlock(&page_pools.epp_lock);
+	}
+
+	LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+	return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
+		(IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+}
+
+/*
+ * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
+ */
+static unsigned long enc_pools_shrink_scan(struct shrinker *s,
+					   struct shrink_control *sc)
+{
+	spin_lock(&page_pools.epp_lock);
+	sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
+			       page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
+	if (sc->nr_to_scan > 0) {
+		enc_pools_release_free_pages(sc->nr_to_scan);
+		CDEBUG(D_SEC, "released %ld pages, %ld left\n",
+		       (long)sc->nr_to_scan, page_pools.epp_free_pages);
+
+		page_pools.epp_st_shrinks++;
+		page_pools.epp_last_shrink = cfs_time_current_sec();
+	}
+	spin_unlock(&page_pools.epp_lock);
+
+	/*
+	 * if no pool access for a long time, we consider it's fully idle.
+	 * a little race here is fine.
+	 */
+	if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
+		     CACHE_QUIESCENT_PERIOD)) {
+		spin_lock(&page_pools.epp_lock);
+		page_pools.epp_idle_idx = IDLE_IDX_MAX;
+		spin_unlock(&page_pools.epp_lock);
+	}
+
+	LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
+	return sc->nr_to_scan;
+}
+
+#ifndef HAVE_SHRINKER_COUNT
+/*
  * could be called frequently for query (@nr_to_scan == 0).
  * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
  */
 static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
 {
-	if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
-		cfs_spin_lock(&page_pools.epp_lock);
-		shrink_param(sc, nr_to_scan) = min_t(unsigned long,
-					shrink_param(sc, nr_to_scan),
-					page_pools.epp_free_pages -
-					PTLRPC_MAX_BRW_PAGES);
-		if (shrink_param(sc, nr_to_scan) > 0) {
-			enc_pools_release_free_pages(shrink_param(sc,
-								  nr_to_scan));
-			CDEBUG(D_SEC, "released %ld pages, %ld left\n",
-			       (long)shrink_param(sc, nr_to_scan),
-			       page_pools.epp_free_pages);
-
-			page_pools.epp_st_shrinks++;
-			page_pools.epp_last_shrink = cfs_time_current_sec();
-		}
-		cfs_spin_unlock(&page_pools.epp_lock);
-	}
+	struct shrink_control scv = {
+		.nr_to_scan = shrink_param(sc, nr_to_scan),
+		.gfp_mask   = shrink_param(sc, gfp_mask)
+	};
+#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
+	struct shrinker *shrinker = NULL;
+#endif

-	/*
-	 * if no pool access for a long time, we consider it's fully idle.
-	 * a little race here is fine.
-	 */
-	if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
-		     CACHE_QUIESCENT_PERIOD)) {
-		cfs_spin_lock(&page_pools.epp_lock);
-		page_pools.epp_idle_idx = IDLE_IDX_MAX;
-		cfs_spin_unlock(&page_pools.epp_lock);
-	}
+	enc_pools_shrink_scan(shrinker, &scv);

-	LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
-	return max((int) page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
-	       (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
+	return enc_pools_shrink_count(shrinker, &scv);
 }
+#endif /* HAVE_SHRINKER_COUNT */
+
 static inline int npages_to_npools(unsigned long npages)
@@ -282,25 +312,25 @@ int npages_to_npools(unsigned long npages)
 /*
  * return how many pages cleaned up.
  */
-static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
+static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
 {
-	unsigned long cleaned = 0;
-	int i, j;
-
-	for (i = 0; i < npools; i++) {
-		if (pools[i]) {
-			for (j = 0; j < PAGES_PER_POOL; j++) {
-				if (pools[i][j]) {
-					cfs_free_page(pools[i][j]);
-					cleaned++;
-				}
-			}
-			OBD_FREE(pools[i], CFS_PAGE_SIZE);
-			pools[i] = NULL;
-		}
-	}
+	unsigned long cleaned = 0;
+	int i, j;
+
+	for (i = 0; i < npools; i++) {
+		if (pools[i]) {
+			for (j = 0; j < PAGES_PER_POOL; j++) {
+				if (pools[i][j]) {
+					__free_page(pools[i][j]);
+					cleaned++;
+				}
+			}
+			OBD_FREE(pools[i], PAGE_CACHE_SIZE);
+			pools[i] = NULL;
+		}
+	}

-	return cleaned;
+	return cleaned;
 }

 /*
@@ -310,7 +340,7 @@ static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
  * we have options to avoid most memory copy with some tricks. but we choose
  * the simplest way to avoid complexity. It's not frequently called.
  */
-static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
+static void enc_pools_insert(struct page ***pools, int npools, int npages)
 {
 	int freeslot;
 	int op_idx, np_idx, og_idx, ng_idx;
@@ -321,7 +351,7 @@ static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
 	LASSERT(npages_to_npools(npages) == npools);
 	LASSERT(page_pools.epp_growing);

-	cfs_spin_lock(&page_pools.epp_lock);
+	spin_lock(&page_pools.epp_lock);

 	/*
 	 * (1) fill all the free slots of current pools.
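
[Editor's note, not part of the patch: the enc_pools_shrink_count()/enc_pools_shrink_scan() pair above follows the two-callback shrinker interface that mainline Linux adopted in 3.12, while the #ifndef HAVE_SHRINKER_COUNT wrapper emulates it on older kernels that still expect a single callback. As a rough sketch of the native interface the patch is targeting — the demo_* names and the nr_cached counter are illustrative, not part of Lustre:

	/* minimal count/scan shrinker sketch for kernels >= 3.12 */
	#include <linux/shrinker.h>
	#include <linux/atomic.h>

	static atomic_long_t nr_cached;	/* objects we could free */

	static unsigned long demo_cache_count(struct shrinker *s,
					      struct shrink_control *sc)
	{
		/* must be cheap: the VM may poll this frequently;
		 * returning 0 tells it there is nothing to reclaim */
		return atomic_long_read(&nr_cached);
	}

	static unsigned long demo_cache_scan(struct shrinker *s,
					     struct shrink_control *sc)
	{
		unsigned long freed = 0;

		/* free up to sc->nr_to_scan objects */
		while (freed < sc->nr_to_scan &&
		       atomic_long_read(&nr_cached) > 0) {
			atomic_long_dec(&nr_cached);
			freed++;
		}
		return freed;	/* or SHRINK_STOP if nothing can go */
	}

	static struct shrinker demo_shrinker = {
		.count_objects	= demo_cache_count,
		.scan_objects	= demo_cache_scan,
		.seeks		= DEFAULT_SEEKS,
	};

	/* register_shrinker(&demo_shrinker) at init,
	 * unregister_shrinker(&demo_shrinker) at teardown
	 * (the 3.12-era single-argument form is assumed here) */

This is also why enc_pools_shrink_count() reads epp_free_pages and epp_idle_idx without taking epp_lock: the count path must stay cheap, and a slightly stale estimate is acceptable.]
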
@@ -388,20 +418,20 @@ static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
 	CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
 	       page_pools.epp_total_pages);

-	cfs_spin_unlock(&page_pools.epp_lock);
+	spin_unlock(&page_pools.epp_lock);
 }

 static int enc_pools_add_pages(int npages)
 {
-	static CFS_DEFINE_MUTEX(add_pages_mutex);
-	cfs_page_t ***pools;
-	int npools, alloced = 0;
-	int i, j, rc = -ENOMEM;
+	static DEFINE_MUTEX(add_pages_mutex);
+	struct page ***pools;
+	int npools, alloced = 0;
+	int i, j, rc = -ENOMEM;

-	if (npages < PTLRPC_MAX_BRW_PAGES)
-		npages = PTLRPC_MAX_BRW_PAGES;
+	if (npages < PTLRPC_MAX_BRW_PAGES)
+		npages = PTLRPC_MAX_BRW_PAGES;

-	cfs_mutex_lock(&add_pages_mutex);
+	mutex_lock(&add_pages_mutex);

 	if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
 		npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
@@ -414,21 +444,21 @@ static int enc_pools_add_pages(int npages)
 	if (pools == NULL)
 		goto out;

-	for (i = 0; i < npools; i++) {
-		OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
-		if (pools[i] == NULL)
-			goto out_pools;
+	for (i = 0; i < npools; i++) {
+		OBD_ALLOC(pools[i], PAGE_CACHE_SIZE);
+		if (pools[i] == NULL)
+			goto out_pools;

-		for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
-			pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
-						     CFS_ALLOC_HIGHMEM);
-			if (pools[i][j] == NULL)
-				goto out_pools;
+		for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
+			pools[i][j] = alloc_page(GFP_NOFS |
+						 __GFP_HIGHMEM);
+			if (pools[i][j] == NULL)
+				goto out_pools;

-			alloced++;
-		}
-	}
-	LASSERT(alloced == npages);
+			alloced++;
+		}
+	}
+	LASSERT(alloced == npages);

 	enc_pools_insert(pools, npools, npages);
 	CDEBUG(D_SEC, "added %d pages into pools\n", npages);
@@ -443,19 +473,18 @@ out:
 		CERROR("Failed to allocate %d enc pages\n", npages);
 	}

-	cfs_mutex_unlock(&add_pages_mutex);
+	mutex_unlock(&add_pages_mutex);
 	return rc;
 }

 static inline void enc_pools_wakeup(void)
 {
-	LASSERT_SPIN_LOCKED(&page_pools.epp_lock);
-	LASSERT(page_pools.epp_waitqlen >= 0);
+	assert_spin_locked(&page_pools.epp_lock);

-	if (unlikely(page_pools.epp_waitqlen)) {
-		LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
-		cfs_waitq_broadcast(&page_pools.epp_waitq);
-	}
+	if (unlikely(page_pools.epp_waitqlen)) {
+		LASSERT(waitqueue_active(&page_pools.epp_waitq));
+		wake_up_all(&page_pools.epp_waitq);
+	}
 }

 static int enc_pools_should_grow(int page_needed, long now)
@@ -495,72 +524,72 @@
  */
 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
 {
-	cfs_waitlink_t waitlink;
-	unsigned long this_idle = -1;
-	cfs_time_t tick = 0;
-	long now;
-	int p_idx, g_idx;
-	int i;
+	wait_queue_t waitlink;
+	unsigned long this_idle = -1;
+	cfs_time_t tick = 0;
+	long now;
+	int p_idx, g_idx;
+	int i;

-	LASSERT(desc->bd_iov_count > 0);
-	LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);
+	LASSERT(desc->bd_iov_count > 0);
+	LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

-	/* resent bulk, enc iov might have been allocated previously */
-	if (desc->bd_enc_iov != NULL)
-		return 0;
+	/* resent bulk, enc iov might have been allocated previously */
+	if (desc->bd_enc_iov != NULL)
+		return 0;

-	OBD_ALLOC(desc->bd_enc_iov,
-		  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
-	if (desc->bd_enc_iov == NULL)
-		return -ENOMEM;
+	OBD_ALLOC(desc->bd_enc_iov,
+		  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+	if (desc->bd_enc_iov == NULL)
+		return -ENOMEM;

-	cfs_spin_lock(&page_pools.epp_lock);
+	spin_lock(&page_pools.epp_lock);

-	page_pools.epp_st_access++;
+	page_pools.epp_st_access++;
 again:
-	if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
-		if (tick == 0)
-			tick = cfs_time_current();
+	if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
+		if (tick == 0)
+			tick = cfs_time_current();

-		now = cfs_time_current_sec();
+		now = cfs_time_current_sec();

-		page_pools.epp_st_missings++;
-		page_pools.epp_pages_short += desc->bd_iov_count;
+		page_pools.epp_st_missings++;
+		page_pools.epp_pages_short += desc->bd_iov_count;

-		if (enc_pools_should_grow(desc->bd_iov_count, now)) {
-			page_pools.epp_growing = 1;
+		if (enc_pools_should_grow(desc->bd_iov_count, now)) {
+			page_pools.epp_growing = 1;

-			cfs_spin_unlock(&page_pools.epp_lock);
-			enc_pools_add_pages(page_pools.epp_pages_short / 2);
-			cfs_spin_lock(&page_pools.epp_lock);
+			spin_unlock(&page_pools.epp_lock);
+			enc_pools_add_pages(page_pools.epp_pages_short / 2);
+			spin_lock(&page_pools.epp_lock);

-			page_pools.epp_growing = 0;
+			page_pools.epp_growing = 0;

-			enc_pools_wakeup();
-		} else {
-			if (++page_pools.epp_waitqlen >
-			    page_pools.epp_st_max_wqlen)
-				page_pools.epp_st_max_wqlen =
-						page_pools.epp_waitqlen;
+			enc_pools_wakeup();
+		} else {
+			if (++page_pools.epp_waitqlen >
+			    page_pools.epp_st_max_wqlen)
+				page_pools.epp_st_max_wqlen =
+						page_pools.epp_waitqlen;

-			cfs_set_current_state(CFS_TASK_UNINT);
-			cfs_waitlink_init(&waitlink);
-			cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			init_waitqueue_entry(&waitlink, current);
+			add_wait_queue(&page_pools.epp_waitq, &waitlink);

-			cfs_spin_unlock(&page_pools.epp_lock);
-			cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
-			cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
-			LASSERT(page_pools.epp_waitqlen > 0);
-			cfs_spin_lock(&page_pools.epp_lock);
-			page_pools.epp_waitqlen--;
-		}
+			spin_unlock(&page_pools.epp_lock);
+			schedule();
+			remove_wait_queue(&page_pools.epp_waitq, &waitlink);
+			LASSERT(page_pools.epp_waitqlen > 0);
+			spin_lock(&page_pools.epp_lock);
+			page_pools.epp_waitqlen--;
+		}

-		LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
-		page_pools.epp_pages_short -= desc->bd_iov_count;
+		LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
+		page_pools.epp_pages_short -= desc->bd_iov_count;

-		this_idle = 0;
-		goto again;
-	}
+		this_idle = 0;
+		goto again;
+	}

 	/* record max wait time */
 	if (unlikely(tick != 0)) {
@@ -603,8 +632,8 @@ again:

 	page_pools.epp_last_access = cfs_time_current_sec();

-	cfs_spin_unlock(&page_pools.epp_lock);
-	return 0;
+	spin_unlock(&page_pools.epp_lock);
+	return 0;
 }
 EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);

@@ -618,7 +647,7 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)

 	LASSERT(desc->bd_iov_count > 0);

-	cfs_spin_lock(&page_pools.epp_lock);
+	spin_lock(&page_pools.epp_lock);

 	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
 	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
@@ -645,13 +674,12 @@ void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)

 	enc_pools_wakeup();

-	cfs_spin_unlock(&page_pools.epp_lock);
+	spin_unlock(&page_pools.epp_lock);

-	OBD_FREE(desc->bd_enc_iov,
-		 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
-	desc->bd_enc_iov = NULL;
+	OBD_FREE(desc->bd_enc_iov,
+		 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
+	desc->bd_enc_iov = NULL;
 }
-EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);

 /*
  * we don't do much stuff for add_user/del_user anymore, except adding some
@@ -660,25 +688,25 @@ EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
  */
 int sptlrpc_enc_pool_add_user(void)
 {
-	int need_grow = 0;
+	int need_grow = 0;

-	cfs_spin_lock(&page_pools.epp_lock);
-	if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
-		page_pools.epp_growing = 1;
-		need_grow = 1;
-	}
-	cfs_spin_unlock(&page_pools.epp_lock);
+	spin_lock(&page_pools.epp_lock);
+	if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
+		page_pools.epp_growing = 1;
+		need_grow = 1;
+	}
+	spin_unlock(&page_pools.epp_lock);

-	if (need_grow) {
-		enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
-				    PTLRPC_MAX_BRW_PAGES);
+	if (need_grow) {
+		enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
+				    PTLRPC_MAX_BRW_PAGES);

-		cfs_spin_lock(&page_pools.epp_lock);
-		page_pools.epp_growing = 0;
-		enc_pools_wakeup();
-		cfs_spin_unlock(&page_pools.epp_lock);
-	}
-	return 0;
+		spin_lock(&page_pools.epp_lock);
+		page_pools.epp_growing = 0;
+		enc_pools_wakeup();
+		spin_unlock(&page_pools.epp_lock);
+	}
+	return 0;
 }
 EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);

@@ -708,16 +736,18 @@ static inline void enc_pools_free(void)

 int sptlrpc_enc_pool_init(void)
 {
-	/*
-	 * maximum capacity is 1/8 of total physical memory.
-	 * is the 1/8 a good number?
-	 */
-	page_pools.epp_max_pages = cfs_num_physpages / 8;
-	page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
-
-	cfs_waitq_init(&page_pools.epp_waitq);
-	page_pools.epp_waitqlen = 0;
-	page_pools.epp_pages_short = 0;
+	DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
+			 enc_pools_shrink_count, enc_pools_shrink_scan);
+	/*
+	 * maximum capacity is 1/8 of total physical memory.
+	 * is the 1/8 a good number?
+	 */
+	page_pools.epp_max_pages = totalram_pages / 8;
+	page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
+
+	init_waitqueue_head(&page_pools.epp_waitq);
+	page_pools.epp_waitqlen = 0;
+	page_pools.epp_pages_short = 0;

 	page_pools.epp_growing = 0;

@@ -725,7 +755,7 @@
 	page_pools.epp_last_shrink = cfs_time_current_sec();
 	page_pools.epp_last_access = cfs_time_current_sec();

-	cfs_spin_lock_init(&page_pools.epp_lock);
+	spin_lock_init(&page_pools.epp_lock);
 	page_pools.epp_total_pages = 0;
 	page_pools.epp_free_pages = 0;

@@ -743,8 +773,7 @@
 	if (page_pools.epp_pools == NULL)
 		return -ENOMEM;

-	pools_shrinker = cfs_set_shrinker(pools_shrinker_seeks,
-					  enc_pools_shrink);
+	pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
 	if (pools_shrinker == NULL) {
 		enc_pools_free();
 		return -ENOMEM;
@@ -761,7 +790,7 @@ void sptlrpc_enc_pool_fini(void)
 	LASSERT(page_pools.epp_pools);
 	LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

-	cfs_remove_shrinker(pools_shrinker);
+	remove_shrinker(pools_shrinker);

 	npools = npages_to_npools(page_pools.epp_total_pages);
 	cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
@@ -769,39 +798,20 @@ void sptlrpc_enc_pool_fini(void)

 	enc_pools_free();

-	if (page_pools.epp_st_access > 0) {
-		CDEBUG(D_SEC,
-		       "max pages %lu, grows %u, grow fails %u, shrinks %u, "
-		       "access %lu, missing %lu, max qlen %u, max wait "
-		       CFS_TIME_T"/%d\n",
-		       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
-		       page_pools.epp_st_grow_fails,
-		       page_pools.epp_st_shrinks, page_pools.epp_st_access,
-		       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
-		       page_pools.epp_st_max_wait, CFS_HZ);
-	}
-}
-
-#else /* !__KERNEL__ */
-
-int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
-{
-	return 0;
-}
-
-void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
-{
+	if (page_pools.epp_st_access > 0) {
+		CDEBUG(D_SEC,
+		       "max pages %lu, grows %u, grow fails %u, shrinks %u, "
+		       "access %lu, missing %lu, max qlen %u, max wait "
+		       CFS_TIME_T"/%lu\n",
+		       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
+		       page_pools.epp_st_grow_fails,
+		       page_pools.epp_st_shrinks, page_pools.epp_st_access,
+		       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
+		       page_pools.epp_st_max_wait,
+		       msecs_to_jiffies(MSEC_PER_SEC));
+	}
 }

-int sptlrpc_enc_pool_init(void)
-{
-	return 0;
-}
-
-void sptlrpc_enc_pool_fini(void)
-{
-}
-#endif

 static int cfs_hash_alg_id[] = {
 	[BULK_HASH_ALG_NULL]	= CFS_HASH_ALG_NULL,
@@ -817,13 +827,11 @@
 const char * sptlrpc_get_hash_name(__u8 hash_alg)
 {
 	return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
 }
-EXPORT_SYMBOL(sptlrpc_get_hash_name);

 __u8 sptlrpc_get_hash_alg(const char *algname)
 {
 	return cfs_crypto_hash_alg(algname);
 }
-EXPORT_SYMBOL(sptlrpc_get_hash_alg);

 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
 {
@@ -863,12 +871,17 @@ int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
 }
 EXPORT_SYMBOL(bulk_sec_desc_unpack);

+/*
+ * Compute the checksum of an RPC buffer payload.  If the return \a buflen
+ * is not large enough, truncate the result to fit so that it is possible
+ * to use a hash function with a large hash space, but only use a part of
+ * the resulting hash.
+ */
 int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
 			      void *buf, int buflen)
 {
 	struct cfs_crypto_hash_desc *hdesc;
 	int hashsize;
-	char hashbuf[64];
 	unsigned int bufsize;
 	int i, err;

@@ -885,30 +898,23 @@
 	hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);

 	for (i = 0; i < desc->bd_iov_count; i++) {
-#ifdef __KERNEL__
 		cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
-				desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
+				desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
 				desc->bd_iov[i].kiov_len);
-#else
-		cfs_crypto_hash_update(hdesc, desc->bd_iov[i].iov_base,
-				       desc->bd_iov[i].iov_len);
-#endif
 	}
+
 	if (hashsize > buflen) {
+		unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+
 		bufsize = sizeof(hashbuf);
-		err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
-					    &bufsize);
+		LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
+			 bufsize, hashsize);
+		err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
 		memcpy(buf, hashbuf, buflen);
 	} else {
 		bufsize = buflen;
-		err = cfs_crypto_hash_final(hdesc, (unsigned char *)buf,
-					    &bufsize);
+		err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
 	}

-	if (err)
-		cfs_crypto_hash_final(hdesc, NULL, NULL);
 	return err;
 }
-EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
-
-
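
[Editor's note, not part of the patch: the conversion from cfs_waitq_* to native wait queues in sptlrpc_enc_pool_get_pages() keeps the classic open-coded sleep: set the task state and queue the waiter first, then drop the lock, then schedule(). Queueing before unlocking is what prevents a wakeup from enc_pools_wakeup() being lost in the window between the check and the sleep. A minimal standalone sketch of that handshake — the demo_* names stand in for the page_pools fields and are illustrative only:

	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);
	static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
	static unsigned long demo_free;	/* protected by demo_lock */

	static void demo_wait_for_resources(unsigned long need)
	{
		wait_queue_t wait;	/* wait_queue_entry_t on >= 4.13 */

		spin_lock(&demo_lock);
		while (demo_free < need) {
			/* queue ourselves before dropping the lock so a
			 * concurrent wake_up_all() cannot be missed */
			set_current_state(TASK_UNINTERRUPTIBLE);
			init_waitqueue_entry(&wait, current);
			add_wait_queue(&demo_waitq, &wait);

			spin_unlock(&demo_lock);
			schedule();		/* sleep until woken */
			remove_wait_queue(&demo_waitq, &wait);
			spin_lock(&demo_lock);
		}
		demo_free -= need;
		spin_unlock(&demo_lock);
	}

	static void demo_release_resources(unsigned long n)
	{
		spin_lock(&demo_lock);
		demo_free += n;
		if (waitqueue_active(&demo_waitq))
			wake_up_all(&demo_waitq);
		spin_unlock(&demo_lock);
	}

The patch uses wake_up_all() rather than wake_up() because several waiters with different page demands may be queued, and any of them might be satisfiable after a grow or a put.]
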
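[Editor's note, not part of the patch: the new comment on sptlrpc_get_bulk_checksum() describes a digest-truncation convention: compute the full digest into a maximum-size scratch buffer, then keep only the first buflen bytes when the caller's buffer is smaller than the algorithm's output. A self-contained userspace sketch of just that logic, under the assumption that the final-digest helper takes an in/out length the way cfs_crypto_hash_final() does; all demo_* names are hypothetical:

	#include <stdio.h>
	#include <string.h>

	#define DEMO_DIGEST_MAX 64	/* large enough for e.g. SHA-512 */

	/* stand-in for a crypto "final" call: writes 'want' digest bytes
	 * if *len is big enough, and returns the size via *len */
	static int demo_hash_final(unsigned char *digest, unsigned int *len,
				   unsigned int want)
	{
		unsigned int i;

		if (*len < want)
			return -1;		/* caller buffer too small */
		for (i = 0; i < want; i++)
			digest[i] = (unsigned char)i;	/* fake bytes */
		*len = want;
		return 0;
	}

	/* keep only the first buflen bytes when the digest is larger than
	 * the caller's buffer, as the patched function does */
	static int demo_hash_to_buf(void *buf, unsigned int buflen,
				    unsigned int hashsize)
	{
		int rc;

		if (hashsize > buflen) {
			unsigned char full[DEMO_DIGEST_MAX];
			unsigned int sz = sizeof(full);

			rc = demo_hash_final(full, &sz, hashsize);
			if (rc == 0)
				memcpy(buf, full, buflen); /* truncate */
		} else {
			unsigned int sz = buflen;

			rc = demo_hash_final(buf, &sz, hashsize);
		}
		return rc;
	}

	int main(void)
	{
		unsigned char out[16];	/* smaller than a 64-byte digest */

		if (demo_hash_to_buf(out, sizeof(out), 64) == 0)
			printf("kept %zu of 64 digest bytes\n", sizeof(out));
		return 0;
	}

Truncating a strong hash this way trades collision resistance for a fixed wire format, which is why the comment stresses that only part of the hash space is used.]
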