/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_bulk.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>

#include <obd_cksum.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"
static int mult = 20 - PAGE_CACHE_SHIFT;
static int enc_pool_max_memory_mb;
module_param(enc_pool_max_memory_mb, int, 0644);
MODULE_PARM_DESC(enc_pool_max_memory_mb,
		 "Encoding pool max memory (MB), 1/8 of total physical memory by default");
/****************************************
 * bulk encryption page pools           *
 ****************************************/

#define PTRS_PER_PAGE		(PAGE_CACHE_SIZE / sizeof(void *))
#define PAGES_PER_POOL		(PTRS_PER_PAGE)

#define IDLE_IDX_MAX		(100)
#define IDLE_IDX_WEIGHT		(3)

#define CACHE_QUIESCENT_PERIOD	(20)
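/*
 * Pool geometry, e.g. assuming a 4KB PAGE_CACHE_SIZE and 8-byte pointers:
 * PTRS_PER_PAGE = 4096 / 8 = 512, so one pool (itself a single page of
 * page pointers) tracks 512 pages, i.e. 2MB of encryption memory, and a
 * global page index idx maps to
 * epp_pools[idx / PAGES_PER_POOL][idx % PAGES_PER_POOL].
 */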
static struct ptlrpc_enc_page_pool {
	unsigned long	 epp_max_pages;	  /* maximum pages can hold, const */
	unsigned int	 epp_max_pools;	  /* number of pools, const */

	/*
	 * wait queue in case of not enough free pages.
	 */
	wait_queue_head_t epp_waitq;	  /* waiting threads */
	unsigned int	 epp_waitqlen;	  /* wait queue length */
	unsigned long	 epp_pages_short; /* # of pages wanted by in-q users */
	unsigned int	 epp_growing:1;	  /* during adding pages */

	/*
	 * indicates how idle the pools are, from 0 to IDLE_IDX_MAX.
	 * it is advanced each time pages are taken from the pools, not by
	 * elapsed time, so even if the system has been idle for a while
	 * the idle_idx may still be low if there was no pool activity.
	 */
	unsigned long	 epp_idle_idx;

	/* last shrink time due to mem tight */
	long		 epp_last_shrink;
	long		 epp_last_access;

	/*
	 * in-pool pages bookkeeping
	 */
	spinlock_t	 epp_lock;	  /* protect following fields */
	unsigned long	 epp_total_pages; /* total pages in pools */
	unsigned long	 epp_free_pages;  /* current pages available */

	/*
	 * statistics
	 */
	unsigned long	 epp_st_max_pages;  /* # of pages ever reached */
	unsigned int	 epp_st_grows;	    /* # of grows */
	unsigned int	 epp_st_grow_fails; /* # of add pages failures */
	unsigned int	 epp_st_shrinks;    /* # of shrinks */
	unsigned long	 epp_st_access;	    /* # of accesses */
	unsigned long	 epp_st_missings;   /* # of cache misses */
	unsigned long	 epp_st_lowfree;    /* lowest free pages reached */
	unsigned int	 epp_st_max_wqlen;  /* highest waitqueue length */
	cfs_time_t	 epp_st_max_wait;   /* in jiffies */
	unsigned long	 epp_st_outofmem;   /* # of out-of-mem requests */

	/*
	 * pointers to pools, may be vmalloc'd
	 */
	struct page	***epp_pools;
} page_pools;
static const int pools_shrinker_seeks = DEFAULT_SEEKS;
static struct shrinker *pools_shrinker;
/*
 * /proc/fs/lustre/sptlrpc/encrypt_page_pools
 */
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
{
	spin_lock(&page_pools.epp_lock);

	seq_printf(m, "physical pages: %lu\n"
		   "pages per pool: %lu\n"
		   "max pages: %lu\n"
		   "max pools: %u\n"
		   "total pages: %lu\n"
		   "total free: %lu\n"
		   "idle index: %lu/100\n"
		   "last shrink: %lds\n"
		   "last access: %lds\n"
		   "max pages reached: %lu\n"
		   "grows: %u\n"
		   "grows failure: %u\n"
		   "shrinks: %u\n"
		   "cache access: %lu\n"
		   "cache missing: %lu\n"
		   "low free mark: %lu\n"
		   "max waitqueue depth: %u\n"
		   "max wait time: "CFS_TIME_T"/%lu\n"
		   "out of mem: %lu\n",
		   totalram_pages, PAGES_PER_POOL,
		   page_pools.epp_max_pages,
		   page_pools.epp_max_pools,
		   page_pools.epp_total_pages,
		   page_pools.epp_free_pages,
		   page_pools.epp_idle_idx,
		   cfs_time_current_sec() - page_pools.epp_last_shrink,
		   cfs_time_current_sec() - page_pools.epp_last_access,
		   page_pools.epp_st_max_pages,
		   page_pools.epp_st_grows,
		   page_pools.epp_st_grow_fails,
		   page_pools.epp_st_shrinks,
		   page_pools.epp_st_access,
		   page_pools.epp_st_missings,
		   page_pools.epp_st_lowfree,
		   page_pools.epp_st_max_wqlen,
		   page_pools.epp_st_max_wait,
		   msecs_to_jiffies(MSEC_PER_SEC),
		   page_pools.epp_st_outofmem);

	spin_unlock(&page_pools.epp_lock);

	return 0;
}
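/*
 * Sample of what the proc file renders (values are hypothetical):
 *
 *	$ cat /proc/fs/lustre/sptlrpc/encrypt_page_pools
 *	physical pages: 2097152
 *	pages per pool: 512
 *	max pages: 262144
 *	total pages: 1024
 *	total free: 1024
 *	idle index: 100/100
 */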
static void enc_pools_release_free_pages(long npages)
{
	int p_idx, g_idx;
	int p_idx_max1, p_idx_max2;

	LASSERT(npages > 0);
	LASSERT(npages <= page_pools.epp_free_pages);
	LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);

	/* max pool index before the release */
	p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;

	page_pools.epp_free_pages -= npages;
	page_pools.epp_total_pages -= npages;

	/* max pool index after the release */
	p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
		     ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);

	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
	LASSERT(page_pools.epp_pools[p_idx]);

	while (npages--) {
		LASSERT(page_pools.epp_pools[p_idx]);
		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);

		__free_page(page_pools.epp_pools[p_idx][g_idx]);
		page_pools.epp_pools[p_idx][g_idx] = NULL;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	/* free unused pools */
	while (p_idx_max1 < p_idx_max2) {
		LASSERT(page_pools.epp_pools[p_idx_max2]);
		OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE);
		page_pools.epp_pools[p_idx_max2] = NULL;
		p_idx_max2--;
	}
}
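/*
 * Worked example of the index math above, assuming PAGES_PER_POOL = 512:
 * with epp_free_pages = 1000, the first free slot is at
 * p_idx = 1000 / 512 = 1, g_idx = 1000 % 512 = 488, i.e. slot 488 of the
 * second pool.  Releasing npages = 100 frees global slots 900..999, and
 * if epp_total_pages drops to 512 or below, the second pool page itself
 * becomes unused and is freed by the trailing loop.
 */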
/*
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static unsigned long enc_pools_shrink_count(struct shrinker *s,
					    struct shrink_control *sc)
{
	/*
	 * if there has been no pool access for a long time, consider the
	 * pools fully idle.  a little race here is fine.
	 */
	if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
		     CACHE_QUIESCENT_PERIOD)) {
		spin_lock(&page_pools.epp_lock);
		page_pools.epp_idle_idx = IDLE_IDX_MAX;
		spin_unlock(&page_pools.epp_lock);
	}

	LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
	return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
		(IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
}
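/*
 * Shrink-count arithmetic, with an assumed PTLRPC_MAX_BRW_PAGES of 1024:
 * epp_free_pages = 2048 and epp_idle_idx = 50 yield
 * (2048 - 1024) * (100 - 50) / 100 = 512 reclaimable pages, while
 * epp_idle_idx = 100 reports 0 and epp_idle_idx = 0 reports the full
 * 1024-page surplus.
 */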
/*
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static unsigned long enc_pools_shrink_scan(struct shrinker *s,
					   struct shrink_control *sc)
{
	spin_lock(&page_pools.epp_lock);
	sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
			       page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES);
	if (sc->nr_to_scan > 0) {
		enc_pools_release_free_pages(sc->nr_to_scan);
		CDEBUG(D_SEC, "released %ld pages, %ld left\n",
		       (long)sc->nr_to_scan, page_pools.epp_free_pages);

		page_pools.epp_st_shrinks++;
		page_pools.epp_last_shrink = cfs_time_current_sec();
	}
	spin_unlock(&page_pools.epp_lock);

	/*
	 * if there has been no pool access for a long time, consider the
	 * pools fully idle.  a little race here is fine.
	 */
	if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
		     CACHE_QUIESCENT_PERIOD)) {
		spin_lock(&page_pools.epp_lock);
		page_pools.epp_idle_idx = IDLE_IDX_MAX;
		spin_unlock(&page_pools.epp_lock);
	}

	LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
	return sc->nr_to_scan;
}
#ifndef HAVE_SHRINKER_COUNT
/*
 * could be called frequently for query (@nr_to_scan == 0).
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
	struct shrink_control scv = {
		.nr_to_scan = shrink_param(sc, nr_to_scan),
		.gfp_mask   = shrink_param(sc, gfp_mask)
	};
#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
	struct shrinker *shrinker = NULL;
#endif

	enc_pools_shrink_scan(shrinker, &scv);

	return enc_pools_shrink_count(shrinker, &scv);
}
#endif /* HAVE_SHRINKER_COUNT */
int npages_to_npools(unsigned long npages)
{
	return (int)((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
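/*
 * e.g. with PAGES_PER_POOL = 512: npages_to_npools(1) == 1,
 * npages_to_npools(512) == 1 and npages_to_npools(513) == 2,
 * i.e. a plain round-up division.
 */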
/*
 * return how many pages were cleaned up.
 */
static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
{
	unsigned long cleaned = 0;
	int i, j;

	for (i = 0; i < npools; i++) {
		if (pools[i] == NULL)
			continue;
		for (j = 0; j < PAGES_PER_POOL; j++) {
			if (pools[i][j]) {
				__free_page(pools[i][j]);
				cleaned++;
			}
		}
		OBD_FREE(pools[i], PAGE_CACHE_SIZE);
		pools[i] = NULL;
	}

	return cleaned;
}
/*
 * merge @npools pointed by @pools which contains @npages new pages
 * into the current pools.
 *
 * there are tricks that could avoid most of the pointer copying, but we
 * choose the simplest approach to avoid complexity; this is not called
 * frequently.
 */
static void enc_pools_insert(struct page ***pools, int npools, int npages)
{
	int freeslot;
	int op_idx, np_idx, og_idx, ng_idx;
	int cur_npools, end_npools;

	LASSERT(npages > 0);
	LASSERT(page_pools.epp_total_pages + npages <=
		page_pools.epp_max_pages);
	LASSERT(npages_to_npools(npages) == npools);
	LASSERT(page_pools.epp_growing);

	spin_lock(&page_pools.epp_lock);

	/*
	 * (1) fill all the free slots in current pools.
	 */
	/* free slots are those left by rented-out pages, plus the extra
	 * slots with index >= total_pages at the tail of the last pool. */
	freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
	if (freeslot != 0)
		freeslot = PAGES_PER_POOL - freeslot;
	freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;

	op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
	np_idx = npools - 1;
	ng_idx = (npages - 1) % PAGES_PER_POOL;

	while (freeslot) {
		LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
		LASSERT(pools[np_idx][ng_idx] != NULL);

		page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
		pools[np_idx][ng_idx] = NULL;

		freeslot--;

		if (++og_idx == PAGES_PER_POOL) {
			op_idx++;
			og_idx = 0;
		}
		if (--ng_idx < 0) {
			if (np_idx == 0)
				break;
			np_idx--;
			ng_idx = PAGES_PER_POOL - 1;
		}
	}

	/*
	 * (2) add pools if needed.
	 */
	cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
		     PAGES_PER_POOL;
	end_npools = (page_pools.epp_total_pages + npages +
		      PAGES_PER_POOL - 1) / PAGES_PER_POOL;
	LASSERT(end_npools <= page_pools.epp_max_pools);

	np_idx = 0;
	while (cur_npools < end_npools) {
		LASSERT(page_pools.epp_pools[cur_npools] == NULL);
		LASSERT(np_idx < npools);
		LASSERT(pools[np_idx] != NULL);

		page_pools.epp_pools[cur_npools++] = pools[np_idx];
		pools[np_idx++] = NULL;
	}

	page_pools.epp_total_pages += npages;
	page_pools.epp_free_pages += npages;
	page_pools.epp_st_lowfree = page_pools.epp_free_pages;

	if (page_pools.epp_total_pages > page_pools.epp_st_max_pages)
		page_pools.epp_st_max_pages = page_pools.epp_total_pages;

	CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
	       page_pools.epp_total_pages);

	spin_unlock(&page_pools.epp_lock);
}
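/*
 * Free-slot arithmetic for step (1) above, assuming PAGES_PER_POOL = 512:
 * with epp_total_pages = 1000 and epp_free_pages = 900, the last pool has
 * 512 - (1000 % 512) = 24 unused tail slots plus 1000 - 900 = 100 slots
 * vacated by rented-out pages, so freeslot = 124 incoming pages can be
 * absorbed before step (2) needs to link in whole new pool pages.
 */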
static int enc_pools_add_pages(int npages)
{
	static DEFINE_MUTEX(add_pages_mutex);
	struct page ***pools;
	int npools, alloced = 0;
	int i, j, rc = -ENOMEM;

	if (npages < PTLRPC_MAX_BRW_PAGES)
		npages = PTLRPC_MAX_BRW_PAGES;

	mutex_lock(&add_pages_mutex);

	if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
		npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
	LASSERT(npages > 0);

	page_pools.epp_st_grows++;

	npools = npages_to_npools(npages);
	OBD_ALLOC(pools, npools * sizeof(*pools));
	if (pools == NULL)
		goto out;

	for (i = 0; i < npools; i++) {
		OBD_ALLOC(pools[i], PAGE_CACHE_SIZE);
		if (pools[i] == NULL)
			goto out_pools;

		for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
			pools[i][j] = alloc_page(GFP_NOFS |
						 __GFP_HIGHMEM);
			if (pools[i][j] == NULL)
				goto out_pools;

			alloced++;
		}
	}
	LASSERT(alloced == npages);

	enc_pools_insert(pools, npools, npages);
	CDEBUG(D_SEC, "added %d pages into pools\n", npages);
	rc = 0;

out_pools:
	enc_pools_cleanup(pools, npools);
	OBD_FREE(pools, npools * sizeof(*pools));
out:
	if (rc) {
		page_pools.epp_st_grow_fails++;
		CERROR("Failed to allocate %d enc pages\n", npages);
	}

	mutex_unlock(&add_pages_mutex);
	return rc;
}
static inline void enc_pools_wakeup(void)
{
	assert_spin_locked(&page_pools.epp_lock);

	if (unlikely(page_pools.epp_waitqlen)) {
		LASSERT(waitqueue_active(&page_pools.epp_waitq));
		wake_up_all(&page_pools.epp_waitq);
	}
}
static int enc_pools_should_grow(int page_needed, long now)
{
	/* don't grow if someone else is growing the pools right now,
	 * or the pools have already reached full capacity
	 */
	if (page_pools.epp_growing ||
	    page_pools.epp_total_pages == page_pools.epp_max_pages)
		return 0;

	/* if the total page count is not enough, we need to grow */
	if (page_pools.epp_total_pages < page_needed)
		return 1;

	/*
	 * we wanted to return 0 here if a shrink happened just a moment
	 * ago, but this may cause a deadlock if both the client and the
	 * ost live on a single node.
	 */

	/*
	 * here we should perhaps consider other factors, like wait queue
	 * length, idle index, etc.
	 */

	/* grow the pools in any other case */
	return 1;
}
/*
 * Export the number of free pages in the pool
 */
int get_free_pages_in_pool(void)
{
	return page_pools.epp_free_pages;
}
EXPORT_SYMBOL(get_free_pages_in_pool);
/*
 * Let the outside world know if enc_pool full capacity has been reached
 */
int pool_is_at_full_capacity(void)
{
	return (page_pools.epp_total_pages == page_pools.epp_max_pages);
}
EXPORT_SYMBOL(pool_is_at_full_capacity);
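/*
 * Sketch of a hypothetical caller using the two exported helpers above to
 * back off before requesting pages the pool can no longer provide:
 *
 *	if (pool_is_at_full_capacity() &&
 *	    get_free_pages_in_pool() < desc->bd_iov_count)
 *		return -ENOMEM;
 */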
/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
	wait_queue_t waitlink;
	unsigned long this_idle = -1;
	cfs_time_t tick = 0;
	long now;
	int p_idx, g_idx;
	int i;

	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
	LASSERT(desc->bd_iov_count > 0);
	LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

	/* resent bulk, enc iov might have been allocated previously */
	if (GET_ENC_KIOV(desc) != NULL)
		return 0;

	OBD_ALLOC_LARGE(GET_ENC_KIOV(desc),
			desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
	if (GET_ENC_KIOV(desc) == NULL)
		return -ENOMEM;

	spin_lock(&page_pools.epp_lock);

	page_pools.epp_st_access++;
again:
	if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
		if (tick == 0)
			tick = cfs_time_current();

		now = cfs_time_current_sec();

		page_pools.epp_st_missings++;
		page_pools.epp_pages_short += desc->bd_iov_count;

		if (enc_pools_should_grow(desc->bd_iov_count, now)) {
			page_pools.epp_growing = 1;

			spin_unlock(&page_pools.epp_lock);
			enc_pools_add_pages(page_pools.epp_pages_short / 2);
			spin_lock(&page_pools.epp_lock);

			page_pools.epp_growing = 0;

			enc_pools_wakeup();
		} else if (page_pools.epp_growing) {
			if (++page_pools.epp_waitqlen >
			    page_pools.epp_st_max_wqlen)
				page_pools.epp_st_max_wqlen =
					page_pools.epp_waitqlen;

			set_current_state(TASK_UNINTERRUPTIBLE);
			init_waitqueue_entry(&waitlink, current);
			add_wait_queue(&page_pools.epp_waitq,
				       &waitlink);

			spin_unlock(&page_pools.epp_lock);
			schedule();
			remove_wait_queue(&page_pools.epp_waitq,
					  &waitlink);
			LASSERT(page_pools.epp_waitqlen > 0);
			spin_lock(&page_pools.epp_lock);
			page_pools.epp_waitqlen--;
		} else {
			/* ptlrpcd thread should not sleep in that case,
			 * or deadlock may occur!
			 * Instead, return -ENOMEM so that upper layers
			 * will put the request back in the queue. */
			page_pools.epp_st_outofmem++;
			spin_unlock(&page_pools.epp_lock);
			OBD_FREE_LARGE(GET_ENC_KIOV(desc),
				       desc->bd_iov_count *
				       sizeof(*GET_ENC_KIOV(desc)));
			GET_ENC_KIOV(desc) = NULL;
			return -ENOMEM;
		}

		LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
		page_pools.epp_pages_short -= desc->bd_iov_count;

		this_idle = 0;
		goto again;
	}

	/* record max wait time */
	if (unlikely(tick != 0)) {
		tick = cfs_time_current() - tick;
		if (tick > page_pools.epp_st_max_wait)
			page_pools.epp_st_max_wait = tick;
	}

	/* proceed with the rest of the allocation */
	page_pools.epp_free_pages -= desc->bd_iov_count;

	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
		BD_GET_ENC_KIOV(desc, i).kiov_page =
			page_pools.epp_pools[p_idx][g_idx];
		page_pools.epp_pools[p_idx][g_idx] = NULL;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
		page_pools.epp_st_lowfree = page_pools.epp_free_pages;

	/*
	 * new idle index = (old * weight + new) / (weight + 1)
	 */
	if (this_idle == -1) {
		this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
			    page_pools.epp_total_pages;
	}

	page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
				   this_idle) /
				  (IDLE_IDX_WEIGHT + 1);

	page_pools.epp_last_access = cfs_time_current_sec();

	spin_unlock(&page_pools.epp_lock);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
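/*
 * The idle index above is an exponential moving average with
 * IDLE_IDX_WEIGHT = 3: an old index of 40 and a fresh sample of 80 give
 * (40 * 3 + 80) / (3 + 1) = 50, so each access moves the index a quarter
 * of the way toward the latest free/total ratio.
 */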
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
	int p_idx, g_idx;
	int i;

	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));

	if (GET_ENC_KIOV(desc) == NULL)
		return;

	LASSERT(desc->bd_iov_count > 0);

	spin_lock(&page_pools.epp_lock);

	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

	LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
		page_pools.epp_total_pages);
	LASSERT(page_pools.epp_pools[p_idx]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(BD_GET_ENC_KIOV(desc, i).kiov_page != NULL);
		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
		LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

		page_pools.epp_pools[p_idx][g_idx] =
			BD_GET_ENC_KIOV(desc, i).kiov_page;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	page_pools.epp_free_pages += desc->bd_iov_count;

	enc_pools_wakeup();

	spin_unlock(&page_pools.epp_lock);

	OBD_FREE_LARGE(GET_ENC_KIOV(desc),
		       desc->bd_iov_count * sizeof(*GET_ENC_KIOV(desc)));
	GET_ENC_KIOV(desc) = NULL;
}
/*
 * we don't do much for add_user/del_user anymore, except adding some
 * initial pages in add_user() if the current pools are empty; the rest
 * is handled by the pools' self-adaptation.
 */
int sptlrpc_enc_pool_add_user(void)
{
	int need_grow = 0;

	spin_lock(&page_pools.epp_lock);
	if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
		page_pools.epp_growing = 1;
		need_grow = 1;
	}
	spin_unlock(&page_pools.epp_lock);

	if (need_grow) {
		enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
				    PTLRPC_MAX_BRW_PAGES);

		spin_lock(&page_pools.epp_lock);
		page_pools.epp_growing = 0;
		enc_pools_wakeup();
		spin_unlock(&page_pools.epp_lock);
	}
	return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
int sptlrpc_enc_pool_del_user(void)
{
	return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);
static inline void enc_pools_alloc(void)
{
	LASSERT(page_pools.epp_max_pools);
	OBD_ALLOC_LARGE(page_pools.epp_pools,
			page_pools.epp_max_pools *
			sizeof(*page_pools.epp_pools));
}

static inline void enc_pools_free(void)
{
	LASSERT(page_pools.epp_max_pools);
	LASSERT(page_pools.epp_pools);

	OBD_FREE_LARGE(page_pools.epp_pools,
		       page_pools.epp_max_pools *
		       sizeof(*page_pools.epp_pools));
}
int sptlrpc_enc_pool_init(void)
{
	DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
			 enc_pools_shrink_count, enc_pools_shrink_scan);

	page_pools.epp_max_pages = totalram_pages / 8;
	if (enc_pool_max_memory_mb > 0 &&
	    enc_pool_max_memory_mb <= (totalram_pages >> mult))
		page_pools.epp_max_pages = enc_pool_max_memory_mb << mult;

	page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

	init_waitqueue_head(&page_pools.epp_waitq);
	page_pools.epp_waitqlen = 0;
	page_pools.epp_pages_short = 0;

	page_pools.epp_growing = 0;

	page_pools.epp_idle_idx = 0;
	page_pools.epp_last_shrink = cfs_time_current_sec();
	page_pools.epp_last_access = cfs_time_current_sec();

	spin_lock_init(&page_pools.epp_lock);
	page_pools.epp_total_pages = 0;
	page_pools.epp_free_pages = 0;

	page_pools.epp_st_max_pages = 0;
	page_pools.epp_st_grows = 0;
	page_pools.epp_st_grow_fails = 0;
	page_pools.epp_st_shrinks = 0;
	page_pools.epp_st_access = 0;
	page_pools.epp_st_missings = 0;
	page_pools.epp_st_lowfree = 0;
	page_pools.epp_st_max_wqlen = 0;
	page_pools.epp_st_max_wait = 0;
	page_pools.epp_st_outofmem = 0;

	enc_pools_alloc();
	if (page_pools.epp_pools == NULL)
		return -ENOMEM;

	pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
	if (pools_shrinker == NULL) {
		enc_pools_free();
		return -ENOMEM;
	}

	return 0;
}
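/*
 * Sizing arithmetic above, assuming 4KB pages (PAGE_CACHE_SHIFT = 12, so
 * mult = 20 - 12 = 8): 1MB is 1 << 8 = 256 pages, so
 * enc_pool_max_memory_mb=256 caps the pool at 256 << 8 = 65536 pages
 * (256MB); with the parameter unset the cap stays at totalram_pages / 8.
 */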
void sptlrpc_enc_pool_fini(void)
{
	unsigned long cleaned, npools;

	LASSERT(pools_shrinker);
	LASSERT(page_pools.epp_pools);
	LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

	remove_shrinker(pools_shrinker);

	npools = npages_to_npools(page_pools.epp_total_pages);
	cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
	LASSERT(cleaned == page_pools.epp_total_pages);

	enc_pools_free();

	if (page_pools.epp_st_access > 0) {
		CDEBUG(D_SEC,
		       "max pages %lu, grows %u, grow fails %u, shrinks %u, "
		       "access %lu, missing %lu, max qlen %u, max wait "
		       CFS_TIME_T"/%lu, out of mem %lu\n",
		       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
		       page_pools.epp_st_grow_fails,
		       page_pools.epp_st_shrinks, page_pools.epp_st_access,
		       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
		       page_pools.epp_st_max_wait,
		       msecs_to_jiffies(MSEC_PER_SEC),
		       page_pools.epp_st_outofmem);
	}
}
static int cfs_hash_alg_id[] = {
	[BULK_HASH_ALG_NULL]	= CFS_HASH_ALG_NULL,
	[BULK_HASH_ALG_ADLER32]	= CFS_HASH_ALG_ADLER32,
	[BULK_HASH_ALG_CRC32]	= CFS_HASH_ALG_CRC32,
	[BULK_HASH_ALG_MD5]	= CFS_HASH_ALG_MD5,
	[BULK_HASH_ALG_SHA1]	= CFS_HASH_ALG_SHA1,
	[BULK_HASH_ALG_SHA256]	= CFS_HASH_ALG_SHA256,
	[BULK_HASH_ALG_SHA384]	= CFS_HASH_ALG_SHA384,
	[BULK_HASH_ALG_SHA512]	= CFS_HASH_ALG_SHA512,
};

const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
	return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
}

__u8 sptlrpc_get_hash_alg(const char *algname)
{
	return cfs_crypto_hash_alg(algname);
}
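/*
 * The helpers above translate algorithm ids to names and back:
 * sptlrpc_get_hash_name() maps a BULK_HASH_ALG_* id through the table to
 * the corresponding libcfs name, while sptlrpc_get_hash_alg() resolves a
 * name such as "sha1" via cfs_crypto_hash_alg().
 */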
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	int size = msg->lm_buflens[offset];

	bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
	if (bsd == NULL) {
		CERROR("Invalid bulk sec desc: size %d\n", size);
		return -EINVAL;
	}

	if (swabbed)
		__swab32s(&bsd->bsd_nob);

	if (unlikely(bsd->bsd_version != 0)) {
		CERROR("Unexpected version %u\n", bsd->bsd_version);
		return -EPROTO;
	}

	if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
		CERROR("Invalid type %u\n", bsd->bsd_type);
		return -EPROTO;
	}

	/* FIXME more sanity checks here */

	if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
		     bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
		     bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
		CERROR("Invalid svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);
/*
 * Compute the checksum of an RPC buffer payload.  If the given \a buflen
 * is not large enough to hold the full digest, truncate the result to fit,
 * so that it is possible to use a hash function with a large hash space
 * while only consuming part of the resulting hash.
 */
int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
			      void *buf, int buflen)
{
	struct cfs_crypto_hash_desc *hdesc;
	int hashsize;
	unsigned int bufsize;
	int i, err;

	LASSERT(ptlrpc_is_bulk_desc_kiov(desc->bd_type));
	LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
	LASSERT(buflen >= 4);

	hdesc = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
	if (IS_ERR(hdesc)) {
		CERROR("Unable to initialize checksum hash %s\n",
		       cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
		return PTR_ERR(hdesc);
	}

	hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		cfs_crypto_hash_update_page(hdesc,
					    BD_GET_KIOV(desc, i).kiov_page,
					    BD_GET_KIOV(desc, i).kiov_offset &
						~PAGE_MASK,
					    BD_GET_KIOV(desc, i).kiov_len);
	}

	if (hashsize > buflen) {
		unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];

		bufsize = sizeof(hashbuf);
		LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
			 bufsize, hashsize);
		err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
		memcpy(buf, hashbuf, buflen);
	} else {
		bufsize = buflen;
		err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
	}

	return err;
}
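/*
 * Truncation example: with a hypothetical buflen of 16 and alg
 * BULK_HASH_ALG_SHA512 (64-byte digest), the digest is finalized into a
 * local buffer and only its first 16 bytes are copied to \a buf; callers
 * wanting the whole digest must pass buflen >= the algorithm's
 * cfs_crypto_hash_digestsize().
 */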