/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lustre/ptlrpc/sec_bulk.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/linux/linux-mem.h>

#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"
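/*
 * mult converts megabytes to pages: 1 MB is 2^20 bytes, so shifting a
 * megabyte count left by (20 - PAGE_SHIFT) yields a page count (a shift
 * of 8 with 4 KB pages).
 */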
static int mult = 20 - PAGE_SHIFT;
static int enc_pool_max_memory_mb;
module_param(enc_pool_max_memory_mb, int, 0644);
MODULE_PARM_DESC(enc_pool_max_memory_mb,
		 "Encoding pool max memory (MB), 1/8 of total physical memory by default");
/*
 * bulk encryption page pools
 */
#define PTRS_PER_PAGE		(PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL		(PTRS_PER_PAGE)
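/*
 * One pool is a single page of page pointers: assuming 4 KB pages and
 * 8-byte pointers, each pool tracks 512 pages, i.e. 2 MB of pool memory
 * per page of pointers.
 */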
#define IDLE_IDX_MAX		(100)
#define IDLE_IDX_WEIGHT		(3)

#define CACHE_QUIESCENT_PERIOD	(20)
static struct ptlrpc_enc_page_pool {
	unsigned long epp_max_pages;	/* maximum pages can hold, const */
	unsigned int epp_max_pools;	/* number of pools, const */

	/*
	 * wait queue in case of not enough free pages.
	 */
	wait_queue_head_t epp_waitq;	/* waiting threads */
	unsigned int epp_waitqlen;	/* wait queue length */
	unsigned long epp_pages_short;	/* # of pages wanted by in-q users */
	unsigned int epp_growing:1;	/* during adding pages */

	/*
	 * indicating how idle the pools are, from 0 to IDLE_IDX_MAX.
	 * this is updated each time pages are taken from the pools, not
	 * based on time, so even if the system has been idle for a while
	 * the idle_idx may still be low if no pool activity happened.
	 */
	unsigned long epp_idle_idx;

	/* last shrink time due to mem tight */
	time64_t epp_last_shrink;
	time64_t epp_last_access;

	/* in-pool pages bookkeeping */
	spinlock_t epp_lock;		/* protect following fields */
	unsigned long epp_total_pages;	/* total pages in pools */
	unsigned long epp_free_pages;	/* current pages available */

	/* statistics */
	unsigned long epp_st_max_pages;	/* # of pages ever reached */
	unsigned int epp_st_grows;	/* # of grows */
	unsigned int epp_st_grow_fails;	/* # of add pages failures */
	unsigned int epp_st_shrinks;	/* # of shrinks */
	unsigned long epp_st_access;	/* # of access */
	unsigned long epp_st_missings;	/* # of cache misses */
	unsigned long epp_st_lowfree;	/* lowest free pages reached */
	unsigned int epp_st_max_wqlen;	/* highest waitqueue length */
	ktime_t epp_st_max_wait;	/* in nanoseconds */
	unsigned long epp_st_outofmem;	/* # of out-of-mem requests */

	/*
	 * pointers to pools, may be vmalloc'd
	 */
	struct page ***epp_pools;
} page_pools;
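/*
 * Layout note: free pages always occupy slot indices [0, epp_free_pages)
 * across the pool array, so global index i lives at
 * epp_pools[i / PAGES_PER_POOL][i % PAGES_PER_POOL]; this is how the
 * p_idx/g_idx pairs throughout this file are computed.
 */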
/*
 * /proc/fs/lustre/sptlrpc/encrypt_page_pools
 */
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
{
	spin_lock(&page_pools.epp_lock);

	seq_printf(m, "physical pages: %lu\n"
		   "pages per pool: %lu\n"
		   "max pages: %lu\n"
		   "max pools: %u\n"
		   "total pages: %lu\n"
		   "total free: %lu\n"
		   "idle index: %lu/100\n"
		   "last shrink: %llds\n"
		   "last access: %llds\n"
		   "max pages reached: %lu\n"
		   "grows: %u\n"
		   "grows failure: %u\n"
		   "shrinks: %u\n"
		   "cache access: %lu\n"
		   "cache missing: %lu\n"
		   "low free mark: %lu\n"
		   "max waitqueue depth: %u\n"
		   "max wait time ms: %lld\n"
		   "out of mem: %lu\n",
		   cfs_totalram_pages(), PAGES_PER_POOL,
		   page_pools.epp_max_pages,
		   page_pools.epp_max_pools,
		   page_pools.epp_total_pages,
		   page_pools.epp_free_pages,
		   page_pools.epp_idle_idx,
		   ktime_get_seconds() - page_pools.epp_last_shrink,
		   ktime_get_seconds() - page_pools.epp_last_access,
		   page_pools.epp_st_max_pages,
		   page_pools.epp_st_grows,
		   page_pools.epp_st_grow_fails,
		   page_pools.epp_st_shrinks,
		   page_pools.epp_st_access,
		   page_pools.epp_st_missings,
		   page_pools.epp_st_lowfree,
		   page_pools.epp_st_max_wqlen,
		   ktime_to_ms(page_pools.epp_st_max_wait),
		   page_pools.epp_st_outofmem);

	spin_unlock(&page_pools.epp_lock);
	return 0;
}
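/*
 * Usage example (values vary per system): the table above can be read at
 * runtime via "cat /proc/fs/lustre/sptlrpc/encrypt_page_pools", or with
 * the matching lctl parameter name, assumed here to be
 * "lctl get_param sptlrpc.encrypt_page_pools".
 */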
static void enc_pools_release_free_pages(long npages)
{
	int p_idx, g_idx;
	int p_idx_max1, p_idx_max2;

	LASSERT(npages > 0);
	LASSERT(npages <= page_pools.epp_free_pages);
	LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);

	/* max pool index before the release */
	p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;

	page_pools.epp_free_pages -= npages;
	page_pools.epp_total_pages -= npages;

	/* max pool index after the release */
	p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
		((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);

	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
	LASSERT(page_pools.epp_pools[p_idx]);

	while (npages--) {
		LASSERT(page_pools.epp_pools[p_idx]);
		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);

		__free_page(page_pools.epp_pools[p_idx][g_idx]);
		page_pools.epp_pools[p_idx][g_idx] = NULL;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	/* free unused pools */
	while (p_idx_max1 < p_idx_max2) {
		LASSERT(page_pools.epp_pools[p_idx_max2]);
		OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_SIZE);
		page_pools.epp_pools[p_idx_max2] = NULL;
		p_idx_max2--;
	}
}
/*
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static unsigned long enc_pools_shrink_count(struct shrinker *s,
					    struct shrink_control *sc)
{
	/*
	 * if there has been no pool access for a long time, consider the
	 * pools fully idle. a little race here is fine.
	 */
	if (unlikely(ktime_get_seconds() - page_pools.epp_last_access >
		     CACHE_QUIESCENT_PERIOD)) {
		spin_lock(&page_pools.epp_lock);
		page_pools.epp_idle_idx = IDLE_IDX_MAX;
		spin_unlock(&page_pools.epp_lock);
	}

	LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
	return (page_pools.epp_free_pages <= PTLRPC_MAX_BRW_PAGES) ? 0 :
		(page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES) *
		(IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
}
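/*
 * Worked example for the count above, assuming PTLRPC_MAX_BRW_PAGES is
 * 4096 (a 16 MB bulk with 4 KB pages): with 10000 free pages and
 * epp_idle_idx == 75, the shrinker is offered
 * (10000 - 4096) * (100 - 75) / 100 == 1476 pages; busier pools (lower
 * idle index) surrender proportionally fewer pages.
 */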
/*
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static unsigned long enc_pools_shrink_scan(struct shrinker *s,
					   struct shrink_control *sc)
{
	spin_lock(&page_pools.epp_lock);
	if (page_pools.epp_free_pages <= PTLRPC_MAX_BRW_PAGES)
		sc->nr_to_scan = 0;
	else
		sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
				       page_pools.epp_free_pages -
				       PTLRPC_MAX_BRW_PAGES);
	if (sc->nr_to_scan > 0) {
		enc_pools_release_free_pages(sc->nr_to_scan);
		CDEBUG(D_SEC, "released %ld pages, %ld left\n",
		       (long)sc->nr_to_scan, page_pools.epp_free_pages);

		page_pools.epp_st_shrinks++;
		page_pools.epp_last_shrink = ktime_get_seconds();
	}
	spin_unlock(&page_pools.epp_lock);

	/*
	 * if there has been no pool access for a long time, consider the
	 * pools fully idle. a little race here is fine.
	 */
	if (unlikely(ktime_get_seconds() - page_pools.epp_last_access >
		     CACHE_QUIESCENT_PERIOD)) {
		spin_lock(&page_pools.epp_lock);
		page_pools.epp_idle_idx = IDLE_IDX_MAX;
		spin_unlock(&page_pools.epp_lock);
	}

	LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
	return sc->nr_to_scan;
}
#ifdef HAVE_SHRINKER_COUNT
static struct shrinker pools_shrinker = {
	.count_objects	= enc_pools_shrink_count,
	.scan_objects	= enc_pools_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};
#else
/*
 * could be called frequently for query (@nr_to_scan == 0).
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static int enc_pools_shrink(struct shrinker *shrinker,
			    struct shrink_control *sc)
{
	enc_pools_shrink_scan(shrinker, sc);

	return enc_pools_shrink_count(shrinker, sc);
}

static struct shrinker pools_shrinker = {
	.shrink = enc_pools_shrink,
	.seeks = DEFAULT_SEEKS,
};
#endif /* HAVE_SHRINKER_COUNT */
static inline int npages_to_npools(unsigned long npages)
{
	return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
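/*
 * e.g. with 4 KB pages and 8-byte pointers PAGES_PER_POOL is 512, so
 * npages_to_npools(1000) rounds up to 2 pools.
 */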
/*
 * return how many pages cleaned up.
 */
static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
{
	unsigned long cleaned = 0;
	int i, j;

	for (i = 0; i < npools; i++) {
		if (pools[i]) {
			for (j = 0; j < PAGES_PER_POOL; j++) {
				if (pools[i][j]) {
					__free_page(pools[i][j]);
					cleaned++;
				}
			}
			OBD_FREE(pools[i], PAGE_SIZE);
			pools[i] = NULL;
		}
	}

	return cleaned;
}
/*
 * merge @npools pointed by @pools which contains @npages new pages
 * into current pools.
 *
 * there are tricks that would avoid most of the memory copying, but we
 * choose the simplest way to avoid complexity; this is not frequently
 * called.
 */
static void enc_pools_insert(struct page ***pools, int npools, int npages)
{
	int freeslot;
	int op_idx, np_idx, og_idx, ng_idx;
	int cur_npools, end_npools;

	LASSERT(npages > 0);
	LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
	LASSERT(npages_to_npools(npages) == npools);
	LASSERT(page_pools.epp_growing);

	spin_lock(&page_pools.epp_lock);

	/*
	 * (1) fill all the free slots of current pools.
	 */
	/*
	 * free slots are those left by rented pages, plus the extra ones
	 * with index >= total_pages, located at the tail of the last pool.
	 */
	freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
	if (freeslot != 0)
		freeslot = PAGES_PER_POOL - freeslot;
	freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;

	op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
	np_idx = npools - 1;
	ng_idx = (npages - 1) % PAGES_PER_POOL;

	while (freeslot) {
		LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
		LASSERT(pools[np_idx][ng_idx] != NULL);

		page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
		pools[np_idx][ng_idx] = NULL;

		freeslot--;

		if (++og_idx == PAGES_PER_POOL) {
			op_idx++;
			og_idx = 0;
		}
		if (--ng_idx < 0) {
			if (np_idx == 0)
				break;
			np_idx--;
			ng_idx = PAGES_PER_POOL - 1;
		}
	}

	/*
	 * (2) add pools if needed.
	 */
	cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
		      PAGES_PER_POOL;
	end_npools = (page_pools.epp_total_pages + npages +
		      PAGES_PER_POOL - 1) / PAGES_PER_POOL;
	LASSERT(end_npools <= page_pools.epp_max_pools);

	np_idx = 0;
	while (cur_npools < end_npools) {
		LASSERT(page_pools.epp_pools[cur_npools] == NULL);
		LASSERT(np_idx < npools);
		LASSERT(pools[np_idx] != NULL);

		page_pools.epp_pools[cur_npools++] = pools[np_idx];
		pools[np_idx++] = NULL;
	}

	page_pools.epp_total_pages += npages;
	page_pools.epp_free_pages += npages;
	page_pools.epp_st_lowfree = page_pools.epp_free_pages;

	if (page_pools.epp_total_pages > page_pools.epp_st_max_pages)
		page_pools.epp_st_max_pages = page_pools.epp_total_pages;

	CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
	       page_pools.epp_total_pages);

	spin_unlock(&page_pools.epp_lock);
}
static int enc_pools_add_pages(int npages)
{
	static DEFINE_MUTEX(add_pages_mutex);
	struct page ***pools;
	int npools, alloced = 0;
	int i, j, rc = -ENOMEM;

	if (npages < PTLRPC_MAX_BRW_PAGES)
		npages = PTLRPC_MAX_BRW_PAGES;

	mutex_lock(&add_pages_mutex);

	if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
		npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
	LASSERT(npages > 0);

	page_pools.epp_st_grows++;

	npools = npages_to_npools(npages);
	OBD_ALLOC_PTR_ARRAY(pools, npools);
	if (pools == NULL)
		goto out;

	for (i = 0; i < npools; i++) {
		OBD_ALLOC(pools[i], PAGE_SIZE);
		if (pools[i] == NULL)
			goto out_pools;

		for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
			pools[i][j] = alloc_page(GFP_NOFS |
						 __GFP_HIGHMEM);
			if (pools[i][j] == NULL)
				goto out_pools;

			alloced++;
		}
	}
	LASSERT(alloced == npages);

	enc_pools_insert(pools, npools, npages);
	CDEBUG(D_SEC, "added %d pages into pools\n", npages);
	rc = 0;

out_pools:
	enc_pools_cleanup(pools, npools);
	OBD_FREE_PTR_ARRAY(pools, npools);
out:
	if (rc) {
		page_pools.epp_st_grow_fails++;
		CERROR("Failed to allocate %d enc pages\n", npages);
	}

	mutex_unlock(&add_pages_mutex);
	return rc;
}
static inline void enc_pools_wakeup(void)
{
	assert_spin_locked(&page_pools.epp_lock);

	if (unlikely(page_pools.epp_waitqlen)) {
		LASSERT(waitqueue_active(&page_pools.epp_waitq));
		wake_up(&page_pools.epp_waitq);
	}
}
static int enc_pools_should_grow(int page_needed, time64_t now)
{
	/*
	 * don't grow if someone else is growing the pools right now,
	 * or the pools have already reached full capacity
	 */
	if (page_pools.epp_growing ||
	    page_pools.epp_total_pages == page_pools.epp_max_pages)
		return 0;

	/* if total pages is not enough, we need to grow */
	if (page_pools.epp_total_pages < page_needed)
		return 1;

	/*
	 * we wanted to return 0 here if a shrink happened just a moment
	 * ago, but this may cause deadlock if both client and ost live on
	 * a single node.
	 */

	/*
	 * here we might also want to consider other factors like wait
	 * queue length, idle index, etc. ?
	 */

	/* grow the pools in any other cases */
	return 1;
}
/*
 * Export the number of free pages in the pool
 */
int get_free_pages_in_pool(void)
{
	return page_pools.epp_free_pages;
}
EXPORT_SYMBOL(get_free_pages_in_pool);

/*
 * Let outside world know if enc_pool full capacity is reached
 */
int pool_is_at_full_capacity(void)
{
	return (page_pools.epp_total_pages == page_pools.epp_max_pages);
}
EXPORT_SYMBOL(pool_is_at_full_capacity);
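/*
 * Note: both helpers above read the pool counters without taking
 * epp_lock, so callers get an advisory snapshot that may already be
 * stale by the time it is used; that is acceptable for the heuristics
 * they feed.
 */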
/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
	wait_queue_entry_t waitlink;
	unsigned long this_idle = -1;
	u64 tick_ns = 0;
	time64_t now;
	int p_idx, g_idx;
	int i;

	LASSERT(desc->bd_iov_count > 0);
	LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

	/* resent bulk, enc iov might have been allocated previously */
	if (desc->bd_enc_vec != NULL)
		return 0;

	OBD_ALLOC_LARGE(desc->bd_enc_vec,
			desc->bd_iov_count * sizeof(*desc->bd_enc_vec));
	if (desc->bd_enc_vec == NULL)
		return -ENOMEM;

	spin_lock(&page_pools.epp_lock);

	page_pools.epp_st_access++;
again:
	if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
		if (tick_ns == 0)
			tick_ns = ktime_get_ns();

		now = ktime_get_real_seconds();

		page_pools.epp_st_missings++;
		page_pools.epp_pages_short += desc->bd_iov_count;

		if (enc_pools_should_grow(desc->bd_iov_count, now)) {
			page_pools.epp_growing = 1;

			spin_unlock(&page_pools.epp_lock);
			enc_pools_add_pages(page_pools.epp_pages_short / 2);
			spin_lock(&page_pools.epp_lock);

			page_pools.epp_growing = 0;

			enc_pools_wakeup();
		} else {
			if (page_pools.epp_growing) {
				if (++page_pools.epp_waitqlen >
				    page_pools.epp_st_max_wqlen)
					page_pools.epp_st_max_wqlen =
							page_pools.epp_waitqlen;

				set_current_state(TASK_UNINTERRUPTIBLE);
				init_wait(&waitlink);
				add_wait_queue(&page_pools.epp_waitq,
					       &waitlink);

				spin_unlock(&page_pools.epp_lock);
				schedule();
				remove_wait_queue(&page_pools.epp_waitq,
						  &waitlink);
				LASSERT(page_pools.epp_waitqlen > 0);
				spin_lock(&page_pools.epp_lock);
				page_pools.epp_waitqlen--;
			} else {
				/*
				 * ptlrpcd thread should not sleep in that
				 * case, or deadlock may occur!
				 * Instead, return -ENOMEM so that upper
				 * layers will put request back in queue.
				 */
				page_pools.epp_st_outofmem++;
				spin_unlock(&page_pools.epp_lock);
				OBD_FREE_LARGE(desc->bd_enc_vec,
					       desc->bd_iov_count *
					       sizeof(*desc->bd_enc_vec));
				desc->bd_enc_vec = NULL;
				return -ENOMEM;
			}
		}

		LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
		page_pools.epp_pages_short -= desc->bd_iov_count;

		this_idle = 0;
		goto again;
	}

	/* record max wait time */
	if (unlikely(tick_ns)) {
		ktime_t tick = ktime_sub_ns(ktime_get(), tick_ns);

		if (ktime_after(tick, page_pools.epp_st_max_wait))
			page_pools.epp_st_max_wait = tick;
	}

	/* proceed with rest of allocation */
	page_pools.epp_free_pages -= desc->bd_iov_count;

	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
		desc->bd_enc_vec[i].bv_page =
			page_pools.epp_pools[p_idx][g_idx];
		page_pools.epp_pools[p_idx][g_idx] = NULL;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
		page_pools.epp_st_lowfree = page_pools.epp_free_pages;

	/*
	 * new idle index = (old * weight + new) / (weight + 1)
	 */
	if (this_idle == -1) {
		this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
			page_pools.epp_total_pages;
	}

	page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
				   this_idle) /
				  (IDLE_IDX_WEIGHT + 1);

	page_pools.epp_last_access = ktime_get_seconds();

	spin_unlock(&page_pools.epp_lock);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
	int p_idx, g_idx;
	int i;

	if (desc->bd_enc_vec == NULL)
		return;

	LASSERT(desc->bd_iov_count > 0);

	spin_lock(&page_pools.epp_lock);

	p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
	g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

	LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
		page_pools.epp_total_pages);
	LASSERT(page_pools.epp_pools[p_idx]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		LASSERT(desc->bd_enc_vec[i].bv_page);
		LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
		LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

		page_pools.epp_pools[p_idx][g_idx] =
			desc->bd_enc_vec[i].bv_page;

		if (++g_idx == PAGES_PER_POOL) {
			p_idx++;
			g_idx = 0;
		}
	}

	page_pools.epp_free_pages += desc->bd_iov_count;

	enc_pools_wakeup();

	spin_unlock(&page_pools.epp_lock);

	OBD_FREE_LARGE(desc->bd_enc_vec,
		       desc->bd_iov_count * sizeof(*desc->bd_enc_vec));
	desc->bd_enc_vec = NULL;
}
/*
 * we don't do much for add_user/del_user anymore, except adding some
 * initial pages in add_user() if the current pools are empty; the rest
 * is handled by the pools' self-adaptation.
 */
int sptlrpc_enc_pool_add_user(void)
{
	int need_grow = 0;

	spin_lock(&page_pools.epp_lock);
	if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
		page_pools.epp_growing = 1;
		need_grow = 1;
	}
	spin_unlock(&page_pools.epp_lock);

	if (need_grow) {
		enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
				    PTLRPC_MAX_BRW_PAGES);

		spin_lock(&page_pools.epp_lock);
		page_pools.epp_growing = 0;
		enc_pools_wakeup();
		spin_unlock(&page_pools.epp_lock);
	}
	return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
int sptlrpc_enc_pool_del_user(void)
{
	return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);
static inline void enc_pools_alloc(void)
{
	LASSERT(page_pools.epp_max_pools);
	OBD_ALLOC_LARGE(page_pools.epp_pools,
			page_pools.epp_max_pools *
			sizeof(*page_pools.epp_pools));
}

static inline void enc_pools_free(void)
{
	LASSERT(page_pools.epp_max_pools);
	LASSERT(page_pools.epp_pools);

	OBD_FREE_LARGE(page_pools.epp_pools,
		       page_pools.epp_max_pools *
		       sizeof(*page_pools.epp_pools));
}
int sptlrpc_enc_pool_init(void)
{
	int rc;

	page_pools.epp_max_pages = cfs_totalram_pages() / 8;
	if (enc_pool_max_memory_mb > 0 &&
	    enc_pool_max_memory_mb <= (cfs_totalram_pages() >> mult))
		page_pools.epp_max_pages = enc_pool_max_memory_mb << mult;

	page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

	init_waitqueue_head(&page_pools.epp_waitq);
	page_pools.epp_waitqlen = 0;
	page_pools.epp_pages_short = 0;

	page_pools.epp_growing = 0;

	page_pools.epp_idle_idx = 0;
	page_pools.epp_last_shrink = ktime_get_seconds();
	page_pools.epp_last_access = ktime_get_seconds();

	spin_lock_init(&page_pools.epp_lock);
	page_pools.epp_total_pages = 0;
	page_pools.epp_free_pages = 0;

	page_pools.epp_st_max_pages = 0;
	page_pools.epp_st_grows = 0;
	page_pools.epp_st_grow_fails = 0;
	page_pools.epp_st_shrinks = 0;
	page_pools.epp_st_access = 0;
	page_pools.epp_st_missings = 0;
	page_pools.epp_st_lowfree = 0;
	page_pools.epp_st_max_wqlen = 0;
	page_pools.epp_st_max_wait = ktime_set(0, 0);
	page_pools.epp_st_outofmem = 0;

	enc_pools_alloc();
	if (page_pools.epp_pools == NULL)
		return -ENOMEM;

	rc = register_shrinker(&pools_shrinker);
	if (rc)
		enc_pools_free();

	return rc;
}
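/*
 * Sizing example for the defaults above: on a 64 GB node with 4 KB pages
 * the pool may grow to totalram/8 == 8 GB (2097152 pages); setting
 * enc_pool_max_memory_mb=256 instead caps it at
 * 256 << (20 - 12) == 65536 pages, i.e. 256 MB.
 */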
void sptlrpc_enc_pool_fini(void)
{
	unsigned long cleaned, npools;

	LASSERT(page_pools.epp_pools);
	LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

	unregister_shrinker(&pools_shrinker);

	npools = npages_to_npools(page_pools.epp_total_pages);
	cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
	LASSERT(cleaned == page_pools.epp_total_pages);

	enc_pools_free();

	if (page_pools.epp_st_access > 0) {
		CDEBUG(D_SEC,
		       "max pages %lu, grows %u, grow fails %u, shrinks %u, access %lu, missing %lu, max qlen %u, max wait ms %lld, out of mem %lu\n",
		       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
		       page_pools.epp_st_grow_fails,
		       page_pools.epp_st_shrinks, page_pools.epp_st_access,
		       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
		       ktime_to_ms(page_pools.epp_st_max_wait),
		       page_pools.epp_st_outofmem);
	}
}
static int cfs_hash_alg_id[] = {
	[BULK_HASH_ALG_NULL]	= CFS_HASH_ALG_NULL,
	[BULK_HASH_ALG_ADLER32]	= CFS_HASH_ALG_ADLER32,
	[BULK_HASH_ALG_CRC32]	= CFS_HASH_ALG_CRC32,
	[BULK_HASH_ALG_MD5]	= CFS_HASH_ALG_MD5,
	[BULK_HASH_ALG_SHA1]	= CFS_HASH_ALG_SHA1,
	[BULK_HASH_ALG_SHA256]	= CFS_HASH_ALG_SHA256,
	[BULK_HASH_ALG_SHA384]	= CFS_HASH_ALG_SHA384,
	[BULK_HASH_ALG_SHA512]	= CFS_HASH_ALG_SHA512,
};
const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
	return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
}

__u8 sptlrpc_get_hash_alg(const char *algname)
{
	return cfs_crypto_hash_alg(algname);
}
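/*
 * Usage example: sptlrpc_get_hash_name(BULK_HASH_ALG_SHA256) maps the
 * sptlrpc bulk hash ID through cfs_hash_alg_id[] and returns the libcfs
 * crypto name for SHA-256, while sptlrpc_get_hash_alg() resolves an
 * algorithm name to its libcfs crypto ID.
 */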
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	int size = msg->lm_buflens[offset];

	bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
	if (bsd == NULL) {
		CERROR("Invalid bulk sec desc: size %d\n", size);
		return -EINVAL;
	}

	if (swabbed)
		__swab32s(&bsd->bsd_nob);

	if (unlikely(bsd->bsd_version != 0)) {
		CERROR("Unexpected version %u\n", bsd->bsd_version);
		return -EPROTO;
	}

	if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
		CERROR("Invalid type %u\n", bsd->bsd_type);
		return -EPROTO;
	}

	/* FIXME more sanity check here */

	if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
		     bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
		     bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
		CERROR("Invalid svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);
/*
 * Compute the checksum of an RPC buffer payload. If the provided \a buflen
 * is not large enough to hold the full digest, truncate the result to fit,
 * so that a hash function with a large hash space can be used while only
 * part of the resulting hash is consumed.
 */
int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
			      void *buf, int buflen)
{
	struct ahash_request *req;
	int hashsize;
	unsigned int bufsize;
	int i, err;

	LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
	LASSERT(buflen >= 4);

	req = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
	if (IS_ERR(req)) {
		CERROR("Unable to initialize checksum hash %s\n",
		       cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
		return PTR_ERR(req);
	}

	hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		cfs_crypto_hash_update_page(req,
					    desc->bd_vec[i].bv_page,
					    desc->bd_vec[i].bv_offset &
					    ~PAGE_MASK,
					    desc->bd_vec[i].bv_len);
	}

	if (hashsize > buflen) {
		unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];

		bufsize = sizeof(hashbuf);
		LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
			 bufsize, hashsize);
		err = cfs_crypto_hash_final(req, hashbuf, &bufsize);
		memcpy(buf, hashbuf, buflen);
	} else {
		bufsize = buflen;
		err = cfs_crypto_hash_final(req, buf, &bufsize);
	}

	return err;
}
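/*
 * Truncation example for the path above: SHA-512 produces a 64-byte
 * digest, so with buflen == 4 only the first 4 bytes of the digest are
 * copied into @buf; a hash with a large digest can thus back a wire
 * format that only carries a short checksum.
 */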