/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_bulk.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
#else
#include <linux/crypto.h>
#endif

#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"

/****************************************
 * bulk encryption page pools           *
 ****************************************/

#ifdef __KERNEL__

#define PTRS_PER_PAGE   (PAGE_CACHE_SIZE / sizeof(void *))
#define PAGES_PER_POOL  (PTRS_PER_PAGE)
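
/*
 * each pool is a single page used as an array of page pointers, so with
 * 4KB pages and 8-byte pointers one pool tracks 512 pages, i.e. 2MB of
 * bulk encryption buffer space (the numbers differ on other page sizes).
 */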

#define IDLE_IDX_MAX            (100)
#define IDLE_IDX_WEIGHT         (3)

#define CACHE_QUIESCENT_PERIOD  (20)

static struct ptlrpc_enc_page_pool {
        /*
         * constants
         */
        unsigned long    epp_max_pages;   /* maximum pages we can hold, const */
        unsigned int     epp_max_pools;   /* number of pools, const */

        /*
         * wait queue in case of not enough free pages.
         */
        wait_queue_head_t epp_waitq;      /* waiting threads */
        unsigned int     epp_waitqlen;    /* wait queue length */
        unsigned long    epp_pages_short; /* # of pages wanted by in-q users */
        unsigned int     epp_growing:1;   /* during adding pages */

        /*
         * indicating how idle the pools are, from 0 to MAX_IDLE_IDX.
         * this is counted per access to the pools, not per unit of time,
         * which means that if the system is idle for a while the idle_idx
         * may still be low, since no activity has happened in the pools.
         */
        unsigned long    epp_idle_idx;

        /* last shrink time due to memory tightness */
        long             epp_last_shrink;
        long             epp_last_access;

        /*
         * in-pool pages bookkeeping
         */
        spinlock_t       epp_lock;        /* protect following fields */
        unsigned long    epp_total_pages; /* total pages in pools */
        unsigned long    epp_free_pages;  /* current pages available */

        /*
         * statistics
         */
        unsigned long    epp_st_max_pages;  /* # of pages ever reached */
        unsigned int     epp_st_grows;      /* # of grows */
        unsigned int     epp_st_grow_fails; /* # of add-pages failures */
        unsigned int     epp_st_shrinks;    /* # of shrinks */
        unsigned long    epp_st_access;     /* # of accesses */
        unsigned long    epp_st_missings;   /* # of cache misses */
        unsigned long    epp_st_lowfree;    /* lowest free pages reached */
        unsigned int     epp_st_max_wqlen;  /* highest waitqueue length */
        cfs_time_t       epp_st_max_wait;   /* in jiffies */

        /*
         * pointers to pools
         */
        struct page    ***epp_pools;
} page_pools;

/*
 * memory shrinker
 */
const int pools_shrinker_seeks = DEFAULT_SEEKS;
static struct shrinker *pools_shrinker;

/*
 * /proc/fs/lustre/sptlrpc/encrypt_page_pools
 */
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
{
        int rc;

        spin_lock(&page_pools.epp_lock);

        rc = seq_printf(m,
                        "physical pages:          %lu\n"
                        "pages per pool:          %lu\n"
                        "max pages:               %lu\n"
                        "max pools:               %u\n"
                        "total pages:             %lu\n"
                        "total free:              %lu\n"
                        "idle index:              %lu/100\n"
                        "last shrink:             %lds\n"
                        "last access:             %lds\n"
                        "max pages reached:       %lu\n"
                        "grows:                   %u\n"
                        "grows failure:           %u\n"
                        "shrinks:                 %u\n"
                        "cache access:            %lu\n"
                        "cache missing:           %lu\n"
                        "low free mark:           %lu\n"
                        "max waitqueue depth:     %u\n"
                        "max wait time:           "CFS_TIME_T"/%u\n",
                        totalram_pages,
                        PAGES_PER_POOL,
                        page_pools.epp_max_pages,
                        page_pools.epp_max_pools,
                        page_pools.epp_total_pages,
                        page_pools.epp_free_pages,
                        page_pools.epp_idle_idx,
                        cfs_time_current_sec() - page_pools.epp_last_shrink,
                        cfs_time_current_sec() - page_pools.epp_last_access,
                        page_pools.epp_st_max_pages,
                        page_pools.epp_st_grows,
                        page_pools.epp_st_grow_fails,
                        page_pools.epp_st_shrinks,
                        page_pools.epp_st_access,
                        page_pools.epp_st_missings,
                        page_pools.epp_st_lowfree,
                        page_pools.epp_st_max_wqlen,
                        page_pools.epp_st_max_wait, HZ);

        spin_unlock(&page_pools.epp_lock);
        return rc;
}

static void enc_pools_release_free_pages(long npages)
{
        int p_idx, g_idx;
        int p_idx_max1, p_idx_max2;

        LASSERT(npages > 0);
        LASSERT(npages <= page_pools.epp_free_pages);
        LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);

        /* max pool index before the release */
        p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;

        page_pools.epp_free_pages -= npages;
        page_pools.epp_total_pages -= npages;

        /* max pool index after the release */
        p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
                     ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
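
        /*
         * free pages occupy flat indices [0, epp_free_pages); a flat index
         * N maps to pool N / PAGES_PER_POOL, slot N % PAGES_PER_POOL. e.g.
         * with PAGES_PER_POOL == 512, free-page index 1300 lives in pool 2,
         * slot 276.
         */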
        LASSERT(page_pools.epp_pools[p_idx]);

        while (npages--) {
                LASSERT(page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);

                __free_page(page_pools.epp_pools[p_idx][g_idx]);
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        /* free unused pools */
        while (p_idx_max1 < p_idx_max2) {
                LASSERT(page_pools.epp_pools[p_idx_max2]);
                OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE);
                page_pools.epp_pools[p_idx_max2] = NULL;
                p_idx_max2--;
        }
}

/*
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static unsigned long enc_pools_shrink_count(struct shrinker *s,
                                            struct shrink_control *sc)
{
        /*
         * if there has been no pool access for a long time, we consider
         * the pools fully idle. a little race here is fine.
         */
        if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
                     CACHE_QUIESCENT_PERIOD)) {
                spin_lock(&page_pools.epp_lock);
                page_pools.epp_idle_idx = IDLE_IDX_MAX;
                spin_unlock(&page_pools.epp_lock);
        }

        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
        return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
                (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
}
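
/*
 * a worked example of the count formula above, with illustrative values
 * (PTLRPC_MAX_BRW_PAGES varies by configuration): if epp_free_pages is 356,
 * PTLRPC_MAX_BRW_PAGES is 256 and epp_idle_idx is 50, the shrinker is told
 * that (356 - 256) * (100 - 50) / 100 = 50 pages are reclaimable.
 */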

/*
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static unsigned long enc_pools_shrink_scan(struct shrinker *s,
                                           struct shrink_control *sc)
{
        spin_lock(&page_pools.epp_lock);
        sc->nr_to_scan = min_t(unsigned long, sc->nr_to_scan,
                               page_pools.epp_free_pages -
                               PTLRPC_MAX_BRW_PAGES);
        if (sc->nr_to_scan > 0) {
                enc_pools_release_free_pages(sc->nr_to_scan);
                CDEBUG(D_SEC, "released %ld pages, %ld left\n",
                       (long)sc->nr_to_scan, page_pools.epp_free_pages);

                page_pools.epp_st_shrinks++;
                page_pools.epp_last_shrink = cfs_time_current_sec();
        }
        spin_unlock(&page_pools.epp_lock);

        /*
         * if there has been no pool access for a long time, we consider
         * the pools fully idle. a little race here is fine.
         */
        if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
                     CACHE_QUIESCENT_PERIOD)) {
                spin_lock(&page_pools.epp_lock);
                page_pools.epp_idle_idx = IDLE_IDX_MAX;
                spin_unlock(&page_pools.epp_lock);
        }

        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
        return sc->nr_to_scan;
}

#ifndef HAVE_SHRINKER_COUNT
/*
 * could be called frequently for query (@nr_to_scan == 0).
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        struct shrink_control scv = {
                .nr_to_scan = shrink_param(sc, nr_to_scan),
                .gfp_mask   = shrink_param(sc, gfp_mask)
        };
#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
        struct shrinker *shrinker = NULL;
#endif

        enc_pools_shrink_scan(shrinker, &scv);

        return enc_pools_shrink_count(shrinker, &scv);
}

#endif /* HAVE_SHRINKER_COUNT */

static inline
int npages_to_npools(unsigned long npages)
{
        return (int)((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
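
/*
 * e.g. with 4KB pages PAGES_PER_POOL is 512, so npages_to_npools(1)
 * returns 1 and npages_to_npools(513) returns 2 (round-up division).
 */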

/*
 * return how many pages were cleaned up.
 */
static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
{
        unsigned long cleaned = 0;
        int i, j;

        for (i = 0; i < npools; i++) {
                if (pools[i]) {
                        for (j = 0; j < PAGES_PER_POOL; j++) {
                                if (pools[i][j]) {
                                        __free_page(pools[i][j]);
                                        cleaned++;
                                }
                        }
                        OBD_FREE(pools[i], PAGE_CACHE_SIZE);
                        pools[i] = NULL;
                }
        }

        return cleaned;
}

/*
 * merge @npools pools, pointed to by @pools and containing @npages new
 * pages, into the current pools.
 *
 * we have options to avoid most memory copy with some tricks. but we choose
 * the simplest way to avoid complexity. it's not frequently called.
 */
static void enc_pools_insert(struct page ***pools, int npools, int npages)
{
        int freeslot;
        int op_idx, np_idx, og_idx, ng_idx;
        int cur_npools, end_npools;

        LASSERT(npages > 0);
        LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
        LASSERT(npages_to_npools(npages) == npools);
        LASSERT(page_pools.epp_growing);

        spin_lock(&page_pools.epp_lock);

        /*
         * (1) fill all the free slots in current pools.
         */
        /* free slots are those left by rented pages, plus the extra ones
         * with index >= total_pages, located at the tail of the last pool. */
        freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
        if (freeslot != 0)
                freeslot = PAGES_PER_POOL - freeslot;
        freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;
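
        /*
         * freeslot example with illustrative numbers: if total_pages is
         * 1000 and free_pages is 900 (PAGES_PER_POOL == 512), the last pool
         * has 512 - (1000 % 512) = 24 tail slots and 1000 - 900 = 100 slots
         * were vacated by rented-out pages, so freeslot is 124.
         */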
        op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        np_idx = npools - 1;
        ng_idx = (npages - 1) % PAGES_PER_POOL;

        while (freeslot) {
                LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
                LASSERT(pools[np_idx][ng_idx] != NULL);

                page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
                pools[np_idx][ng_idx] = NULL;

                freeslot--;

                if (++og_idx == PAGES_PER_POOL) {
                        op_idx++;
                        og_idx = 0;
                }
                if (--ng_idx < 0) {
                        if (np_idx == 0)
                                break;
                        np_idx--;
                        ng_idx = PAGES_PER_POOL - 1;
                }
        }

        /*
         * (2) add pools if needed.
         */
        cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        end_npools = (page_pools.epp_total_pages + npages +
                      PAGES_PER_POOL - 1) / PAGES_PER_POOL;
        LASSERT(end_npools <= page_pools.epp_max_pools);

        np_idx = 0;
        while (cur_npools < end_npools) {
                LASSERT(page_pools.epp_pools[cur_npools] == NULL);
                LASSERT(np_idx < npools);
                LASSERT(pools[np_idx] != NULL);

                page_pools.epp_pools[cur_npools++] = pools[np_idx];
                pools[np_idx++] = NULL;
        }

        page_pools.epp_total_pages += npages;
        page_pools.epp_free_pages += npages;
        page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        if (page_pools.epp_total_pages > page_pools.epp_st_max_pages)
                page_pools.epp_st_max_pages = page_pools.epp_total_pages;

        CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
               page_pools.epp_total_pages);

        spin_unlock(&page_pools.epp_lock);
}

static int enc_pools_add_pages(int npages)
{
        static DEFINE_MUTEX(add_pages_mutex);
        struct page ***pools;
        int npools, alloced = 0;
        int i, j, rc = -ENOMEM;

        if (npages < PTLRPC_MAX_BRW_PAGES)
                npages = PTLRPC_MAX_BRW_PAGES;

        mutex_lock(&add_pages_mutex);

        if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
                npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
        LASSERT(npages > 0);

        page_pools.epp_st_grows++;

        npools = npages_to_npools(npages);
        OBD_ALLOC(pools, npools * sizeof(*pools));
        if (pools == NULL)
                goto out;

        for (i = 0; i < npools; i++) {
                OBD_ALLOC(pools[i], PAGE_CACHE_SIZE);
                if (pools[i] == NULL)
                        goto out_pools;

                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
                        pools[i][j] = alloc_page(GFP_NOFS |
                                                 __GFP_HIGHMEM);
                        if (pools[i][j] == NULL)
                                goto out_pools;

                        alloced++;
                }
        }
        LASSERT(alloced == npages);

        enc_pools_insert(pools, npools, npages);
        CDEBUG(D_SEC, "added %d pages into pools\n", npages);
        rc = 0;

out_pools:
        enc_pools_cleanup(pools, npools);
        OBD_FREE(pools, npools * sizeof(*pools));
out:
        if (rc) {
                page_pools.epp_st_grow_fails++;
                CERROR("Failed to allocate %d enc pages\n", npages);
        }

        mutex_unlock(&add_pages_mutex);
        return rc;
}

static inline void enc_pools_wakeup(void)
{
        assert_spin_locked(&page_pools.epp_lock);

        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(waitqueue_active(&page_pools.epp_waitq));
                wake_up_all(&page_pools.epp_waitq);
        }
}

static int enc_pools_should_grow(int page_needed, long now)
{
        /* don't grow if someone else is growing the pools right now,
         * or the pools have reached their full capacity
         */
        if (page_pools.epp_growing ||
            page_pools.epp_total_pages == page_pools.epp_max_pages)
                return 0;

        /* if total pages is not enough, we need to grow */
        if (page_pools.epp_total_pages < page_needed)
                return 1;

        /*
         * we wanted to return 0 here if a shrink happened just a moment
         * ago, but this may cause deadlock if both client and ost live
         * on a single node.
         */
#if 0
        if (now - page_pools.epp_last_shrink < 2)
                return 0;
#endif

        /*
         * here we perhaps need to consider other factors like wait queue
         * length, idle index, etc. ?
         */

        /* grow the pools in any other cases */
        return 1;
}

/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        wait_queue_t waitlink;
        unsigned long this_idle = -1;
        cfs_time_t tick = 0;
        long now;
        int p_idx, g_idx;
        int i;

        LASSERT(desc->bd_iov_count > 0);
        LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

        /* resent bulk, enc iov might have been allocated previously */
        if (desc->bd_enc_iov != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_iov,
                  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
        if (desc->bd_enc_iov == NULL)
                return -ENOMEM;

        spin_lock(&page_pools.epp_lock);

        page_pools.epp_st_access++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
                if (tick == 0)
                        tick = cfs_time_current();

                now = cfs_time_current_sec();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_iov_count;

                if (enc_pools_should_grow(desc->bd_iov_count, now)) {
                        page_pools.epp_growing = 1;

                        spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        spin_lock(&page_pools.epp_lock);

                        page_pools.epp_growing = 0;

                        enc_pools_wakeup();
                } else {
                        if (++page_pools.epp_waitqlen >
                            page_pools.epp_st_max_wqlen)
                                page_pools.epp_st_max_wqlen =
                                                page_pools.epp_waitqlen;

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        init_waitqueue_entry_current(&waitlink);
                        add_wait_queue(&page_pools.epp_waitq, &waitlink);

                        spin_unlock(&page_pools.epp_lock);
                        waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
                        remove_wait_queue(&page_pools.epp_waitq, &waitlink);
                        LASSERT(page_pools.epp_waitqlen > 0);
                        spin_lock(&page_pools.epp_lock);
                        page_pools.epp_waitqlen--;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
                page_pools.epp_pages_short -= desc->bd_iov_count;

                this_idle = 0;
                goto again;
        }

        /* record max wait time */
        if (unlikely(tick != 0)) {
                tick = cfs_time_current() - tick;
                if (tick > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick;
        }

        /* proceed with rest of allocation */
        page_pools.epp_free_pages -= desc->bd_iov_count;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_iov[i].kiov_page =
                                        page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        /*
         * new idle index = (old * weight + new) / (weight + 1)
         */
        if (this_idle == -1) {
                this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
                            page_pools.epp_total_pages;
        }
        page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
                                   this_idle) /
                                  (IDLE_IDX_WEIGHT + 1);
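
        /*
         * worked example of the weighted average above (illustrative
         * numbers): with IDLE_IDX_WEIGHT == 3, an old idle index of 80 and
         * this_idle == 40 (40% of the pool free), the new idle index is
         * (80 * 3 + 40) / 4 = 70, so the index decays toward the
         * instantaneous value over repeated accesses.
         */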
        page_pools.epp_last_access = cfs_time_current_sec();

        spin_unlock(&page_pools.epp_lock);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
        int p_idx, g_idx;
        int i;

        if (desc->bd_enc_iov == NULL)
                return;

        LASSERT(desc->bd_iov_count > 0);

        spin_lock(&page_pools.epp_lock);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
                page_pools.epp_total_pages);
        LASSERT(page_pools.epp_pools[p_idx]);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

                page_pools.epp_pools[p_idx][g_idx] =
                                        desc->bd_enc_iov[i].kiov_page;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        page_pools.epp_free_pages += desc->bd_iov_count;

        enc_pools_wakeup();

        spin_unlock(&page_pools.epp_lock);

        OBD_FREE(desc->bd_enc_iov,
                 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
        desc->bd_enc_iov = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
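
/*
 * typical caller pattern (a sketch, not code from this file): a bulk
 * security flavor that encrypts data acquires scratch pages for the
 * descriptor first, and releases them once the bulk transfer completes:
 *
 *      rc = sptlrpc_enc_pool_get_pages(desc);
 *      if (rc)
 *              return rc;
 *      ... encrypt desc->bd_iov pages into desc->bd_enc_iov pages ...
 *      sptlrpc_enc_pool_put_pages(desc);
 */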

/*
 * we don't do much for add_user/del_user anymore, except adding some
 * initial pages in add_user() if the current pools are empty; the rest is
 * handled by the pools' self-adaptation.
 */
int sptlrpc_enc_pool_add_user(void)
{
        int need_grow = 0;

        spin_lock(&page_pools.epp_lock);
        if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
                page_pools.epp_growing = 1;
                need_grow = 1;
        }
        spin_unlock(&page_pools.epp_lock);

        if (need_grow) {
                enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES +
                                    PTLRPC_MAX_BRW_PAGES);

                spin_lock(&page_pools.epp_lock);
                page_pools.epp_growing = 0;
                enc_pools_wakeup();
                spin_unlock(&page_pools.epp_lock);
        }
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);

int sptlrpc_enc_pool_del_user(void)
{
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);

static inline void enc_pools_alloc(void)
{
        LASSERT(page_pools.epp_max_pools);
        OBD_ALLOC_LARGE(page_pools.epp_pools,
                        page_pools.epp_max_pools *
                        sizeof(*page_pools.epp_pools));
}

static inline void enc_pools_free(void)
{
        LASSERT(page_pools.epp_max_pools);
        LASSERT(page_pools.epp_pools);

        OBD_FREE_LARGE(page_pools.epp_pools,
                       page_pools.epp_max_pools *
                       sizeof(*page_pools.epp_pools));
}

int sptlrpc_enc_pool_init(void)
{
        DEF_SHRINKER_VAR(shvar, enc_pools_shrink,
                         enc_pools_shrink_count, enc_pools_shrink_scan);
        /*
         * maximum capacity is 1/8 of total physical memory.
         * is 1/8 a good number?
         */
        page_pools.epp_max_pages = totalram_pages / 8;
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
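
        /*
         * illustrative sizing: on a node with 16GB of RAM and 4KB pages
         * (totalram_pages == 4M), epp_max_pages is 512K pages (2GB) and,
         * with PAGES_PER_POOL == 512, epp_max_pools is 1024.
         */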

        init_waitqueue_head(&page_pools.epp_waitq);
        page_pools.epp_waitqlen = 0;
        page_pools.epp_pages_short = 0;

        page_pools.epp_growing = 0;

        page_pools.epp_idle_idx = 0;
        page_pools.epp_last_shrink = cfs_time_current_sec();
        page_pools.epp_last_access = cfs_time_current_sec();

        spin_lock_init(&page_pools.epp_lock);
        page_pools.epp_total_pages = 0;
        page_pools.epp_free_pages = 0;

        page_pools.epp_st_max_pages = 0;
        page_pools.epp_st_grows = 0;
        page_pools.epp_st_grow_fails = 0;
        page_pools.epp_st_shrinks = 0;
        page_pools.epp_st_access = 0;
        page_pools.epp_st_missings = 0;
        page_pools.epp_st_lowfree = 0;
        page_pools.epp_st_max_wqlen = 0;
        page_pools.epp_st_max_wait = 0;

        enc_pools_alloc();
        if (page_pools.epp_pools == NULL)
                return -ENOMEM;

        pools_shrinker = set_shrinker(pools_shrinker_seeks, &shvar);
        if (pools_shrinker == NULL) {
                enc_pools_free();
                return -ENOMEM;
        }

        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
        unsigned long cleaned, npools;

        LASSERT(pools_shrinker);
        LASSERT(page_pools.epp_pools);
        LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

        remove_shrinker(pools_shrinker);

        npools = npages_to_npools(page_pools.epp_total_pages);
        cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
        LASSERT(cleaned == page_pools.epp_total_pages);

        enc_pools_free();

        if (page_pools.epp_st_access > 0) {
                CDEBUG(D_SEC,
                       "max pages %lu, grows %u, grow fails %u, shrinks %u, "
                       "access %lu, missing %lu, max qlen %u, max wait "
                       CFS_TIME_T"/%u\n",
                       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
                       page_pools.epp_st_grow_fails,
                       page_pools.epp_st_shrinks, page_pools.epp_st_access,
                       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
                       page_pools.epp_st_max_wait, HZ);
        }
}

#else /* !__KERNEL__ */

int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        return 0;
}

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
}

int sptlrpc_enc_pool_init(void)
{
        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
}

#endif /* __KERNEL__ */

static int cfs_hash_alg_id[] = {
        [BULK_HASH_ALG_NULL]    = CFS_HASH_ALG_NULL,
        [BULK_HASH_ALG_ADLER32] = CFS_HASH_ALG_ADLER32,
        [BULK_HASH_ALG_CRC32]   = CFS_HASH_ALG_CRC32,
        [BULK_HASH_ALG_MD5]     = CFS_HASH_ALG_MD5,
        [BULK_HASH_ALG_SHA1]    = CFS_HASH_ALG_SHA1,
        [BULK_HASH_ALG_SHA256]  = CFS_HASH_ALG_SHA256,
        [BULK_HASH_ALG_SHA384]  = CFS_HASH_ALG_SHA384,
        [BULK_HASH_ALG_SHA512]  = CFS_HASH_ALG_SHA512,
};

const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
        return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
}
EXPORT_SYMBOL(sptlrpc_get_hash_name);

__u8 sptlrpc_get_hash_alg(const char *algname)
{
        return cfs_crypto_hash_alg(algname);
}
EXPORT_SYMBOL(sptlrpc_get_hash_alg);
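
/*
 * e.g. sptlrpc_get_hash_name(BULK_HASH_ALG_SHA1) looks up whatever name
 * libcfs registered for CFS_HASH_ALG_SHA1 (presumably "sha1"); the reverse
 * lookup in sptlrpc_get_hash_alg() is delegated entirely to
 * cfs_crypto_hash_alg().
 */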

int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        int size = msg->lm_buflens[offset];

        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        if (bsd == NULL) {
                CERROR("Invalid bulk sec desc: size %d\n", size);
                return -EINVAL;
        }

        if (swabbed)
                __swab32s(&bsd->bsd_nob);

        if (unlikely(bsd->bsd_version != 0)) {
                CERROR("Unexpected version %u\n", bsd->bsd_version);
                return -EPROTO;
        }

        if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
                CERROR("Invalid type %u\n", bsd->bsd_type);
                return -EPROTO;
        }

        /* FIXME more sanity checks here */

        if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
                     bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
                     bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
                CERROR("Invalid svc %u\n", bsd->bsd_svc);
                return -EPROTO;
        }

        return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);

int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
                              void *buf, int buflen)
{
        struct cfs_crypto_hash_desc *hdesc;
        int hashsize;
        char hashbuf[64];
        unsigned int bufsize;
        int i, err;

        LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
        LASSERT(buflen >= 4);

        hdesc = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
        if (IS_ERR(hdesc)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
                return PTR_ERR(hdesc);
        }

        hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);

        for (i = 0; i < desc->bd_iov_count; i++) {
#ifdef __KERNEL__
                cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
                                desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
                                desc->bd_iov[i].kiov_len);
#else
                cfs_crypto_hash_update(hdesc, desc->bd_iov[i].iov_base,
                                       desc->bd_iov[i].iov_len);
#endif
        }

        if (hashsize > buflen) {
                bufsize = sizeof(hashbuf);
                err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
                                            &bufsize);
                memcpy(buf, hashbuf, buflen);
        } else {
                bufsize = buflen;
                err = cfs_crypto_hash_final(hdesc, (unsigned char *)buf,
                                            &bufsize);
        }

        if (err)
                cfs_crypto_hash_final(hdesc, NULL, NULL);
        return err;
}
EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
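
/*
 * note on the truncation path above: when the digest is larger than the
 * caller's buffer (e.g. a 20-byte SHA-1 digest into a 4-byte buffer), the
 * full digest is computed into the local hashbuf and only the first buflen
 * bytes are copied out, so short checksum fields receive a truncated hash.
 */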