/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_bulk.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
#else
#include <linux/crypto.h>
#endif

#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"
/****************************************
 * bulk encryption page pools           *
 ****************************************/

#ifdef __KERNEL__

#define PTRS_PER_PAGE   (CFS_PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL  (PTRS_PER_PAGE)
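/*
 * a note on pool geometry (the epp_pools array declared below): each pool
 * is one page used as an array of page pointers, so global page number n
 * lives at epp_pools[n / PAGES_PER_POOL][n % PAGES_PER_POOL].  as an
 * illustration, assuming 4K pages and 8-byte pointers (e.g. x86_64),
 * PTRS_PER_PAGE is 4096 / 8 = 512 and page #1000 maps to epp_pools[1][488].
 */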
#define IDLE_IDX_MAX            (100)
#define IDLE_IDX_WEIGHT         (3)

#define CACHE_QUIESCENT_PERIOD  (20)
static struct ptlrpc_enc_page_pool {
        /*
         * constants
         */
        unsigned long    epp_max_pages;   /* maximum pages we can hold, const */
        unsigned int     epp_max_pools;   /* number of pools, const */

        /*
         * wait queue in case there are not enough free pages.
         */
        cfs_waitq_t      epp_waitq;       /* waiting threads */
        unsigned int     epp_waitqlen;    /* wait queue length */
        unsigned long    epp_pages_short; /* # of pages wanted by queued users */
        unsigned int     epp_growing:1;   /* during adding pages */

        /*
         * index of how idle the pools are, from 0 to IDLE_IDX_MAX.
         * this is updated each time pages are taken from the pools, not
         * based on wall-clock time, so if the system has been idle for a
         * while the idle_idx may still be low as long as there was no
         * activity in the pools.
         */
        unsigned long    epp_idle_idx;

        /* time of the last shrink due to memory pressure */
        long             epp_last_shrink;
        long             epp_last_access;

        /*
         * in-pool pages bookkeeping
         */
        spinlock_t       epp_lock;        /* protect following fields */
        unsigned long    epp_total_pages; /* total pages in pools */
        unsigned long    epp_free_pages;  /* current pages available */

        /*
         * statistics
         */
        unsigned long    epp_st_max_pages;  /* # of pages ever reached */
        unsigned int     epp_st_grows;      /* # of grows */
        unsigned int     epp_st_grow_fails; /* # of failed attempts to add pages */
        unsigned int     epp_st_shrinks;    /* # of shrinks */
        unsigned long    epp_st_access;     /* # of accesses */
        unsigned long    epp_st_missings;   /* # of cache misses */
        unsigned long    epp_st_lowfree;    /* lowest # of free pages reached */
        unsigned int     epp_st_max_wqlen;  /* highest waitqueue length */
        cfs_time_t       epp_st_max_wait;   /* in jiffies */

        /*
         * pointers to pools
         */
        cfs_page_t    ***epp_pools;
} page_pools;
const int pools_shrinker_seeks = DEFAULT_SEEKS;
static struct shrinker *pools_shrinker = NULL;

/*
 * /proc/fs/lustre/sptlrpc/encrypt_page_pools
 */
int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                               int *eof, void *data)
{
        int rc;

        spin_lock(&page_pools.epp_lock);

        rc = snprintf(page, count,
                      "physical pages:          %lu\n"
                      "pages per pool:          %lu\n"
                      "max pages:               %lu\n"
                      "max pools:               %u\n"
                      "total pages:             %lu\n"
                      "total free:              %lu\n"
                      "idle index:              %lu/100\n"
                      "last shrink:             %lds\n"
                      "last access:             %lds\n"
                      "max pages reached:       %lu\n"
                      "grows:                   %u\n"
                      "grow failures:           %u\n"
                      "shrinks:                 %u\n"
                      "cache access:            %lu\n"
                      "cache misses:            %lu\n"
                      "low free mark:           %lu\n"
                      "max waitqueue depth:     %u\n"
                      "max wait time:           "CFS_TIME_T"/%u\n"
                      ,
                      num_physpages,
                      PAGES_PER_POOL,
                      page_pools.epp_max_pages,
                      page_pools.epp_max_pools,
                      page_pools.epp_total_pages,
                      page_pools.epp_free_pages,
                      page_pools.epp_idle_idx,
                      cfs_time_current_sec() - page_pools.epp_last_shrink,
                      cfs_time_current_sec() - page_pools.epp_last_access,
                      page_pools.epp_st_max_pages,
                      page_pools.epp_st_grows,
                      page_pools.epp_st_grow_fails,
                      page_pools.epp_st_shrinks,
                      page_pools.epp_st_access,
                      page_pools.epp_st_missings,
                      page_pools.epp_st_lowfree,
                      page_pools.epp_st_max_wqlen,
                      page_pools.epp_st_max_wait, HZ
                     );

        spin_unlock(&page_pools.epp_lock);
        return rc;
}
static void enc_pools_release_free_pages(long npages)
{
        int p_idx, g_idx;
        int p_idx_max1, p_idx_max2;

        LASSERT(npages <= page_pools.epp_free_pages);
        LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);

        /* max pool index before the release */
        p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;

        page_pools.epp_free_pages -= npages;
        page_pools.epp_total_pages -= npages;

        /* max pool index after the release */
        p_idx_max1 = page_pools.epp_total_pages == 0 ? 0 :
                     ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
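        /*
         * the available pages always occupy slots [0, epp_free_pages) in
         * global page order, so after the counters above were decremented,
         * (p_idx, g_idx) points at the first of the npages pages being
         * released from the tail of the free region.
         */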
        LASSERT(page_pools.epp_pools[p_idx]);

        while (npages--) {
                LASSERT(page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);

                cfs_free_page(page_pools.epp_pools[p_idx][g_idx]);
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        /* free unused pools */
        while (p_idx_max1 < p_idx_max2) {
                LASSERT(page_pools.epp_pools[p_idx_max2]);
                OBD_FREE(page_pools.epp_pools[p_idx_max2], CFS_PAGE_SIZE);
                page_pools.epp_pools[p_idx_max2] = NULL;
                p_idx_max2--;
        }
}
/*
 * could be called frequently for query (@nr_to_scan == 0).
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
{
        int ret;

        spin_lock(&page_pools.epp_lock);

        if (nr_to_scan > page_pools.epp_free_pages)
                nr_to_scan = page_pools.epp_free_pages;

        if (nr_to_scan > 0) {
                enc_pools_release_free_pages(nr_to_scan);
                CDEBUG(D_SEC, "released %d pages, %ld left\n",
                       nr_to_scan, page_pools.epp_free_pages);

                page_pools.epp_st_shrinks++;
                page_pools.epp_last_shrink = cfs_time_current_sec();
        }

        /*
         * try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool
         */
        if (page_pools.epp_free_pages <= PTLRPC_MAX_BRW_PAGES) {
                ret = 0;
                goto out_unlock;
        }

        /*
         * if the pools have not been accessed for a long time, consider
         * them fully idle.
         */
        if (cfs_time_current_sec() - page_pools.epp_last_access >
            CACHE_QUIESCENT_PERIOD)
                page_pools.epp_idle_idx = IDLE_IDX_MAX;

        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
        ret = (page_pools.epp_free_pages * page_pools.epp_idle_idx /
               IDLE_IDX_MAX);
        if (page_pools.epp_free_pages - ret < PTLRPC_MAX_BRW_PAGES)
                ret = page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES;
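        /*
         * e.g. (illustrative numbers only): with 10000 free pages and an
         * idle index of 50, we offer to release 10000 * 50 / 100 = 5000
         * pages; the clamp above then makes sure at least
         * PTLRPC_MAX_BRW_PAGES of the free pages stay behind.
         */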
out_unlock:
        spin_unlock(&page_pools.epp_lock);
        return ret;
}
int npages_to_npools(unsigned long npages)
{
        return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
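/*
 * this is just a ceiling division, rounding up to whole pools: e.g. with
 * PAGES_PER_POOL == 512, npages_to_npools(513) == 2.
 */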
/*
 * return how many pages were cleaned up.
 */
static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
{
        unsigned long cleaned = 0;
        int i, j;

        for (i = 0; i < npools; i++) {
                if (pools[i]) {
                        for (j = 0; j < PAGES_PER_POOL; j++) {
                                if (pools[i][j]) {
                                        cfs_free_page(pools[i][j]);
                                        cleaned++;
                                }
                        }
                        OBD_FREE(pools[i], CFS_PAGE_SIZE);
                        pools[i] = NULL;
                }
        }

        return cleaned;
}
/*
 * merge the @npools pools pointed to by @pools, which contain @npages new
 * pages, into the current pools.
 *
 * we have options to avoid most memory copies with some tricks, but we
 * choose the simplest way to avoid complexity. it's not frequently called.
 */
static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
{
        int freeslot;
        int op_idx, np_idx, og_idx, ng_idx;
        int cur_npools, end_npools;

        LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
        LASSERT(npages_to_npools(npages) == npools);

        spin_lock(&page_pools.epp_lock);

        /*
         * (1) fill all the free slots of current pools.
         */
        /* free slots are those left by pages currently rented out, plus the
         * extra ones with index >= total_pages, located at the tail of the
         * last pool. */
        freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
        if (freeslot != 0)
                freeslot = PAGES_PER_POOL - freeslot;
        freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;
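        /*
         * worked example (illustrative numbers only): with PAGES_PER_POOL
         * == 512, total_pages == 1000 and free_pages == 900, the last pool
         * has 512 - (1000 % 512) = 24 unused tail slots, and 1000 - 900 =
         * 100 slots were vacated by rented-out pages, so freeslot == 124.
         */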
        op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        np_idx = npools - 1;
        ng_idx = (npages - 1) % PAGES_PER_POOL;

        while (freeslot) {
                LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
                LASSERT(pools[np_idx][ng_idx] != NULL);

                page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
                pools[np_idx][ng_idx] = NULL;

                freeslot--;

                if (++og_idx == PAGES_PER_POOL) {
                        op_idx++;
                        og_idx = 0;
                }
                if (--ng_idx < 0) {
                        np_idx--;
                        ng_idx = PAGES_PER_POOL - 1;
                }
        }

        /*
         * (2) add pools if needed.
         */
        cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        end_npools = (page_pools.epp_total_pages + npages +
                      PAGES_PER_POOL - 1) / PAGES_PER_POOL;
        LASSERT(end_npools <= page_pools.epp_max_pools);

        np_idx = 0;
        while (cur_npools < end_npools) {
                LASSERT(page_pools.epp_pools[cur_npools] == NULL);
                LASSERT(np_idx < npools);
                LASSERT(pools[np_idx] != NULL);

                page_pools.epp_pools[cur_npools++] = pools[np_idx];
                pools[np_idx++] = NULL;
        }
        page_pools.epp_total_pages += npages;
        page_pools.epp_free_pages += npages;
        page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        if (page_pools.epp_total_pages > page_pools.epp_st_max_pages)
                page_pools.epp_st_max_pages = page_pools.epp_total_pages;

        CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
               page_pools.epp_total_pages);

        spin_unlock(&page_pools.epp_lock);
}
static int enc_pools_add_pages(int npages)
{
        static DECLARE_MUTEX(sem_add_pages);
        cfs_page_t ***pools;
        int npools, alloced = 0;
        int i, j, rc = -ENOMEM;

        if (npages < PTLRPC_MAX_BRW_PAGES)
                npages = PTLRPC_MAX_BRW_PAGES;

        down(&sem_add_pages);

        if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
                npages = page_pools.epp_max_pages - page_pools.epp_total_pages;

        page_pools.epp_st_grows++;

        npools = npages_to_npools(npages);
        OBD_ALLOC(pools, npools * sizeof(*pools));
        if (pools == NULL)
                goto out;

        for (i = 0; i < npools; i++) {
                OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
                if (pools[i] == NULL)
                        goto out_pools;

                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
                        pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
                                                     CFS_ALLOC_HIGH);
                        if (pools[i][j] == NULL)
                                goto out_pools;

                        alloced++;
                }
        }

        enc_pools_insert(pools, npools, npages);
        CDEBUG(D_SEC, "added %d pages into pools\n", npages);
        rc = 0;

out_pools:
        enc_pools_cleanup(pools, npools);
        OBD_FREE(pools, npools * sizeof(*pools));
out:
        if (rc) {
                page_pools.epp_st_grow_fails++;
                CERROR("Failed to allocate %d enc pages\n", npages);
        }

        up(&sem_add_pages);
        return rc;
}
static inline void enc_pools_wakeup(void)
{
        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(page_pools.epp_waitqlen > 0);
                LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
                cfs_waitq_broadcast(&page_pools.epp_waitq);
        }
}
static int enc_pools_should_grow(int page_needed, long now)
{
        /* don't grow if someone else is growing the pools right now,
         * or the pools have reached their full capacity.
         */
        if (page_pools.epp_growing ||
            page_pools.epp_total_pages == page_pools.epp_max_pages)
                return 0;

        /* if the total page count is not enough, we need to grow */
        if (page_pools.epp_total_pages < page_needed)
                return 1;

        /* if we just did a shrink due to memory pressure, we'd better
         * wait a while before growing again.
         */
        if (now - page_pools.epp_last_shrink < 2)
                return 0;

        /*
         * here we could perhaps also consider other factors, such as the
         * wait queue length and the idle index.
         */

        /* grow the pools in all other cases */
        return 1;
}
/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        cfs_waitlink_t waitlink;
        unsigned long this_idle = -1;
        cfs_time_t tick = 0;
        long now;
        int p_idx, g_idx;
        int i;
        LASSERT(desc->bd_max_iov > 0);
        LASSERT(desc->bd_max_iov <= page_pools.epp_max_pages);

        /* resent bulk, enc pages might have been allocated previously */
        if (desc->bd_enc_pages != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_pages,
                  desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        if (desc->bd_enc_pages == NULL)
                return -ENOMEM;
        spin_lock(&page_pools.epp_lock);

        page_pools.epp_st_access++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_max_iov)) {
                if (tick == 0)
                        tick = cfs_time_current();

                now = cfs_time_current_sec();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_max_iov;

                if (enc_pools_should_grow(desc->bd_max_iov, now)) {
                        page_pools.epp_growing = 1;

                        spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        spin_lock(&page_pools.epp_lock);

                        page_pools.epp_growing = 0;
                } else {
                        if (++page_pools.epp_waitqlen >
                            page_pools.epp_st_max_wqlen)
                                page_pools.epp_st_max_wqlen =
                                                page_pools.epp_waitqlen;

                        set_current_state(CFS_TASK_UNINT);
                        cfs_waitlink_init(&waitlink);
                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);

                        spin_unlock(&page_pools.epp_lock);
                        cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
                        cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
                        spin_lock(&page_pools.epp_lock);

                        LASSERT(page_pools.epp_waitqlen > 0);
                        page_pools.epp_waitqlen--;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_max_iov);
                page_pools.epp_pages_short -= desc->bd_max_iov;

                this_idle = 0;
                goto again;
        }
        /* record max wait time */
        if (unlikely(tick != 0)) {
                tick = cfs_time_current() - tick;
                if (tick > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick;
        }

        /* proceed with rest of allocation */
        page_pools.epp_free_pages -= desc->bd_max_iov;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_pages[i] = page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;
        /*
         * new idle index = (old * weight + new) / (weight + 1)
         */
        if (this_idle == -1) {
                this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
                            page_pools.epp_total_pages;
        }
        page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
                                   this_idle) /
                                  (IDLE_IDX_WEIGHT + 1);
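        /*
         * e.g. (illustrative numbers only): with IDLE_IDX_WEIGHT == 3, an
         * old index of 100 and an instantaneous value of 0, the new index
         * is (100 * 3 + 0) / 4 = 75, i.e. the index decays toward the
         * current free/total ratio instead of jumping straight to it.
         */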
        page_pools.epp_last_access = cfs_time_current_sec();

        spin_unlock(&page_pools.epp_lock);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
        int p_idx, g_idx;
        int i;

        if (desc->bd_enc_pages == NULL)
                return;

        if (desc->bd_max_iov == 0)
                return;

        spin_lock(&page_pools.epp_lock);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        LASSERT(page_pools.epp_free_pages + desc->bd_max_iov <=
                page_pools.epp_total_pages);
        LASSERT(page_pools.epp_pools[p_idx]);

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(desc->bd_enc_pages[i] != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

                page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_pages[i];

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        page_pools.epp_free_pages += desc->bd_max_iov;

        enc_pools_wakeup();

        spin_unlock(&page_pools.epp_lock);

        OBD_FREE(desc->bd_enc_pages,
                 desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        desc->bd_enc_pages = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
/*
 * we don't do much for add_user/del_user anymore, except adding some
 * initial pages in add_user() if the current pools are empty. the rest is
 * handled by the pools' self-adaptation.
 */
int sptlrpc_enc_pool_add_user(void)
{
        int need_grow = 0;

        spin_lock(&page_pools.epp_lock);
        if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
                page_pools.epp_growing = 1;
                need_grow = 1;
        }
        spin_unlock(&page_pools.epp_lock);

        if (need_grow) {
                enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES);

                spin_lock(&page_pools.epp_lock);
                page_pools.epp_growing = 0;
                enc_pools_wakeup();
                spin_unlock(&page_pools.epp_lock);
        }

        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
int sptlrpc_enc_pool_del_user(void)
{
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);
static inline void enc_pools_alloc(void)
{
        LASSERT(page_pools.epp_max_pools);
        /*
         * on a system with huge memory but a small page size, this might
         * lead to a high-order allocation. but it's not common, and we
         * assume memory is not too fragmented at module load time.
         */
        OBD_ALLOC(page_pools.epp_pools,
                  page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
}

static inline void enc_pools_free(void)
{
        LASSERT(page_pools.epp_max_pools);
        LASSERT(page_pools.epp_pools);

        OBD_FREE(page_pools.epp_pools,
                 page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
}
int sptlrpc_enc_pool_init(void)
{
        /*
         * maximum capacity is 1/8 of total physical memory.
         * is 1/8 a good number?
         */
        page_pools.epp_max_pages = num_physpages / 8;
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

        cfs_waitq_init(&page_pools.epp_waitq);
        page_pools.epp_waitqlen = 0;
        page_pools.epp_pages_short = 0;

        page_pools.epp_growing = 0;

        page_pools.epp_idle_idx = 0;
        page_pools.epp_last_shrink = cfs_time_current_sec();
        page_pools.epp_last_access = cfs_time_current_sec();

        spin_lock_init(&page_pools.epp_lock);
        page_pools.epp_total_pages = 0;
        page_pools.epp_free_pages = 0;

        page_pools.epp_st_max_pages = 0;
        page_pools.epp_st_grows = 0;
        page_pools.epp_st_grow_fails = 0;
        page_pools.epp_st_shrinks = 0;
        page_pools.epp_st_access = 0;
        page_pools.epp_st_missings = 0;
        page_pools.epp_st_lowfree = 0;
        page_pools.epp_st_max_wqlen = 0;
        page_pools.epp_st_max_wait = 0;

        enc_pools_alloc();
        if (page_pools.epp_pools == NULL)
                return -ENOMEM;
        pools_shrinker = set_shrinker(pools_shrinker_seeks, enc_pools_shrink);
        if (pools_shrinker == NULL) {
                enc_pools_free();
                return -ENOMEM;
        }

        return 0;
}
void sptlrpc_enc_pool_fini(void)
{
        unsigned long cleaned, npools;

        LASSERT(pools_shrinker);
        LASSERT(page_pools.epp_pools);
        LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

        remove_shrinker(pools_shrinker);

        npools = npages_to_npools(page_pools.epp_total_pages);
        cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
        LASSERT(cleaned == page_pools.epp_total_pages);

        enc_pools_free();

        if (page_pools.epp_st_access > 0) {
                CWARN("max pages %lu, grows %u, grow fails %u, shrinks %u, "
                      "access %lu, missing %lu, max qlen %u, max wait "
                      CFS_TIME_T"/%u\n",
                      page_pools.epp_st_max_pages, page_pools.epp_st_grows,
                      page_pools.epp_st_grow_fails,
                      page_pools.epp_st_shrinks, page_pools.epp_st_access,
                      page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
                      page_pools.epp_st_max_wait, HZ);
        }
}
#else /* !__KERNEL__ */

int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        return 0;
}

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
}

int sptlrpc_enc_pool_init(void)
{
        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
}

#endif /* __KERNEL__ */
/****************************************
 * Helpers to assist policy modules to  *
 * implement checksum functionality     *
 ****************************************/
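/*
 * each entry below is { human-readable name, crypto tfm name, digest size
 * in bytes } -- the field meanings (sht_name, sht_tfm_name, sht_size) are
 * inferred from how they are used later in this file.
 */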
static struct sptlrpc_hash_type hash_types[] = {
        [BULK_HASH_ALG_NULL]    = { "null",     "null",     0 },
        [BULK_HASH_ALG_ADLER32] = { "adler32",  "adler32",  4 },
        [BULK_HASH_ALG_CRC32]   = { "crc32",    "crc32",    4 },
        [BULK_HASH_ALG_MD5]     = { "md5",      "md5",      16 },
        [BULK_HASH_ALG_SHA1]    = { "sha1",     "sha1",     20 },
        [BULK_HASH_ALG_SHA256]  = { "sha256",   "sha256",   32 },
        [BULK_HASH_ALG_SHA384]  = { "sha384",   "sha384",   48 },
        [BULK_HASH_ALG_SHA512]  = { "sha512",   "sha512",   64 },
        [BULK_HASH_ALG_WP256]   = { "wp256",    "wp256",    32 },
        [BULK_HASH_ALG_WP384]   = { "wp384",    "wp384",    48 },
        [BULK_HASH_ALG_WP512]   = { "wp512",    "wp512",    64 },
};
const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg)
{
        struct sptlrpc_hash_type *ht;

        if (hash_alg < BULK_HASH_ALG_MAX) {
                ht = &hash_types[hash_alg];
                if (ht->sht_tfm_name)
                        return ht;
        }
        return NULL;
}
EXPORT_SYMBOL(sptlrpc_get_hash_type);
const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
        const struct sptlrpc_hash_type *ht;

        ht = sptlrpc_get_hash_type(hash_alg);
        if (ht)
                return ht->sht_name;
        return "unknown";
}
EXPORT_SYMBOL(sptlrpc_get_hash_name);
int bulk_sec_desc_size(__u8 hash_alg, int request, int read)
{
        int size = sizeof(struct ptlrpc_bulk_sec_desc);

        LASSERT(hash_alg < BULK_HASH_ALG_MAX);

        /* read requests don't need extra data */
        if (!(read && request))
                size += hash_types[hash_alg].sht_size;

        return size;
}
EXPORT_SYMBOL(bulk_sec_desc_size);
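/*
 * e.g. a bulk write request or a bulk read reply carrying an MD5 checksum
 * occupies sizeof(struct ptlrpc_bulk_sec_desc) + 16 bytes; only a bulk
 * read *request* (read && request) skips the checksum payload.
 */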
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        int size = msg->lm_buflens[offset];

        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        if (bsd == NULL) {
                CERROR("Invalid bulk sec desc: size %d\n", size);
                return -EINVAL;
        }

        /* nothing to swab */

        if (unlikely(bsd->bsd_version != 0)) {
                CERROR("Unexpected version %u\n", bsd->bsd_version);
                return -EPROTO;
        }

        if (unlikely(bsd->bsd_flags != 0)) {
                CERROR("Unexpected flags %x\n", bsd->bsd_flags);
                return -EPROTO;
        }

        if (unlikely(!sptlrpc_get_hash_type(bsd->bsd_hash_alg))) {
                CERROR("Unsupported checksum algorithm %u\n",
                       bsd->bsd_hash_alg);
                return -EINVAL;
        }

        if (unlikely(!sptlrpc_get_ciph_type(bsd->bsd_ciph_alg))) {
                CERROR("Unsupported cipher algorithm %u\n",
                       bsd->bsd_ciph_alg);
                return -EINVAL;
        }

        if (unlikely(size > sizeof(*bsd) &&
                     size < sizeof(*bsd) +
                            hash_types[bsd->bsd_hash_alg].sht_size)) {
                CERROR("Mal-formed checksum data: csum alg %u, size %d\n",
                       bsd->bsd_hash_alg, size);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);
#ifdef __KERNEL__

#ifdef HAVE_ADLER
static int do_bulk_checksum_adler32(struct ptlrpc_bulk_desc *desc, void *buf)
{
        cfs_page_t *page;
        int off;
        char *ptr;
        __u32 csum32 = 1;
        int len, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                page = desc->bd_iov[i].kiov_page;
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr = cfs_kmap(page) + off;
                len = desc->bd_iov[i].kiov_len;

                csum32 = adler32(csum32, ptr, len);

                cfs_kunmap(page);
        }

        csum32 = cpu_to_le32(csum32);
        memcpy(buf, &csum32, sizeof(csum32));
        return 0;
}
#endif
static int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
{
        cfs_page_t *page;
        int off;
        char *ptr;
        __u32 crc32 = ~0;
        int len, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                page = desc->bd_iov[i].kiov_page;
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr = cfs_kmap(page) + off;
                len = desc->bd_iov[i].kiov_len;

                crc32 = crc32_le(crc32, ptr, len);

                cfs_kunmap(page);
        }

        crc32 = cpu_to_le32(crc32);
        memcpy(buf, &crc32, sizeof(crc32));
        return 0;
}
static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        struct hash_desc hdesc;
        struct scatterlist *sl;
        int i, rc = 0, bytes = 0;

        LASSERT(alg > BULK_HASH_ALG_NULL &&
                alg < BULK_HASH_ALG_MAX);

        switch (alg) {
        case BULK_HASH_ALG_ADLER32:
#ifdef HAVE_ADLER
                return do_bulk_checksum_adler32(desc, buf);
#else
                CERROR("Adler32 not supported\n");
                return -EINVAL;
#endif
        case BULK_HASH_ALG_CRC32:
                return do_bulk_checksum_crc32(desc, buf);
        }

        hdesc.tfm = ll_crypto_alloc_hash(hash_types[alg].sht_tfm_name, 0, 0);
        if (hdesc.tfm == NULL) {
                CERROR("Unable to allocate TFM %s\n", hash_types[alg].sht_name);
                return -ENOMEM;
        }
        hdesc.flags = 0;

        OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
        if (sl == NULL) {
                rc = -ENOMEM;
                goto out_tfm;
        }

        for (i = 0; i < desc->bd_iov_count; i++) {
                sl[i].page = desc->bd_iov[i].kiov_page;
                sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                sl[i].length = desc->bd_iov[i].kiov_len;
                bytes += desc->bd_iov[i].kiov_len;
        }

        ll_crypto_hash_init(&hdesc);
        ll_crypto_hash_update(&hdesc, sl, bytes);
        ll_crypto_hash_final(&hdesc, buf);

        OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);

out_tfm:
        ll_crypto_free_hash(hdesc.tfm);
        return rc;
}
#else /* !__KERNEL__ */

static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        __u32 csum32;
        int i;

        LASSERT(alg == BULK_HASH_ALG_ADLER32 || alg == BULK_HASH_ALG_CRC32);

        if (alg == BULK_HASH_ALG_ADLER32)
                csum32 = 1;
        else
                csum32 = ~0;

        for (i = 0; i < desc->bd_iov_count; i++) {
                unsigned char *ptr = desc->bd_iov[i].iov_base;
                int len = desc->bd_iov[i].iov_len;

                switch (alg) {
                case BULK_HASH_ALG_ADLER32:
#ifdef HAVE_ADLER
                        csum32 = adler32(csum32, ptr, len);
#else
                        CERROR("Adler32 not supported\n");
                        return -EINVAL;
#endif
                        break;
                case BULK_HASH_ALG_CRC32:
                        csum32 = crc32_le(csum32, ptr, len);
                        break;
                }
        }

        csum32 = cpu_to_le32(csum32);
        memcpy(buf, &csum32, sizeof(csum32));
        return 0;
}

#endif /* __KERNEL__ */
/*
 * perform algorithm @alg checksum on @desc, store result in @buf.
 * if anything goes wrong, leave bsd_hash_alg as BULK_HASH_ALG_NULL.
 */
int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
                       struct ptlrpc_bulk_sec_desc *bsd, int bsdsize)
{
        int rc;

        LASSERT(alg < BULK_HASH_ALG_MAX);

        bsd->bsd_hash_alg = BULK_HASH_ALG_NULL;
        if (alg == BULK_HASH_ALG_NULL)
                return 0;

        LASSERT(bsdsize >= sizeof(*bsd) + hash_types[alg].sht_size);

        rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
        if (rc == 0)
                bsd->bsd_hash_alg = alg;

        return rc;
}
int verify_bulk_csum(struct ptlrpc_bulk_desc *desc, int read,
                     struct ptlrpc_bulk_sec_desc *bsdv, int bsdvsize,
                     struct ptlrpc_bulk_sec_desc *bsdr, int bsdrsize)
{
        char *csum_p;
        char *buf = NULL;
        int csum_size, rc = 0;

        LASSERT(bsdv);
        LASSERT(bsdv->bsd_hash_alg < BULK_HASH_ALG_MAX);

        if (bsdr)
                bsdr->bsd_hash_alg = BULK_HASH_ALG_NULL;

        if (bsdv->bsd_hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        /* for all supported algorithms */
        csum_size = hash_types[bsdv->bsd_hash_alg].sht_size;

        if (bsdvsize < sizeof(*bsdv) + csum_size) {
                CERROR("verifier size %d too small, require %d\n",
                       bsdvsize, (int) sizeof(*bsdv) + csum_size);
                return -EINVAL;
        }

        if (bsdr) {
                LASSERT(bsdrsize >= sizeof(*bsdr) + csum_size);
                csum_p = (char *) bsdr->bsd_csum;
        } else {
                OBD_ALLOC(buf, csum_size);
                if (buf == NULL)
                        return -ENOMEM;
                csum_p = buf;
        }

        rc = do_bulk_checksum(desc, bsdv->bsd_hash_alg, csum_p);

        if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
                CERROR("BAD %s CHECKSUM (%s), data mutated during "
                       "transfer!\n", read ? "READ" : "WRITE",
                       hash_types[bsdv->bsd_hash_alg].sht_name);
                rc = -EINVAL;
        } else {
                CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
                       read ? "read" : "write",
                       hash_types[bsdv->bsd_hash_alg].sht_name);
        }

        if (bsdr) {
                bsdr->bsd_hash_alg = bsdv->bsd_hash_alg;
                memcpy(bsdr->bsd_csum, csum_p, csum_size);
        } else {
                OBD_FREE(buf, csum_size);
        }

        return rc;
}
int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
                          __u32 alg, struct lustre_msg *rmsg, int roff)
{
        struct ptlrpc_bulk_sec_desc *bsdr;
        int rsize, rc = 0;

        rsize = rmsg->lm_buflens[roff];
        bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(alg < BULK_HASH_ALG_MAX);

        if (read) {
                bsdr->bsd_hash_alg = alg;
        } else {
                rc = generate_bulk_csum(desc, alg, bsdr, rsize);
                if (rc)
                        CERROR("bulk write: client failed to compute "
                               "checksum: %d\n", rc);

                /* for sending we only compute a wrong checksum instead
                 * of corrupting the data, so it is still correct on a redo */
                if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    bsdr->bsd_hash_alg != BULK_HASH_ALG_NULL)
                        bsdr->bsd_csum[0] ^= 0x1;
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_cli_request);
int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
                        struct lustre_msg *rmsg, int roff,
                        struct lustre_msg *vmsg, int voff)
{
        struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
        int rsize, vsize;

        rsize = rmsg->lm_buflens[roff];
        vsize = vmsg->lm_buflens[voff];
        bsdr = lustre_msg_buf(rmsg, roff, 0);
        bsdv = lustre_msg_buf(vmsg, voff, 0);

        if (bsdv == NULL || vsize < sizeof(*bsdv)) {
                CERROR("Invalid checksum verifier from server: size %d\n",
                       vsize);
                return -EINVAL;
        }

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(vsize >= sizeof(*bsdv));

        if (bsdr->bsd_hash_alg != bsdv->bsd_hash_alg) {
                CERROR("bulk %s: checksum algorithm mismatch: client "
                       "requested %s but server replied with %s. try to use "
                       "the new one for checksum verification\n",
                       read ? "read" : "write",
                       hash_types[bsdr->bsd_hash_alg].sht_name,
                       hash_types[bsdv->bsd_hash_alg].sht_name);
        }

        if (read) {
                return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
        } else {
                char *cli, *srv, *new = NULL;
                int csum_size = hash_types[bsdr->bsd_hash_alg].sht_size;

                LASSERT(bsdr->bsd_hash_alg < BULK_HASH_ALG_MAX);
                if (bsdr->bsd_hash_alg == BULK_HASH_ALG_NULL)
                        return 0;

                if (vsize < sizeof(*bsdv) + csum_size) {
                        CERROR("verifier size %d too small, require %d\n",
                               vsize, (int) sizeof(*bsdv) + csum_size);
                        return -EINVAL;
                }

                cli = (char *) (bsdr + 1);
                srv = (char *) (bsdv + 1);

                if (!memcmp(cli, srv, csum_size)) {
                        /* checksum confirmed */
                        CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                        return 0;
                }

                /* checksum mismatch: recompute a new one and compare it
                 * with the others, then give out the proper warning. */
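                /*
                 * a sketch of the three cases handled below:
                 *   new == srv : the pages changed on the client after we
                 *                checksummed them
                 *   new == cli : the pages changed in transit
                 *   otherwise  : both of the above happened
                 */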
                OBD_ALLOC(new, csum_size);
                if (new == NULL)
                        return -ENOMEM;

                do_bulk_checksum(desc, bsdr->bsd_hash_alg, new);

                if (!memcmp(new, srv, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "on the client after we checksummed them\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                } else if (!memcmp(new, cli, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                } else {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit, and the current page contents "
                               "don't match the originals and what the server "
                               "received\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                }
                OBD_FREE(new, csum_size);

                return -EINVAL;
        }
}
EXPORT_SYMBOL(bulk_csum_cli_reply);
#ifdef __KERNEL__
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char *ptr;
        unsigned int off, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len == 0)
                        continue;

                ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr[off] ^= 0x1;
                cfs_kunmap(desc->bd_iov[i].kiov_page);
                return;
        }
}
#else
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
}
#endif /* __KERNEL__ */
int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
                  struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
                  struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
{
        int rc;

        LASSERT(vsize >= sizeof(*bsdv));
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(bsdv && bsdr);

        if (read) {
                rc = generate_bulk_csum(desc, bsdv->bsd_hash_alg, bsdr, rsize);
                if (rc)
                        CERROR("bulk read: server failed to generate %s "
                               "checksum: %d\n",
                               hash_types[bsdv->bsd_hash_alg].sht_name, rc);

                /* corrupt the data after we compute the checksum, to
                 * simulate an OST->client data error */
                if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        } else {
                rc = verify_bulk_csum(desc, 0, bsdv, vsize, bsdr, rsize);
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_svc);
/****************************************
 * Helpers to assist policy modules to  *
 * implement encryption functionality   *
 ****************************************/

/* FIXME */
#ifndef __KERNEL__
#define CRYPTO_TFM_MODE_ECB     (0)
#define CRYPTO_TFM_MODE_CBC     (1)
#endif
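/*
 * each entry below is { human-readable name, crypto tfm name, flags,
 * IV size, key size } -- the field meanings (sct_name, sct_tfm_name and
 * friends) are inferred from how sptlrpc uses them; sizes are in bytes.
 */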
static struct sptlrpc_ciph_type cipher_types[] = {
        [BULK_CIPH_ALG_NULL]       = {
                "null",         "null",         0,  0,  0
        },
        [BULK_CIPH_ALG_ARC4]       = {
                "arc4",         "ecb(arc4)",    0,  0, 16
        },
        [BULK_CIPH_ALG_AES128]     = {
                "aes128",       "cbc(aes)",     0, 16, 16
        },
        [BULK_CIPH_ALG_AES192]     = {
                "aes192",       "cbc(aes)",     0, 16, 24
        },
        [BULK_CIPH_ALG_AES256]     = {
                "aes256",       "cbc(aes)",     0, 16, 32
        },
        [BULK_CIPH_ALG_CAST128]    = {
                "cast128",      "cbc(cast5)",   0,  8, 16
        },
        [BULK_CIPH_ALG_CAST256]    = {
                "cast256",      "cbc(cast6)",   0, 16, 32
        },
        [BULK_CIPH_ALG_TWOFISH128] = {
                "twofish128",   "cbc(twofish)", 0, 16, 16
        },
        [BULK_CIPH_ALG_TWOFISH256] = {
                "twofish256",   "cbc(twofish)", 0, 16, 32
        },
};
const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg)
{
        struct sptlrpc_ciph_type *ct;

        if (ciph_alg < BULK_CIPH_ALG_MAX) {
                ct = &cipher_types[ciph_alg];
                if (ct->sct_tfm_name)
                        return ct;
        }
        return NULL;
}
EXPORT_SYMBOL(sptlrpc_get_ciph_type);
const char *sptlrpc_get_ciph_name(__u8 ciph_alg)
{
        const struct sptlrpc_ciph_type *ct;

        ct = sptlrpc_get_ciph_type(ciph_alg);
        if (ct)
                return ct->sct_name;
        return "unknown";
}
EXPORT_SYMBOL(sptlrpc_get_ciph_name);