/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006-2007 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_SEC
#include <libcfs/libcfs.h>
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
#else
#include <linux/crypto.h>
#endif

#include <obd_cksum.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"
/****************************************
 * bulk encryption page pools           *
 ****************************************/
#ifdef __KERNEL__

#define PTRS_PER_PAGE   (CFS_PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL  (PTRS_PER_PAGE)

#define IDLE_IDX_MAX            (100)
#define IDLE_IDX_WEIGHT         (3)

#define CACHE_QUIESCENT_PERIOD  (20)
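
/*
 * illustrative sizing note (not from the original source): each pool is a
 * single page of page pointers, so assuming 4 KiB pages and 8-byte pointers,
 * PTRS_PER_PAGE == 4096 / 8 == 512, i.e. one pool tracks up to 512 pages
 * (2 MiB) of bulk encryption buffers.
 */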
static struct ptlrpc_enc_page_pool {
        /*
         * constants
         */
        unsigned long    epp_max_pages;   /* maximum # of pages we can hold, const */
        unsigned int     epp_max_pools;   /* number of pools, const */

        /*
         * wait queue in case of not enough free pages.
         */
        cfs_waitq_t      epp_waitq;       /* waiting threads */
        unsigned int     epp_waitqlen;    /* wait queue length */
        unsigned long    epp_pages_short; /* # of pages wanted by queued users */
        unsigned int     epp_growing:1;   /* during adding pages */

        /*
         * index indicating how idle the pools are, from 0 to IDLE_IDX_MAX.
         * it is updated each time pages are taken from the pools, not based
         * on wall-clock time, so the index may still be low after the system
         * has been idle for a while if there was no pool activity.
         */
        unsigned long    epp_idle_idx;

        /* last shrink time due to memory pressure */
        long             epp_last_shrink;
        long             epp_last_access;

        /*
         * in-pool pages bookkeeping
         */
        spinlock_t       epp_lock;        /* protect following fields */
        unsigned long    epp_total_pages; /* total pages in pools */
        unsigned long    epp_free_pages;  /* current pages available */

        /*
         * statistics
         */
        unsigned long    epp_st_max_pages;  /* # of pages ever reached */
        unsigned int     epp_st_grows;      /* # of grows */
        unsigned int     epp_st_grow_fails; /* # of add pages failures */
        unsigned int     epp_st_shrinks;    /* # of shrinks */
        unsigned long    epp_st_access;     /* # of accesses */
        unsigned long    epp_st_missings;   /* # of cache misses */
        unsigned long    epp_st_lowfree;    /* lowest free pages reached */
        unsigned int     epp_st_max_wqlen;  /* highest waitqueue length */
        cfs_time_t       epp_st_max_wait;   /* in jiffies */

        /*
         * pointers to pools
         */
        cfs_page_t    ***epp_pools;
} page_pools;
const int pools_shrinker_seeks = DEFAULT_SEEKS;
static struct shrinker *pools_shrinker = NULL;
/*
 * /proc/fs/lustre/sptlrpc/encrypt_page_pools
 */
int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                               int *eof, void *data)
{
        int     rc;
        spin_lock(&page_pools.epp_lock);

        rc = snprintf(page, count,
                      "physical pages:          %lu\n"
                      "pages per pool:          %lu\n"
                      "max pages:               %lu\n"
                      "max pools:               %u\n"
                      "total pages:             %lu\n"
                      "total free:              %lu\n"
                      "idle index:              %lu/100\n"
                      "last shrink:             %lds\n"
                      "last access:             %lds\n"
                      "max pages reached:       %lu\n"
                      "grows:                   %u\n"
                      "grows failure:           %u\n"
                      "shrinks:                 %u\n"
                      "cache access:            %lu\n"
                      "cache missing:           %lu\n"
                      "low free mark:           %lu\n"
                      "max waitqueue depth:     %u\n"
                      "max wait time:           "CFS_TIME_T"/%u\n",
                      num_physpages,
                      PAGES_PER_POOL,
                      page_pools.epp_max_pages,
                      page_pools.epp_max_pools,
                      page_pools.epp_total_pages,
                      page_pools.epp_free_pages,
                      page_pools.epp_idle_idx,
                      cfs_time_current_sec() - page_pools.epp_last_shrink,
                      cfs_time_current_sec() - page_pools.epp_last_access,
                      page_pools.epp_st_max_pages,
                      page_pools.epp_st_grows,
                      page_pools.epp_st_grow_fails,
                      page_pools.epp_st_shrinks,
                      page_pools.epp_st_access,
                      page_pools.epp_st_missings,
                      page_pools.epp_st_lowfree,
                      page_pools.epp_st_max_wqlen,
                      page_pools.epp_st_max_wait, HZ);

        spin_unlock(&page_pools.epp_lock);

        return rc;
}
static void enc_pools_release_free_pages(long npages)
{
        int     p_idx, g_idx;
        int     p_idx_max1, p_idx_max2;

        LASSERT(npages <= page_pools.epp_free_pages);
        LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);

        /* max pool index before the release */
        p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;

        page_pools.epp_free_pages -= npages;
        page_pools.epp_total_pages -= npages;

        /* max pool index after the release */
        p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
                     ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        LASSERT(page_pools.epp_pools[p_idx]);

        while (npages--) {
                LASSERT(page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);

                cfs_free_page(page_pools.epp_pools[p_idx][g_idx]);
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        /* free unused pools */
        while (p_idx_max1 < p_idx_max2) {
                LASSERT(page_pools.epp_pools[p_idx_max2]);
                OBD_FREE(page_pools.epp_pools[p_idx_max2], CFS_PAGE_SIZE);
                page_pools.epp_pools[p_idx_max2] = NULL;
                p_idx_max2--;
        }
}
/*
 * could be called frequently for query (@nr_to_scan == 0).
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
{
        unsigned long ret;

        spin_lock(&page_pools.epp_lock);

        if (nr_to_scan > page_pools.epp_free_pages)
                nr_to_scan = page_pools.epp_free_pages;

        if (nr_to_scan > 0) {
                enc_pools_release_free_pages(nr_to_scan);
                CDEBUG(D_SEC, "released %d pages, %ld left\n",
                       nr_to_scan, page_pools.epp_free_pages);

                page_pools.epp_st_shrinks++;
                page_pools.epp_last_shrink = cfs_time_current_sec();
        }

        /*
         * try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool
         */
        if (page_pools.epp_free_pages <= PTLRPC_MAX_BRW_PAGES) {
                ret = 0;
                goto out_unlock;
        }

        /*
         * if no pool access for a long time, we consider it's fully idle
         */
        if (cfs_time_current_sec() - page_pools.epp_last_access >
            CACHE_QUIESCENT_PERIOD)
                page_pools.epp_idle_idx = IDLE_IDX_MAX;

        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
        ret = (page_pools.epp_free_pages * page_pools.epp_idle_idx /
               IDLE_IDX_MAX);
        if (page_pools.epp_free_pages - ret < PTLRPC_MAX_BRW_PAGES)
                ret = page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES;

out_unlock:
        spin_unlock(&page_pools.epp_lock);
        return ret;
}
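
/*
 * worked example (illustrative, assuming PTLRPC_MAX_BRW_PAGES == 256):
 * with 1024 free pages and epp_idle_idx == 50, a query returns
 * 1024 * 50 / 100 == 512 reclaimable pages; since 1024 - 512 == 512 >= 256,
 * the PTLRPC_MAX_BRW_PAGES reserve is untouched. had the index been 100
 * (fully idle), 1024 - 1024 < 256 would clamp the answer to
 * 1024 - 256 == 768.
 */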
int npages_to_npools(unsigned long npages)
{
        return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
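
/*
 * e.g. (illustrative): with 4 KiB pages and 8-byte pointers,
 * PAGES_PER_POOL == 512, so npages_to_npools(1000) == (1000 + 511) / 512 == 2.
 */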
/*
 * return how many pages were cleaned up.
 */
static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
{
        unsigned long cleaned = 0;
        int           i, j;

        for (i = 0; i < npools; i++) {
                if (pools[i]) {
                        for (j = 0; j < PAGES_PER_POOL; j++) {
                                if (pools[i][j]) {
                                        cfs_free_page(pools[i][j]);
                                        cleaned++;
                                }
                        }
                        OBD_FREE(pools[i], CFS_PAGE_SIZE);
                        pools[i] = NULL;
                }
        }

        return cleaned;
}
/*
 * merge @npools pools pointed at by @pools, containing @npages new pages,
 * into the current pools.
 *
 * we have options to avoid most memory copies with some tricks, but we
 * choose the simplest way to avoid complexity. it's not frequently called.
 */
static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
{
        int     freeslot;
        int     op_idx, np_idx, og_idx, ng_idx;
        int     cur_npools, end_npools;

        LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
        LASSERT(npages_to_npools(npages) == npools);

        spin_lock(&page_pools.epp_lock);

        /*
         * (1) fill all the free slots of current pools.
         */
        /* free slots are those left by rented pages, plus the extra ones with
         * index >= total_pages, located at the tail of the last pool. */
        freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
        if (freeslot != 0)
                freeslot = PAGES_PER_POOL - freeslot;
        freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;

        op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        np_idx = npools - 1;
        ng_idx = (npages - 1) % PAGES_PER_POOL;

        while (freeslot) {
                LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
                LASSERT(pools[np_idx][ng_idx] != NULL);

                page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
                pools[np_idx][ng_idx] = NULL;

                freeslot--;

                if (++og_idx == PAGES_PER_POOL) {
                        op_idx++;
                        og_idx = 0;
                }
                if (--ng_idx < 0) {
                        if (np_idx == 0)
                                break;
                        np_idx--;
                        ng_idx = PAGES_PER_POOL - 1;
                }
        }

        /*
         * (2) add pools if needed.
         */
        cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        end_npools = (page_pools.epp_total_pages + npages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        LASSERT(end_npools <= page_pools.epp_max_pools);

        np_idx = 0;
        while (cur_npools < end_npools) {
                LASSERT(page_pools.epp_pools[cur_npools] == NULL);
                LASSERT(np_idx < npools);
                LASSERT(pools[np_idx] != NULL);

                page_pools.epp_pools[cur_npools++] = pools[np_idx];
                pools[np_idx++] = NULL;
        }

        page_pools.epp_total_pages += npages;
        page_pools.epp_free_pages += npages;
        page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        if (page_pools.epp_total_pages > page_pools.epp_st_max_pages)
                page_pools.epp_st_max_pages = page_pools.epp_total_pages;

        CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
               page_pools.epp_total_pages);

        spin_unlock(&page_pools.epp_lock);
}
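
/*
 * worked example for step (1) above (illustrative): with
 * PAGES_PER_POOL == 512, epp_total_pages == 1000 and epp_free_pages == 100,
 * 1000 % 512 == 488 gives 512 - 488 == 24 unused tail slots in the last
 * pool, plus 1000 - 100 == 900 slots left by rented pages, so
 * freeslot == 924 new pages can be placed before any pool must be added.
 */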
static int enc_pools_add_pages(int npages)
{
        static DECLARE_MUTEX(sem_add_pages);
        cfs_page_t   ***pools;
        int             npools, alloced = 0;
        int             i, j, rc = -ENOMEM;

        if (npages < PTLRPC_MAX_BRW_PAGES)
                npages = PTLRPC_MAX_BRW_PAGES;

        down(&sem_add_pages);

        if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
                npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
        LASSERT(npages > 0);

        page_pools.epp_st_grows++;

        npools = npages_to_npools(npages);
        OBD_ALLOC(pools, npools * sizeof(*pools));
        if (pools == NULL)
                goto out;

        for (i = 0; i < npools; i++) {
                OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
                if (pools[i] == NULL)
                        goto out_pools;

                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
                        pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
                                                     CFS_ALLOC_HIGH);
                        if (pools[i][j] == NULL)
                                goto out_pools;

                        alloced++;
                }
        }

        enc_pools_insert(pools, npools, npages);
        CDEBUG(D_SEC, "added %d pages into pools\n", npages);
        rc = 0;

out_pools:
        enc_pools_cleanup(pools, npools);
        OBD_FREE(pools, npools * sizeof(*pools));
out:
        if (rc) {
                page_pools.epp_st_grow_fails++;
                CERROR("Failed to allocate %d enc pages\n", npages);
        }

        up(&sem_add_pages);
        return rc;
}
static inline void enc_pools_wakeup(void)
{
        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(page_pools.epp_waitqlen > 0);
                LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
                cfs_waitq_broadcast(&page_pools.epp_waitq);
        }
}
static int enc_pools_should_grow(int page_needed, long now)
{
        /* don't grow if someone else is growing the pools right now,
         * or the pools have reached full capacity.
         */
        if (page_pools.epp_growing ||
            page_pools.epp_total_pages == page_pools.epp_max_pages)
                return 0;

        /* if the total number of pages is not enough, we need to grow */
        if (page_pools.epp_total_pages < page_needed)
                return 1;

        /* if we just did a shrink due to memory pressure, we'd better
         * wait a while before growing again.
         */
        if (now - page_pools.epp_last_shrink < 2)
                return 0;

        /*
         * we could perhaps also consider other factors here, like wait
         * queue length, idle index, etc.
         */

        /* grow the pools in any other case */
        return 1;
}
479 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
481 cfs_waitlink_t waitlink;
482 unsigned long this_idle = -1;
488 LASSERT(desc->bd_max_iov > 0);
489 LASSERT(desc->bd_max_iov <= page_pools.epp_max_pages);
491 /* resent bulk, enc pages might have been allocated previously */
492 if (desc->bd_enc_pages != NULL)
495 OBD_ALLOC(desc->bd_enc_pages,
496 desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
497 if (desc->bd_enc_pages == NULL)
500 spin_lock(&page_pools.epp_lock);
        page_pools.epp_st_access++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_max_iov)) {
                if (tick == 0)
                        tick = cfs_time_current();

                now = cfs_time_current_sec();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_max_iov;

                if (enc_pools_should_grow(desc->bd_max_iov, now)) {
                        page_pools.epp_growing = 1;

                        spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        spin_lock(&page_pools.epp_lock);

                        page_pools.epp_growing = 0;
                } else {
                        if (++page_pools.epp_waitqlen >
                            page_pools.epp_st_max_wqlen)
                                page_pools.epp_st_max_wqlen =
                                        page_pools.epp_waitqlen;

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        cfs_waitlink_init(&waitlink);
                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);

                        spin_unlock(&page_pools.epp_lock);
                        cfs_waitq_wait(&waitlink, CFS_TASK_UNINT);
                        spin_lock(&page_pools.epp_lock);
                        cfs_waitq_del(&page_pools.epp_waitq, &waitlink);

                        LASSERT(page_pools.epp_waitqlen > 0);
                        page_pools.epp_waitqlen--;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_max_iov);
                page_pools.epp_pages_short -= desc->bd_max_iov;

                this_idle = 0;
                goto again;
        }
        /* record max wait time */
        if (unlikely(tick != 0)) {
                tick = cfs_time_current() - tick;
                if (tick > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick;
        }

        /* proceed with the rest of allocation */
        page_pools.epp_free_pages -= desc->bd_max_iov;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_pages[i] = page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;
        /*
         * new idle index = (old * weight + new) / (weight + 1)
         */
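        /*
         * e.g. (illustrative): with IDLE_IDX_WEIGHT == 3, an old index of
         * 40 and an instantaneous value of 80 yield
         * (40 * 3 + 80) / (3 + 1) == 50.
         */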
        if (this_idle == -1) {
                this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
                            page_pools.epp_total_pages;
        }

        page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
                                   this_idle) /
                                  (IDLE_IDX_WEIGHT + 1);

        page_pools.epp_last_access = cfs_time_current_sec();

        spin_unlock(&page_pools.epp_lock);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
        int     p_idx, g_idx;
        int     i;

        if (desc->bd_enc_pages == NULL)
                return;

        if (desc->bd_max_iov == 0)
                return;

        spin_lock(&page_pools.epp_lock);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        LASSERT(page_pools.epp_free_pages + desc->bd_max_iov <=
                page_pools.epp_total_pages);
        LASSERT(page_pools.epp_pools[p_idx]);

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(desc->bd_enc_pages[i] != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

                page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_pages[i];

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        page_pools.epp_free_pages += desc->bd_max_iov;

        enc_pools_wakeup();

        spin_unlock(&page_pools.epp_lock);

        OBD_FREE(desc->bd_enc_pages,
                 desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        desc->bd_enc_pages = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
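
/*
 * illustrative pairing of the two calls above (not part of the original
 * file): a bulk security policy would typically do something like
 *
 *      if (sptlrpc_enc_pool_get_pages(desc))      // may block until pages free
 *              return -ENOMEM;
 *      ... encrypt bd_iov into bd_enc_pages, transfer the bulk ...
 *      sptlrpc_enc_pool_put_pages(desc);          // return pages, wake waiters
 */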
/*
 * we don't do much for add_user/del_user anymore, except adding some
 * initial pages in add_user() if the current pools are empty. the rest is
 * handled by the pools' self-adaptation.
 */
int sptlrpc_enc_pool_add_user(void)
{
        int     need_grow = 0;

        spin_lock(&page_pools.epp_lock);
        if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
                page_pools.epp_growing = 1;
                need_grow = 1;
        }
        spin_unlock(&page_pools.epp_lock);

        if (need_grow) {
                enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES);

                spin_lock(&page_pools.epp_lock);
                page_pools.epp_growing = 0;
                enc_pools_wakeup();
                spin_unlock(&page_pools.epp_lock);
        }
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
int sptlrpc_enc_pool_del_user(void)
{
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);
static inline void enc_pools_alloc(void)
{
        LASSERT(page_pools.epp_max_pools);
        /*
         * on a system with huge memory but a small page size, this might
         * lead to a high-order allocation. but that's not common, and we
         * assume memory is not too fragmented at module load time.
         */
        OBD_ALLOC(page_pools.epp_pools,
                  page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
}
static inline void enc_pools_free(void)
{
        LASSERT(page_pools.epp_max_pools);
        LASSERT(page_pools.epp_pools);

        OBD_FREE(page_pools.epp_pools,
                 page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
}
int sptlrpc_enc_pool_init(void)
{
        /*
         * maximum capacity is 1/8 of total physical memory.
         * is 1/8 a good number?
         */
        page_pools.epp_max_pages = num_physpages / 8;
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

        cfs_waitq_init(&page_pools.epp_waitq);
        page_pools.epp_waitqlen = 0;
        page_pools.epp_pages_short = 0;

        page_pools.epp_growing = 0;

        page_pools.epp_idle_idx = 0;
        page_pools.epp_last_shrink = cfs_time_current_sec();
        page_pools.epp_last_access = cfs_time_current_sec();

        spin_lock_init(&page_pools.epp_lock);
        page_pools.epp_total_pages = 0;
        page_pools.epp_free_pages = 0;

        page_pools.epp_st_max_pages = 0;
        page_pools.epp_st_grows = 0;
        page_pools.epp_st_grow_fails = 0;
        page_pools.epp_st_shrinks = 0;
        page_pools.epp_st_access = 0;
        page_pools.epp_st_missings = 0;
        page_pools.epp_st_lowfree = 0;
        page_pools.epp_st_max_wqlen = 0;
        page_pools.epp_st_max_wait = 0;

        enc_pools_alloc();
        if (page_pools.epp_pools == NULL)
                return -ENOMEM;

        pools_shrinker = set_shrinker(pools_shrinker_seeks, enc_pools_shrink);
        if (pools_shrinker == NULL) {
                enc_pools_free();
                return -ENOMEM;
        }

        return 0;
}
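
/*
 * sizing example (illustrative): on a node with 4 GiB of RAM and 4 KiB
 * pages, num_physpages == 1048576, so epp_max_pages == 131072, i.e. up
 * to 512 MiB of memory may be dedicated to bulk encryption pages.
 */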
void sptlrpc_enc_pool_fini(void)
{
        unsigned long cleaned, npools;

        LASSERT(pools_shrinker);
        LASSERT(page_pools.epp_pools);
        LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

        remove_shrinker(pools_shrinker);

        npools = npages_to_npools(page_pools.epp_total_pages);
        cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
        LASSERT(cleaned == page_pools.epp_total_pages);

        enc_pools_free();

        if (page_pools.epp_st_access > 0) {
                CWARN("max pages %lu, grows %u, grow fails %u, shrinks %u, "
                      "access %lu, missing %lu, max qlen %u, max wait "
                      CFS_TIME_T"/%u\n",
                      page_pools.epp_st_max_pages, page_pools.epp_st_grows,
                      page_pools.epp_st_grow_fails,
                      page_pools.epp_st_shrinks, page_pools.epp_st_access,
                      page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
                      page_pools.epp_st_max_wait, HZ);
        }
}
#else /* !__KERNEL__ */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        return 0;
}

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
}

int sptlrpc_enc_pool_init(void)
{
        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
}

#endif /* __KERNEL__ */
/****************************************
 * Helpers to assist policy modules to  *
 * implement checksum functionality     *
 ****************************************/
static struct sptlrpc_hash_type hash_types[] = {
        [BULK_HASH_ALG_NULL]    = { "null",    "null",    0 },
        [BULK_HASH_ALG_ADLER32] = { "adler32", "adler32", 4 },
        [BULK_HASH_ALG_CRC32]   = { "crc32",   "crc32",   4 },
        [BULK_HASH_ALG_MD5]     = { "md5",     "md5",     16 },
        [BULK_HASH_ALG_SHA1]    = { "sha1",    "sha1",    20 },
        [BULK_HASH_ALG_SHA256]  = { "sha256",  "sha256",  32 },
        [BULK_HASH_ALG_SHA384]  = { "sha384",  "sha384",  48 },
        [BULK_HASH_ALG_SHA512]  = { "sha512",  "sha512",  64 },
        [BULK_HASH_ALG_WP256]   = { "wp256",   "wp256",   32 },
        [BULK_HASH_ALG_WP384]   = { "wp384",   "wp384",   48 },
        [BULK_HASH_ALG_WP512]   = { "wp512",   "wp512",   64 },
};
const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg)
{
        struct sptlrpc_hash_type *ht;

        if (hash_alg < BULK_HASH_ALG_MAX) {
                ht = &hash_types[hash_alg];
                if (ht->sht_tfm_name)
                        return ht;
        }
        return NULL;
}
EXPORT_SYMBOL(sptlrpc_get_hash_type);
const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
        const struct sptlrpc_hash_type *ht;

        ht = sptlrpc_get_hash_type(hash_alg);
        if (ht)
                return ht->sht_name;
        else
                return "unknown";
}
EXPORT_SYMBOL(sptlrpc_get_hash_name);
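
/*
 * illustrative lookup (not from the original source): callers resolve an
 * algorithm id to a linux crypto tfm name before allocating a hash, e.g.
 *
 *      const struct sptlrpc_hash_type *ht;
 *
 *      ht = sptlrpc_get_hash_type(BULK_HASH_ALG_SHA1);
 *      if (ht != NULL)   // ht->sht_tfm_name == "sha1", ht->sht_size == 20
 *              tfm = ll_crypto_alloc_hash(ht->sht_tfm_name, 0, 0);
 */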
int bulk_sec_desc_size(__u8 hash_alg, int request, int read)
{
        int size = sizeof(struct ptlrpc_bulk_sec_desc);

        LASSERT(hash_alg < BULK_HASH_ALG_MAX);

        /* read requests don't need to carry extra checksum data */
        if (!(read && request))
                size += hash_types[hash_alg].sht_size;

        return size;
}
EXPORT_SYMBOL(bulk_sec_desc_size);
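
/*
 * e.g. (illustrative): for a write request carrying a sha1 checksum,
 * bulk_sec_desc_size(BULK_HASH_ALG_SHA1, 1, 0) returns
 * sizeof(struct ptlrpc_bulk_sec_desc) + 20; only a read *request*
 * (read == 1 && request == 1) omits the 20 checksum bytes.
 */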
int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        int    size = msg->lm_buflens[offset];

        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        if (bsd == NULL) {
                CERROR("Invalid bulk sec desc: size %d\n", size);
                return -EINVAL;
        }

        /* nothing to swab */

        if (unlikely(bsd->bsd_version != 0)) {
                CERROR("Unexpected version %u\n", bsd->bsd_version);
                return -EPROTO;
        }

        if (unlikely(bsd->bsd_flags != 0)) {
                CERROR("Unexpected flags %x\n", bsd->bsd_flags);
                return -EPROTO;
        }

        if (unlikely(!sptlrpc_get_hash_type(bsd->bsd_hash_alg))) {
                CERROR("Unsupported checksum algorithm %u\n",
                       bsd->bsd_hash_alg);
                return -EINVAL;
        }

        if (unlikely(!sptlrpc_get_ciph_type(bsd->bsd_ciph_alg))) {
                CERROR("Unsupported cipher algorithm %u\n",
                       bsd->bsd_ciph_alg);
                return -EINVAL;
        }

        if (unlikely(size > sizeof(*bsd)) &&
            size < sizeof(*bsd) + hash_types[bsd->bsd_hash_alg].sht_size) {
                CERROR("Mal-formed checksum data: csum alg %u, size %d\n",
                       bsd->bsd_hash_alg, size);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);
#ifdef __KERNEL__

#ifdef HAVE_ADLER
static int do_bulk_checksum_adler32(struct ptlrpc_bulk_desc *desc, void *buf)
{
        cfs_page_t *page;
        int         off;
        char       *ptr;
        __u32       adler32 = 1;
        int         len, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                page = desc->bd_iov[i].kiov_page;
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr = cfs_kmap(page) + off;
                len = desc->bd_iov[i].kiov_len;

                adler32 = adler32(adler32, ptr, len);

                cfs_kunmap(page);
        }

        adler32 = cpu_to_le32(adler32);
        memcpy(buf, &adler32, sizeof(adler32));
        return 0;
}
#endif
static int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
{
        cfs_page_t *page;
        int         off;
        char       *ptr;
        __u32       crc32 = ~0;
        int         len, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                page = desc->bd_iov[i].kiov_page;
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr = cfs_kmap(page) + off;
                len = desc->bd_iov[i].kiov_len;

                crc32 = crc32_le(crc32, ptr, len);

                cfs_kunmap(page);
        }

        crc32 = cpu_to_le32(crc32);
        memcpy(buf, &crc32, sizeof(crc32));
        return 0;
}
static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        struct hash_desc    hdesc;
        struct scatterlist *sl;
        int i, rc = 0, bytes = 0;

        LASSERT(alg > BULK_HASH_ALG_NULL &&
                alg < BULK_HASH_ALG_MAX);

        switch (alg) {
        case BULK_HASH_ALG_ADLER32:
#ifdef HAVE_ADLER
                return do_bulk_checksum_adler32(desc, buf);
#else
                CERROR("Adler32 not supported\n");
                return -EINVAL;
#endif
        case BULK_HASH_ALG_CRC32:
                return do_bulk_checksum_crc32(desc, buf);
        }

        hdesc.tfm = ll_crypto_alloc_hash(hash_types[alg].sht_tfm_name, 0, 0);
        if (hdesc.tfm == NULL) {
                CERROR("Unable to allocate TFM %s\n", hash_types[alg].sht_name);
                return -ENOMEM;
        }
        hdesc.flags = 0;

        OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
        if (sl == NULL) {
                rc = -ENOMEM;
                goto out_tfm;
        }

        for (i = 0; i < desc->bd_iov_count; i++) {
                sl[i].page = desc->bd_iov[i].kiov_page;
                sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                sl[i].length = desc->bd_iov[i].kiov_len;
                bytes += desc->bd_iov[i].kiov_len;
        }

        ll_crypto_hash_init(&hdesc);
        ll_crypto_hash_update(&hdesc, sl, bytes);
        ll_crypto_hash_final(&hdesc, buf);

        OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);

out_tfm:
        ll_crypto_free_hash(hdesc.tfm);
        return rc;
}
#else /* !__KERNEL__ */

static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        __u32   csum32;
        int     i;

        LASSERT(alg == BULK_HASH_ALG_ADLER32 || alg == BULK_HASH_ALG_CRC32);

        if (alg == BULK_HASH_ALG_ADLER32)
                csum32 = 1;
        else
                csum32 = ~0;

        for (i = 0; i < desc->bd_iov_count; i++) {
                char *ptr = desc->bd_iov[i].iov_base;
                int   len = desc->bd_iov[i].iov_len;

                switch (alg) {
                case BULK_HASH_ALG_ADLER32:
#ifdef HAVE_ADLER
                        csum32 = adler32(csum32, ptr, len);
#else
                        CERROR("Adler32 not supported\n");
                        return -EINVAL;
#endif
                        break;
                case BULK_HASH_ALG_CRC32:
                        csum32 = crc32_le(csum32, ptr, len);
                        break;
                }
        }

        csum32 = cpu_to_le32(csum32);
        memcpy(buf, &csum32, sizeof(csum32));
        return 0;
}

#endif /* __KERNEL__ */
/*
 * perform algorithm @alg checksum on @desc, store the result in
 * @bsd->bsd_csum. if anything goes wrong, leave bsd_hash_alg as
 * BULK_HASH_ALG_NULL.
 */
static
int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
                       struct ptlrpc_bulk_sec_desc *bsd, int bsdsize)
{
        int rc;

        LASSERT(bsd);
        LASSERT(alg < BULK_HASH_ALG_MAX);

        bsd->bsd_hash_alg = BULK_HASH_ALG_NULL;

        if (alg == BULK_HASH_ALG_NULL)
                return 0;

        LASSERT(bsdsize >= sizeof(*bsd) + hash_types[alg].sht_size);

        rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
        if (rc == 0)
                bsd->bsd_hash_alg = alg;

        return rc;
}
static
int verify_bulk_csum(struct ptlrpc_bulk_desc *desc, int read,
                     struct ptlrpc_bulk_sec_desc *bsdv, int bsdvsize,
                     struct ptlrpc_bulk_sec_desc *bsdr, int bsdrsize)
{
        char *csum_p;
        char *buf = NULL;
        int   csum_size, rc = 0;

        LASSERT(bsdv);
        LASSERT(bsdv->bsd_hash_alg < BULK_HASH_ALG_MAX);

        if (bsdr)
                bsdr->bsd_hash_alg = BULK_HASH_ALG_NULL;

        if (bsdv->bsd_hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        /* for all supported algorithms */
        csum_size = hash_types[bsdv->bsd_hash_alg].sht_size;

        if (bsdvsize < sizeof(*bsdv) + csum_size) {
                CERROR("verifier size %d too small, require %d\n",
                       bsdvsize, (int) sizeof(*bsdv) + csum_size);
                return -EINVAL;
        }

        if (bsdr) {
                LASSERT(bsdrsize >= sizeof(*bsdr) + csum_size);
                csum_p = (char *) bsdr->bsd_csum;
        } else {
                OBD_ALLOC(buf, csum_size);
                if (buf == NULL)
                        return -ENOMEM;
                csum_p = buf;
        }

        rc = do_bulk_checksum(desc, bsdv->bsd_hash_alg, csum_p);

        if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
                CERROR("BAD %s CHECKSUM (%s), data mutated during "
                       "transfer!\n", read ? "READ" : "WRITE",
                       hash_types[bsdv->bsd_hash_alg].sht_name);
                rc = -EINVAL;
        } else {
                CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
                       read ? "read" : "write",
                       hash_types[bsdv->bsd_hash_alg].sht_name);
        }

        if (bsdr) {
                bsdr->bsd_hash_alg = bsdv->bsd_hash_alg;
                memcpy(bsdr->bsd_csum, csum_p, csum_size);
        } else {
                LASSERT(buf);
                OBD_FREE(buf, csum_size);
        }

        return rc;
}
int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
                          __u32 alg, struct lustre_msg *rmsg, int roff)
{
        struct ptlrpc_bulk_sec_desc *bsdr;
        int    rsize, rc = 0;

        rsize = rmsg->lm_buflens[roff];
        bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(alg < BULK_HASH_ALG_MAX);

        if (read) {
                bsdr->bsd_hash_alg = alg;
        } else {
                rc = generate_bulk_csum(desc, alg, bsdr, rsize);
                if (rc)
                        CERROR("bulk write: client failed to compute "
                               "checksum: %d\n", rc);

                /* For sending we only compute the wrong checksum instead
                 * of corrupting the data, so it is still correct on a redo */
                if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    bsdr->bsd_hash_alg != BULK_HASH_ALG_NULL)
                        bsdr->bsd_csum[0] ^= 0x1;
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_cli_request);
int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
                        struct lustre_msg *rmsg, int roff,
                        struct lustre_msg *vmsg, int voff)
{
        struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
        int    rsize, vsize;

        rsize = rmsg->lm_buflens[roff];
        vsize = vmsg->lm_buflens[voff];
        bsdr = lustre_msg_buf(rmsg, roff, 0);
        bsdv = lustre_msg_buf(vmsg, voff, 0);

        if (bsdv == NULL || vsize < sizeof(*bsdv)) {
                CERROR("Invalid checksum verifier from server: size %d\n",
                       vsize);
                return -EINVAL;
        }

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(vsize >= sizeof(*bsdv));

        if (bsdr->bsd_hash_alg != bsdv->bsd_hash_alg) {
                CERROR("bulk %s: checksum algorithm mismatch: client "
                       "requested %s but server replied with %s; using the "
                       "server's algorithm for verification\n",
                       read ? "read" : "write",
                       hash_types[bsdr->bsd_hash_alg].sht_name,
                       hash_types[bsdv->bsd_hash_alg].sht_name);
        }

        if (read) {
                return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
        } else {
                char *cli, *srv, *new = NULL;
                int   csum_size = hash_types[bsdr->bsd_hash_alg].sht_size;

                LASSERT(bsdr->bsd_hash_alg < BULK_HASH_ALG_MAX);
                if (bsdr->bsd_hash_alg == BULK_HASH_ALG_NULL)
                        return 0;

                if (vsize < sizeof(*bsdv) + csum_size) {
                        CERROR("verifier size %d too small, require %d\n",
                               vsize, (int) sizeof(*bsdv) + csum_size);
                        return -EINVAL;
                }

                cli = (char *) (bsdr + 1);
                srv = (char *) (bsdv + 1);

                if (!memcmp(cli, srv, csum_size)) {
                        /* checksum confirmed */
                        CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                        return 0;
                }

                /* checksum mismatch: re-compute a new one and compare it
                 * with the others to give a precise warning. */
                OBD_ALLOC(new, csum_size);
                if (new == NULL)
                        return -ENOMEM;

                do_bulk_checksum(desc, bsdr->bsd_hash_alg, new);

                if (!memcmp(new, srv, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "on the client after we checksummed them\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                } else if (!memcmp(new, cli, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                } else {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit, and the current page contents "
                               "don't match the originals or what the server "
                               "received\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                }
                OBD_FREE(new, csum_size);

                return -EINVAL;
        }
}
EXPORT_SYMBOL(bulk_csum_cli_reply);
#ifdef __KERNEL__
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char           *ptr;
        unsigned int    off, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len == 0)
                        continue;

                ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr[off] ^= 0x1;
                cfs_kunmap(desc->bd_iov[i].kiov_page);
                return;
        }
}
#else
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
}
#endif /* __KERNEL__ */
int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
                  struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
                  struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
{
        int    rc;

        LASSERT(vsize >= sizeof(*bsdv));
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(bsdv && bsdr);

        if (read) {
                rc = generate_bulk_csum(desc, bsdv->bsd_hash_alg, bsdr, rsize);
                if (rc)
                        CERROR("bulk read: server failed to generate %s "
                               "checksum: %d\n",
                               hash_types[bsdv->bsd_hash_alg].sht_name, rc);

                /* corrupt the data after we compute the checksum, to
                 * simulate an OST->client data error */
                if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        } else {
                rc = verify_bulk_csum(desc, 0, bsdv, vsize, bsdr, rsize);
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_svc);
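
/*
 * overview of the message flow implemented above (informative summary,
 * not from the original source):
 *
 *   client, request:  bulk_csum_cli_request()  - write: checksum bd_iov;
 *                                                read: just record the alg
 *   server:           bulk_csum_svc()          - read: generate a checksum
 *                                                (optionally corrupting data
 *                                                under OBD_FAIL for testing);
 *                                                write: verify client's csum
 *   client, reply:    bulk_csum_cli_reply()    - read: verify server's csum;
 *                                                write: cross-check client,
 *                                                server and recomputed csums
 */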
/****************************************
 * Helpers to assist policy modules to  *
 * implement encryption functionality   *
 ****************************************/
#define CRYPTO_TFM_MODE_ECB     (0)
#define CRYPTO_TFM_MODE_CBC     (1)
static struct sptlrpc_ciph_type cipher_types[] = {
        [BULK_CIPH_ALG_NULL]       = {
                "null",       "null",          0, 0,  0
        },
        [BULK_CIPH_ALG_ARC4]       = {
                "arc4",       "ecb(arc4)",     0, 0,  16
        },
        [BULK_CIPH_ALG_AES128]     = {
                "aes128",     "cbc(aes)",      0, 16, 16
        },
        [BULK_CIPH_ALG_AES192]     = {
                "aes192",     "cbc(aes)",      0, 16, 24
        },
        [BULK_CIPH_ALG_AES256]     = {
                "aes256",     "cbc(aes)",      0, 16, 32
        },
        [BULK_CIPH_ALG_CAST128]    = {
                "cast128",    "cbc(cast5)",    0, 8,  16
        },
        [BULK_CIPH_ALG_CAST256]    = {
                "cast256",    "cbc(cast6)",    0, 16, 32
        },
        [BULK_CIPH_ALG_TWOFISH128] = {
                "twofish128", "cbc(twofish)",  0, 16, 16
        },
        [BULK_CIPH_ALG_TWOFISH256] = {
                "twofish256", "cbc(twofish)",  0, 16, 32
        },
};
const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg)
{
        struct sptlrpc_ciph_type *ct;

        if (ciph_alg < BULK_CIPH_ALG_MAX) {
                ct = &cipher_types[ciph_alg];
                if (ct->sct_tfm_name)
                        return ct;
        }
        return NULL;
}
EXPORT_SYMBOL(sptlrpc_get_ciph_type);
const char *sptlrpc_get_ciph_name(__u8 ciph_alg)
{
        const struct sptlrpc_ciph_type *ct;

        ct = sptlrpc_get_ciph_type(ciph_alg);
        if (ct)
                return ct->sct_name;
        else
                return "unknown";
}
EXPORT_SYMBOL(sptlrpc_get_ciph_name);