/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
#else
#include <linux/crypto.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"

/****************************************
 * bulk encryption page pools           *
 ****************************************/

#ifdef __KERNEL__

#define PTRS_PER_PAGE   (CFS_PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL  (PTRS_PER_PAGE)
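
/*
 * Each pool is one page used as an array of page pointers. Worked example
 * (assuming 4KB pages and 8-byte pointers, neither required by the code):
 * PTRS_PER_PAGE = 4096 / 8 = 512, so one pool tracks 512 pages, i.e. 2MB
 * of bulk data.
 */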

static struct ptlrpc_enc_page_pool {
        /*
         * constants
         */
        unsigned long    epp_max_pages;   /* maximum pages we can hold, const */
        unsigned int     epp_max_pools;   /* number of pools, const */
        /*
         * users of the pools. the capacity grows as more users are added,
         * but doesn't shrink when users leave -- just the current policy.
         * during failover there might be user add/remove activity.
         */
        atomic_t         epp_users;       /* shared by how many users (osc) */
        atomic_t         epp_users_gone;  /* users removed */
        /*
         * wait queue in case of not enough free pages.
         */
        cfs_waitq_t      epp_waitq;       /* waiting threads */
        unsigned int     epp_waitqlen;    /* wait queue length */
        unsigned long    epp_pages_short; /* # of pages wanted by queued users */
        unsigned long    epp_adding:1,    /* currently adding pages */
                         epp_full:1;      /* pools are all full */
        /*
         * in-pool pages bookkeeping
         */
        spinlock_t       epp_lock;        /* protect following fields */
        unsigned long    epp_total_pages; /* total pages in pools */
        unsigned long    epp_free_pages;  /* current pages available */
        /*
         * statistics
         */
        unsigned int     epp_st_adds;     /* # of add-pages attempts */
        unsigned int     epp_st_failadds; /* # of add-pages failures */
        unsigned long    epp_st_reqs;     /* # of get_pages requests */
        unsigned long    epp_st_missings; /* # of cache misses */
        unsigned long    epp_st_lowfree;  /* lowest free pages ever reached */
        unsigned long    epp_st_max_wqlen;/* highest waitqueue length ever */
        cfs_time_t       epp_st_max_wait; /* in jiffies */
        /*
         * pointers to pools
         */
        cfs_page_t    ***epp_pools;
} page_pools;
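
/*
 * Layout sketch (follows from the index arithmetic used throughout this
 * file): global page number i lives at
 *
 *      page_pools.epp_pools[i / PAGES_PER_POOL][i % PAGES_PER_POOL]
 *
 * and the invariant maintained by get/put below is that slots
 * [0, epp_free_pages) hold available pages, while slots from
 * epp_free_pages up to epp_total_pages are NULL (rented out).
 */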

int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                               int *eof, void *data)
{
        int     rc;

        spin_lock(&page_pools.epp_lock);

        rc = snprintf(page, count,
                      "physical pages:          %lu\n"
                      "pages per pool:          %lu\n"
                      "max pages:               %lu\n"
                      "max pools:               %u\n"
                      "users:                   %d\n"
                      "users gone:              %d\n"
                      "current waitqueue len:   %u\n"
                      "current pages in short:  %lu\n"
                      "total pages:             %lu\n"
                      "total free:              %lu\n"
                      "add page times:          %u\n"
                      "add page failed times:   %u\n"
                      "total requests:          %lu\n"
                      "cache misses:            %lu\n"
                      "lowest free pages:       %lu\n"
                      "max waitqueue depth:     %lu\n"
                      "max wait time:           "CFS_TIME_T"\n",
                      num_physpages,
                      PAGES_PER_POOL,
                      page_pools.epp_max_pages,
                      page_pools.epp_max_pools,
                      atomic_read(&page_pools.epp_users),
                      atomic_read(&page_pools.epp_users_gone),
                      page_pools.epp_waitqlen,
                      page_pools.epp_pages_short,
                      page_pools.epp_total_pages,
                      page_pools.epp_free_pages,
                      page_pools.epp_st_adds,
                      page_pools.epp_st_failadds,
                      page_pools.epp_st_reqs,
                      page_pools.epp_st_missings,
                      page_pools.epp_st_lowfree,
                      page_pools.epp_st_max_wqlen,
                      page_pools.epp_st_max_wait);

        spin_unlock(&page_pools.epp_lock);
        return rc;
}

int npages_to_npools(unsigned long npages)
{
        return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
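
/*
 * e.g. assuming PAGES_PER_POOL == 512: npages_to_npools(1) == 1,
 * npages_to_npools(512) == 1, npages_to_npools(513) == 2.
 */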

/*
 * return how many pages were cleaned up.
 */
static unsigned long enc_cleanup_pools(cfs_page_t ***pools, int npools)
{
        unsigned long cleaned = 0;
        int           i, j;

        for (i = 0; i < npools; i++) {
                if (pools[i]) {
                        for (j = 0; j < PAGES_PER_POOL; j++) {
                                if (pools[i][j]) {
                                        cfs_free_page(pools[i][j]);
                                        cleaned++;
                                }
                        }

                        OBD_FREE(pools[i], CFS_PAGE_SIZE);
                        pools[i] = NULL;
                }
        }

        return cleaned;
}

/*
 * merge @npools pointed to by @pools, which contain @npages new pages,
 * into the current pools.
 *
 * we have options to avoid most memory copies with some tricks, but we
 * choose the simplest way to avoid complexity. it's not frequently called.
 */
static void enc_insert_pool(cfs_page_t ***pools, int npools, int npages)
{
        int     freeslot;
        int     op_idx, np_idx, og_idx, ng_idx;
        int     cur_npools, end_npools;

        LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
        LASSERT(npages_to_npools(npages) == npools);

        spin_lock(&page_pools.epp_lock);

        /*
         * (1) fill all the free slots of current pools.
         */
        /* free slots are those left by rented pages, plus the extra ones
         * with index >= epp_total_pages, located at the tail of last pool. */
        freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
        if (freeslot != 0)
                freeslot = PAGES_PER_POOL - freeslot;
        freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;
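
        /*
         * worked example (numbers assumed for illustration): with
         * PAGES_PER_POOL == 512, epp_total_pages == 1000 and
         * epp_free_pages == 990, the tail of the last pool has
         * 512 - (1000 % 512) = 24 unused slots and 1000 - 990 = 10 slots
         * were left by rented pages, giving freeslot == 34.
         */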

        op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        np_idx = npools - 1;
        ng_idx = (npages - 1) % PAGES_PER_POOL;

        while (freeslot) {
                LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
                LASSERT(pools[np_idx][ng_idx] != NULL);

                page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
                pools[np_idx][ng_idx] = NULL;

                freeslot--;

                if (++og_idx == PAGES_PER_POOL) {
                        op_idx++;
                        og_idx = 0;
                }
                if (--ng_idx < 0) {
                        if (np_idx == 0)
                                break;
                        np_idx--;
                        ng_idx = PAGES_PER_POOL - 1;
                }
        }

        /*
         * (2) add pools if needed.
         */
        cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        end_npools = (page_pools.epp_total_pages + npages +
                      PAGES_PER_POOL - 1) / PAGES_PER_POOL;
        LASSERT(end_npools <= page_pools.epp_max_pools);

        np_idx = 0;
        while (cur_npools < end_npools) {
                LASSERT(page_pools.epp_pools[cur_npools] == NULL);
                LASSERT(np_idx < npools);
                LASSERT(pools[np_idx] != NULL);

                page_pools.epp_pools[cur_npools++] = pools[np_idx];
                pools[np_idx++] = NULL;
        }

        page_pools.epp_total_pages += npages;
        page_pools.epp_free_pages += npages;
        page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        if (page_pools.epp_total_pages == page_pools.epp_max_pages)
                page_pools.epp_full = 1;

        CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
               page_pools.epp_total_pages);

        spin_unlock(&page_pools.epp_lock);
}

static int enc_pools_add_pages(int npages)
{
        static DECLARE_MUTEX(sem_add_pages);
        cfs_page_t   ***pools;
        int             npools, alloced = 0;
        int             i, j, rc = -ENOMEM;

        down(&sem_add_pages);

        if (npages > page_pools.epp_max_pages - page_pools.epp_total_pages)
                npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
        if (npages <= 0) {
                rc = 0;
                goto out;
        }

        page_pools.epp_st_adds++;

        npools = npages_to_npools(npages);
        OBD_ALLOC(pools, npools * sizeof(*pools));
        if (pools == NULL)
                goto out;

        for (i = 0; i < npools; i++) {
                OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
                if (pools[i] == NULL)
                        goto out_pools;

                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
                        pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
                                                     CFS_ALLOC_HIGH);
                        if (pools[i][j] == NULL)
                                goto out_pools;

                        alloced++;
                }
        }

        enc_insert_pool(pools, npools, npages);
        CDEBUG(D_SEC, "add %d pages into enc page pools\n", npages);
        rc = 0;

out_pools:
        enc_cleanup_pools(pools, npools);
        OBD_FREE(pools, npools * sizeof(*pools));
out:
        if (rc) {
                page_pools.epp_st_failadds++;
                CERROR("Failed to pre-allocate %d enc pages\n", npages);
        }

        up(&sem_add_pages);
        return rc;
}

/*
 * both "max bulk rpcs inflight" and "lnet MTU" are tunable; we use the
 * default fixed values initially.
 */
int sptlrpc_enc_pool_add_user(void)
{
        int page_plus = PTLRPC_MAX_BRW_PAGES * OSC_MAX_RIF_DEFAULT;
        int users, users_gone, shift, rc;

        LASSERT(!in_interrupt());
        LASSERT(atomic_read(&page_pools.epp_users) >= 0);

        users_gone = atomic_dec_return(&page_pools.epp_users_gone);
        if (users_gone >= 0) {
                CWARN("%d users gone, skip\n", users_gone + 1);
                return 0;
        }
        atomic_inc(&page_pools.epp_users_gone);

        /*
         * prepare full pages for the first 2 users; 1/2 for the next 2
         * users; 1/4 for the next 4 users; 1/8 for the next 8 users;
         * 1/16 for the next 16 users; and so on.
         */
        users = atomic_add_return(1, &page_pools.epp_users);
        shift = fls(users - 1);
        shift = shift > 1 ? shift - 1 : 0;
        page_plus = page_plus >> shift;
        page_plus = page_plus > 2 ? page_plus : 2;
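
        /*
         * worked example of the scaling above (constants assumed for
         * illustration): with 4KB pages, PTLRPC_MAX_BRW_PAGES == 256 and
         * OSC_MAX_RIF_DEFAULT == 8, the base quota is 2048 pages (8MB):
         *      users 1-2:  shift 0 -> 2048 pages each
         *      users 3-4:  shift 1 -> 1024 pages each
         *      users 5-8:  shift 2 ->  512 pages each
         *      users 9-16: shift 3 ->  256 pages each
         */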

        rc = enc_pools_add_pages(page_plus);
        return rc;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);

int sptlrpc_enc_pool_del_user(void)
{
        atomic_inc(&page_pools.epp_users_gone);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);

/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        cfs_waitlink_t  waitlink;
        cfs_time_t      tick1 = 0, tick2;
        int             p_idx, g_idx;
        int             i;

        LASSERT(desc->bd_max_iov > 0);
        LASSERT(desc->bd_max_iov <= page_pools.epp_total_pages);

        /* resent bulk, enc pages might have been allocated previously */
        if (desc->bd_enc_pages != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_pages,
                  desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        if (desc->bd_enc_pages == NULL)
                return -ENOMEM;

        spin_lock(&page_pools.epp_lock);

        page_pools.epp_st_reqs++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_max_iov)) {
                if (tick1 == 0)
                        tick1 = cfs_time_current();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_max_iov;

                if (++page_pools.epp_waitqlen > page_pools.epp_st_max_wqlen)
                        page_pools.epp_st_max_wqlen = page_pools.epp_waitqlen;

                /* we just wait if someone else is adding more pages, or
                 * the wait queue is not deep enough. otherwise try to
                 * add more pages to the pools.
                 *
                 * FIXME the policy for detecting resource tightness and
                 * growing the pool needs to be reconsidered. */
                if (page_pools.epp_adding || page_pools.epp_waitqlen < 2 ||
                    page_pools.epp_full) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        cfs_waitlink_init(&waitlink);
                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);

                        spin_unlock(&page_pools.epp_lock);
                        cfs_waitq_wait(&waitlink);
                        cfs_waitq_del(&page_pools.epp_waitq, &waitlink);
                        spin_lock(&page_pools.epp_lock);
                } else {
                        page_pools.epp_adding = 1;

                        spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        spin_lock(&page_pools.epp_lock);

                        page_pools.epp_adding = 0;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_max_iov);
                LASSERT(page_pools.epp_waitqlen > 0);
                page_pools.epp_pages_short -= desc->bd_max_iov;
                page_pools.epp_waitqlen--;

                goto again;
        }

        /* record max wait time */
        if (unlikely(tick1 != 0)) {
                tick2 = cfs_time_current();
                if (tick2 - tick1 > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick2 - tick1;
        }

        /* proceed with the rest of allocation */
        page_pools.epp_free_pages -= desc->bd_max_iov;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_pages[i] = page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        spin_unlock(&page_pools.epp_lock);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
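
/*
 * Usage sketch (illustrative; actual callers live in the sptlrpc policy
 * code, not in this file): a policy grabs encryption pages before sending
 * and returns them when the transfer completes, e.g.
 *
 *      if (sptlrpc_enc_pool_get_pages(desc) != 0)
 *              return -ENOMEM;
 *      ... encrypt desc->bd_iov[] pages into desc->bd_enc_pages[] ...
 *      sptlrpc_enc_pool_put_pages(desc);
 */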

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
        int     p_idx, g_idx;
        int     i;

        if (desc->bd_enc_pages == NULL)
                return;
        if (desc->bd_max_iov == 0)
                return;

        spin_lock(&page_pools.epp_lock);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        LASSERT(page_pools.epp_free_pages + desc->bd_max_iov <=
                page_pools.epp_total_pages);
        LASSERT(page_pools.epp_pools[p_idx]);

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(desc->bd_enc_pages[i] != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

                page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_pages[i];

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        page_pools.epp_free_pages += desc->bd_max_iov;

        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(page_pools.epp_waitqlen > 0);
                LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
                cfs_waitq_broadcast(&page_pools.epp_waitq);
        }

        spin_unlock(&page_pools.epp_lock);

        OBD_FREE(desc->bd_enc_pages,
                 desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        desc->bd_enc_pages = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);

int sptlrpc_enc_pool_init(void)
{
        page_pools.epp_max_pages = num_physpages / 4;
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

        atomic_set(&page_pools.epp_users, 0);
        atomic_set(&page_pools.epp_users_gone, 0);

        cfs_waitq_init(&page_pools.epp_waitq);
        page_pools.epp_waitqlen = 0;
        page_pools.epp_pages_short = 0;

        page_pools.epp_adding = 0;
        page_pools.epp_full = 0;

        spin_lock_init(&page_pools.epp_lock);
        page_pools.epp_total_pages = 0;
        page_pools.epp_free_pages = 0;

        page_pools.epp_st_adds = 0;
        page_pools.epp_st_failadds = 0;
        page_pools.epp_st_reqs = 0;
        page_pools.epp_st_missings = 0;
        page_pools.epp_st_lowfree = 0;
        page_pools.epp_st_max_wqlen = 0;
        page_pools.epp_st_max_wait = 0;

        OBD_ALLOC(page_pools.epp_pools,
                  page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
        if (page_pools.epp_pools == NULL)
                return -ENOMEM;

        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
        unsigned long cleaned, npools;

        LASSERT(page_pools.epp_pools);
        LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

        npools = npages_to_npools(page_pools.epp_total_pages);
        cleaned = enc_cleanup_pools(page_pools.epp_pools, npools);
        LASSERT(cleaned == page_pools.epp_total_pages);

        OBD_FREE(page_pools.epp_pools,
                 page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
}

#else /* !__KERNEL__ */

int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        return 0;
}

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
}

int sptlrpc_enc_pool_init(void)
{
        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
}

#endif /* __KERNEL__ */

/****************************************
 * Helpers to assist policy modules to  *
 * implement checksum functionality     *
 ****************************************/

static struct {
        char   *name;
        int     size;
} csum_types[] = {
        [BULK_CSUM_ALG_NULL]   = { "null",    0 },
        [BULK_CSUM_ALG_CRC32]  = { "crc32",   4 },
        [BULK_CSUM_ALG_MD5]    = { "md5",    16 },
        [BULK_CSUM_ALG_SHA1]   = { "sha1",   20 },
        [BULK_CSUM_ALG_SHA256] = { "sha256", 32 },
        [BULK_CSUM_ALG_SHA384] = { "sha384", 48 },
        [BULK_CSUM_ALG_SHA512] = { "sha512", 64 },
};

const char *sptlrpc_bulk_csum_alg2name(__u32 csum_alg)
{
        if (csum_alg < BULK_CSUM_ALG_MAX)
                return csum_types[csum_alg].name;
        return "unknown_cksum";
}
EXPORT_SYMBOL(sptlrpc_bulk_csum_alg2name);

int bulk_sec_desc_size(__u32 csum_alg, int request, int read)
{
        int size = sizeof(struct ptlrpc_bulk_sec_desc);

        LASSERT(csum_alg < BULK_CSUM_ALG_MAX);

        /* read requests don't need extra checksum data */
        if (!(read && request))
                size += csum_types[csum_alg].size;

        return size;
}
EXPORT_SYMBOL(bulk_sec_desc_size);
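
/*
 * Worked example (algorithm chosen for illustration): a bulk read request
 * carries no checksum, so its descriptor stays at sizeof(struct
 * ptlrpc_bulk_sec_desc); a write request using BULK_CSUM_ALG_MD5 grows by
 * csum_types[BULK_CSUM_ALG_MD5].size == 16 bytes.
 */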

int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        int    size = msg->lm_buflens[offset];

        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        if (bsd == NULL) {
                CERROR("Invalid bulk sec desc: size %d\n", size);
                return -EINVAL;
        }

        if (lustre_msg_swabbed(msg)) {
                __swab32s(&bsd->bsd_version);
                __swab32s(&bsd->bsd_pad);
                __swab32s(&bsd->bsd_csum_alg);
                __swab32s(&bsd->bsd_priv_alg);
        }

        if (bsd->bsd_version != 0) {
                CERROR("Unexpected version %u\n", bsd->bsd_version);
                return -EINVAL;
        }

        if (bsd->bsd_csum_alg >= BULK_CSUM_ALG_MAX) {
                CERROR("Unsupported checksum algorithm %u\n",
                       bsd->bsd_csum_alg);
                return -EINVAL;
        }

        if (bsd->bsd_priv_alg >= BULK_PRIV_ALG_MAX) {
                CERROR("Unsupported cipher algorithm %u\n",
                       bsd->bsd_priv_alg);
                return -EINVAL;
        }

        if (size > sizeof(*bsd) &&
            size < sizeof(*bsd) + csum_types[bsd->bsd_csum_alg].size) {
                CERROR("Mal-formed checksum data: csum alg %u, size %d\n",
                       bsd->bsd_csum_alg, size);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);

#ifdef __KERNEL__

int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
{
        cfs_page_t     *page;
        int             off;
        char           *ptr;
        __u32           crc32 = ~0;
        int             len, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                page = desc->bd_iov[i].kiov_page;
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr = cfs_kmap(page) + off;
                len = desc->bd_iov[i].kiov_len;

                crc32 = crc32_le(crc32, ptr, len);

                cfs_kunmap(page);
        }

        *((__u32 *) buf) = crc32;
        return 0;
}

int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        struct crypto_tfm  *tfm;
        struct scatterlist *sl;
        int                 i, rc = 0;

        LASSERT(alg > BULK_CSUM_ALG_NULL &&
                alg < BULK_CSUM_ALG_MAX);

        if (alg == BULK_CSUM_ALG_CRC32)
                return do_bulk_checksum_crc32(desc, buf);

        tfm = crypto_alloc_tfm(csum_types[alg].name, 0);
        if (tfm == NULL) {
                CERROR("Unable to allocate tfm %s\n", csum_types[alg].name);
                return -ENOMEM;
        }

        OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
        if (sl == NULL) {
                rc = -ENOMEM;
                goto out_tfm;
        }

        for (i = 0; i < desc->bd_iov_count; i++) {
                sl[i].page = desc->bd_iov[i].kiov_page;
                sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                sl[i].length = desc->bd_iov[i].kiov_len;
        }

        crypto_digest_init(tfm);
        crypto_digest_update(tfm, sl, desc->bd_iov_count);
        crypto_digest_final(tfm, buf);

        OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);

out_tfm:
        crypto_free_tfm(tfm);
        return rc;
}

#else /* !__KERNEL__ */

int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        __u32   crc32 = ~0;
        int     i;

        LASSERT(alg == BULK_CSUM_ALG_CRC32);

        for (i = 0; i < desc->bd_iov_count; i++) {
                char *ptr = desc->bd_iov[i].iov_base;
                int   len = desc->bd_iov[i].iov_len;

                crc32 = crc32_le(crc32, ptr, len);
        }

        *((__u32 *) buf) = crc32;
        return 0;
}

#endif /* __KERNEL__ */

/*
 * perform algorithm @alg checksum on @desc, store the result in @bsd.
 * if anything goes wrong, leave bsd_csum_alg as BULK_CSUM_ALG_NULL.
 */
int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
                       struct ptlrpc_bulk_sec_desc *bsd, int bsdsize)
{
        int rc;

        LASSERT(alg < BULK_CSUM_ALG_MAX);

        bsd->bsd_csum_alg = BULK_CSUM_ALG_NULL;

        if (alg == BULK_CSUM_ALG_NULL)
                return 0;

        LASSERT(bsdsize >= sizeof(*bsd) + csum_types[alg].size);

        rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
        if (rc == 0)
                bsd->bsd_csum_alg = alg;

        return rc;
}

int verify_bulk_csum(struct ptlrpc_bulk_desc *desc, int read,
                     struct ptlrpc_bulk_sec_desc *bsdv, int bsdvsize,
                     struct ptlrpc_bulk_sec_desc *bsdr, int bsdrsize)
{
        char   *csum_p;
        char   *buf = NULL;
        int     csum_size, rc = 0;

        LASSERT(bsdv->bsd_csum_alg < BULK_CSUM_ALG_MAX);

        if (bsdr)
                bsdr->bsd_csum_alg = BULK_CSUM_ALG_NULL;

        if (bsdv->bsd_csum_alg == BULK_CSUM_ALG_NULL)
                return 0;

        /* for all supported algorithms */
        csum_size = csum_types[bsdv->bsd_csum_alg].size;

        if (bsdvsize < sizeof(*bsdv) + csum_size) {
                CERROR("verifier size %d too small, require %d\n",
                       bsdvsize, (int) sizeof(*bsdv) + csum_size);
                return -EINVAL;
        }

        if (bsdr) {
                LASSERT(bsdrsize >= sizeof(*bsdr) + csum_size);
                csum_p = (char *) bsdr->bsd_csum;
        } else {
                OBD_ALLOC(buf, csum_size);
                if (buf == NULL)
                        return -ENOMEM;
                csum_p = buf;
        }

        rc = do_bulk_checksum(desc, bsdv->bsd_csum_alg, csum_p);

        if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
                CERROR("BAD %s CHECKSUM (%s), data mutated during "
                       "transfer!\n", read ? "READ" : "WRITE",
                       csum_types[bsdv->bsd_csum_alg].name);
                rc = -EINVAL;
        } else {
                CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
                       read ? "read" : "write",
                       csum_types[bsdv->bsd_csum_alg].name);
        }

        if (bsdr) {
                bsdr->bsd_csum_alg = bsdv->bsd_csum_alg;
                memcpy(bsdr->bsd_csum, csum_p, csum_size);
        } else {
                OBD_FREE(buf, csum_size);
        }

        return rc;
}

int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
                          __u32 alg, struct lustre_msg *rmsg, int roff)
{
        struct ptlrpc_bulk_sec_desc *bsdr;
        int    rsize, rc = 0;

        rsize = rmsg->lm_buflens[roff];
        bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(alg < BULK_CSUM_ALG_MAX);

        if (read) {
                bsdr->bsd_csum_alg = alg;
        } else {
                rc = generate_bulk_csum(desc, alg, bsdr, rsize);
                if (rc)
                        CERROR("client bulk write: failed to perform "
                               "checksum: %d\n", rc);
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_cli_request);

int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
                        struct lustre_msg *rmsg, int roff,
                        struct lustre_msg *vmsg, int voff)
{
        struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
        int    rsize, vsize;

        rsize = rmsg->lm_buflens[roff];
        vsize = vmsg->lm_buflens[voff];
        bsdr = lustre_msg_buf(rmsg, roff, 0);
        bsdv = lustre_msg_buf(vmsg, voff, 0);

        if (bsdv == NULL || vsize < sizeof(*bsdv)) {
                CERROR("Invalid checksum verifier from server: size %d\n",
                       vsize);
                return -EINVAL;
        }

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(vsize >= sizeof(*bsdv));

        if (bsdr->bsd_csum_alg != bsdv->bsd_csum_alg) {
                CERROR("bulk %s: checksum algorithm mismatch: client "
                       "requested %s but server replied with %s. trying to "
                       "use the new one for checksum verification\n",
                       read ? "read" : "write",
                       csum_types[bsdr->bsd_csum_alg].name,
                       csum_types[bsdv->bsd_csum_alg].name);
        }

        if (read) {
                return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
        } else {
                char *cli, *srv, *new = NULL;
                int   csum_size = csum_types[bsdr->bsd_csum_alg].size;

                LASSERT(bsdr->bsd_csum_alg < BULK_CSUM_ALG_MAX);
                if (bsdr->bsd_csum_alg == BULK_CSUM_ALG_NULL)
                        return 0;

                if (vsize < sizeof(*bsdv) + csum_size) {
                        CERROR("verifier size %d too small, require %d\n",
                               vsize, (int) sizeof(*bsdv) + csum_size);
                        return -EINVAL;
                }

                cli = (char *) (bsdr + 1);
                srv = (char *) (bsdv + 1);

                if (!memcmp(cli, srv, csum_size)) {
                        /* checksum confirmed */
                        CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
                               csum_types[bsdr->bsd_csum_alg].name);
                        return 0;
                }

                /* checksum mismatch, re-compute a new one and compare with
                 * the others, give out proper warnings. */
                OBD_ALLOC(new, csum_size);
                if (new == NULL)
                        return -ENOMEM;

                do_bulk_checksum(desc, bsdr->bsd_csum_alg, new);

                if (!memcmp(new, srv, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "on the client after we checksummed them\n",
                               csum_types[bsdr->bsd_csum_alg].name);
                } else if (!memcmp(new, cli, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit\n",
                               csum_types[bsdr->bsd_csum_alg].name);
                } else {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit, and the current page contents "
                               "don't match the originals and what the server "
                               "received\n",
                               csum_types[bsdr->bsd_csum_alg].name);
                }
                OBD_FREE(new, csum_size);

                return -EINVAL;
        }
}
EXPORT_SYMBOL(bulk_csum_cli_reply);

int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
                  struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
                  struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
{
        int rc;

        LASSERT(vsize >= sizeof(*bsdv));
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(bsdv && bsdr);

        if (read) {
                rc = generate_bulk_csum(desc, bsdv->bsd_csum_alg, bsdr, rsize);
                if (rc)
                        CERROR("bulk read: server failed to generate %s "
                               "checksum: %d\n",
                               csum_types[bsdv->bsd_csum_alg].name, rc);
        } else {
                rc = verify_bulk_csum(desc, 0, bsdv, vsize, bsdr, rsize);
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_svc);

/****************************************
 * Helpers to assist policy modules to  *
 * implement encryption functionality   *
 ****************************************/

/*
 * NOTE: These algorithms must be stream ciphers!
 */
static struct {
        char   *name;
        __u32   flags;
} priv_types[] = {
        [BULK_PRIV_ALG_NULL] = { "null", 0 },
        [BULK_PRIV_ALG_ARC4] = { "arc4", 0 },
};
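
/*
 * A note on the stream-cipher requirement above (an inference from the
 * surrounding code, not stated explicitly elsewhere): bulk pages are
 * enciphered page-for-page into the encryption pages, so the transform
 * must be length-preserving -- a block cipher would need padding and
 * would not fit the fixed page-sized buffers.
 */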

const char *sptlrpc_bulk_priv_alg2name(__u32 priv_alg)
{
        if (priv_alg < BULK_PRIV_ALG_MAX)
                return priv_types[priv_alg].name;
        return "unknown_priv";
}
EXPORT_SYMBOL(sptlrpc_bulk_priv_alg2name);

__u32 sptlrpc_bulk_priv_alg2flags(__u32 priv_alg)
{
        if (priv_alg < BULK_PRIV_ALG_MAX)
                return priv_types[priv_alg].flags;
        return 0;
}
EXPORT_SYMBOL(sptlrpc_bulk_priv_alg2flags);