/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
#else
#include <linux/crypto.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"

/****************************************
 * bulk encryption page pools           *
 ****************************************/

#ifdef __KERNEL__

#define PTRS_PER_PAGE   (CFS_PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL  (PTRS_PER_PAGE)

static struct ptlrpc_enc_page_pool {
        /*
         * constants
         */
        unsigned long    epp_max_pages;   /* max pages the pools can hold, const */
        unsigned int     epp_max_pools;   /* number of pools, const */
        /*
         * users of the pools. the capacity grows as more users are added,
         * but doesn't shrink when users leave -- that's just the current
         * policy. during failover there might be user add/remove activity.
         */
        atomic_t         epp_users;       /* shared by how many users (osc) */
        atomic_t         epp_users_gone;  /* users removed */
        /*
         * wait queue in case of not enough free pages.
         */
        cfs_waitq_t      epp_waitq;       /* waiting threads */
        unsigned int     epp_waitqlen;    /* wait queue length */
        unsigned long    epp_pages_short; /* # of pages wanted by queued users */
        unsigned long    epp_adding:1,    /* currently adding pages */
                         epp_full:1;      /* pools are all full */
        /*
         * in-pool pages bookkeeping
         */
        spinlock_t       epp_lock;        /* protects following fields */
        unsigned long    epp_total_pages; /* total pages in pools */
        unsigned long    epp_free_pages;  /* current pages available */
        /*
         * statistics
         */
        unsigned int     epp_st_adds;     /* # of add-pages attempts */
        unsigned int     epp_st_failadds; /* # of add-pages failures */
        unsigned long    epp_st_reqs;     /* # of get_pages requests */
        unsigned long    epp_st_missings; /* # of cache misses */
        unsigned long    epp_st_lowfree;  /* lowest free pages ever reached */
        unsigned long    epp_st_max_wqlen;/* highest waitqueue length ever */
        cfs_time_t       epp_st_max_wait; /* in jiffies */
        /*
         * pointers to pools
         */
        cfs_page_t    ***epp_pools;
} page_pools;
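
/*
 * A sketch of the two-level layout, for orientation (the numbers below
 * assume 4 KiB pages and 8-byte pointers; both vary by platform):
 *
 *   epp_pools -> [ pool 0 | pool 1 | ... | epp_max_pools - 1 ]
 *                    |
 *                    v
 *                one cfs_page_t holding PTRS_PER_PAGE = 4096 / 8 = 512
 *                cfs_page_t pointers, i.e. each pool tracks up to 512
 *                encryption pages (2 MiB of payload).
 *
 * Free pages occupy global indices [0, epp_free_pages) like a stack;
 * rented-out slots are NULL. All index arithmetic below is
 * pool = idx / PAGES_PER_POOL, slot = idx % PAGES_PER_POOL.
 */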

int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                               int *eof, void *data)
{
        int     rc;

        spin_lock(&page_pools.epp_lock);

        rc = snprintf(page, count,
                      "physical pages:          %lu\n"
                      "pages per pool:          %lu\n"
                      "max pages:               %lu\n"
                      "max pools:               %u\n"
                      "users:                   %d - %d\n"
                      "current waitqueue len:   %u\n"
                      "current pages in short:  %lu\n"
                      "total pages:             %lu\n"
                      "total free:              %lu\n"
                      "add page times:          %u\n"
                      "add page failed times:   %u\n"
                      "total requests:          %lu\n"
                      "cache misses:            %lu\n"
                      "lowest free pages:       %lu\n"
                      "max waitqueue depth:     %lu\n"
                      "max wait time:           "CFS_TIME_T"\n"
                      ,
                      num_physpages,
                      PAGES_PER_POOL,
                      page_pools.epp_max_pages,
                      page_pools.epp_max_pools,
                      atomic_read(&page_pools.epp_users),
                      atomic_read(&page_pools.epp_users_gone),
                      page_pools.epp_waitqlen,
                      page_pools.epp_pages_short,
                      page_pools.epp_total_pages,
                      page_pools.epp_free_pages,
                      page_pools.epp_st_adds,
                      page_pools.epp_st_failadds,
                      page_pools.epp_st_reqs,
                      page_pools.epp_st_missings,
                      page_pools.epp_st_lowfree,
                      page_pools.epp_st_max_wqlen,
                      page_pools.epp_st_max_wait
                     );

        spin_unlock(&page_pools.epp_lock);
        return rc;
}

static inline
int npages_to_npools(unsigned long npages)
{
        return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
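
/*
 * Worked example (assuming PAGES_PER_POOL = 512, as computed above for
 * 4 KiB pages and 8-byte pointers): npages_to_npools(1) == 1,
 * npages_to_npools(512) == 1, npages_to_npools(513) == 2 -- a plain
 * round-up division.
 */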

/*
 * return how many pages cleaned up.
 */
static unsigned long enc_cleanup_pools(cfs_page_t ***pools, int npools)
{
        unsigned long cleaned = 0;
        int           i, j;

        for (i = 0; i < npools; i++) {
                if (pools[i]) {
                        for (j = 0; j < PAGES_PER_POOL; j++) {
                                if (pools[i][j]) {
                                        cfs_free_page(pools[i][j]);
                                        cleaned++;
                                }
                        }
                        OBD_FREE(pools[i], CFS_PAGE_SIZE);
                        pools[i] = NULL;
                }
        }

        return cleaned;
}

/*
 * merge the @npools pools pointed at by @pools, containing @npages new
 * pages, into the current pools.
 *
 * we could avoid most of the memory copying with some tricks, but we
 * choose the simplest way to avoid complexity. this path is not called
 * frequently.
 */
static void enc_insert_pool(cfs_page_t ***pools, int npools, int npages)
{
        int     freeslot;
        int     op_idx, np_idx, og_idx, ng_idx;
        int     cur_npools, end_npools;

        LASSERT(npages > 0);
        LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
        LASSERT(npages_to_npools(npages) == npools);

        spin_lock(&page_pools.epp_lock);

        /*
         * (1) fill all the free slots of current pools.
         */
        /* free slots are those left by rented pages, plus the extra ones
         * with index >= epp_total_pages, located at the tail of the last
         * pool. */
        freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
        if (freeslot != 0)
                freeslot = PAGES_PER_POOL - freeslot;
        freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;
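
        /*
         * Worked example (hypothetical numbers, assuming PAGES_PER_POOL =
         * 512): with epp_total_pages = 1000 and epp_free_pages = 700, the
         * tail of the last pool has 512 - (1000 % 512) = 24 unused slots,
         * and 1000 - 700 = 300 slots were emptied by rented pages, so
         * freeslot = 324. Filling starts at op_idx = 700 / 512 = 1,
         * og_idx = 700 % 512 = 188, i.e. right above the last free page.
         */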

        op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        np_idx = npools - 1;
        ng_idx = (npages - 1) % PAGES_PER_POOL;

        while (freeslot) {
                LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
                LASSERT(pools[np_idx][ng_idx] != NULL);

                page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
                pools[np_idx][ng_idx] = NULL;

                freeslot--;

                if (++og_idx == PAGES_PER_POOL) {
                        op_idx++;
                        og_idx = 0;
                }
                if (--ng_idx < 0) {
                        if (np_idx == 0)
                                break;
                        np_idx--;
                        ng_idx = PAGES_PER_POOL - 1;
                }
        }

        /*
         * (2) add pools if needed.
         */
        cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        end_npools = (page_pools.epp_total_pages + npages +
                      PAGES_PER_POOL - 1) / PAGES_PER_POOL;
        LASSERT(end_npools <= page_pools.epp_max_pools);

        np_idx = 0;
        while (cur_npools < end_npools) {
                LASSERT(page_pools.epp_pools[cur_npools] == NULL);
                LASSERT(np_idx < npools);
                LASSERT(pools[np_idx] != NULL);

                page_pools.epp_pools[cur_npools++] = pools[np_idx];
                pools[np_idx++] = NULL;
        }

        page_pools.epp_total_pages += npages;
        page_pools.epp_free_pages += npages;
        page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        if (page_pools.epp_total_pages == page_pools.epp_max_pages)
                page_pools.epp_full = 1;

        CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
               page_pools.epp_total_pages);

        spin_unlock(&page_pools.epp_lock);
}

static int enc_pools_add_pages(int npages)
{
        static DECLARE_MUTEX(sem_add_pages);
        cfs_page_t   ***pools;
        int             npools, alloced = 0;
        int             i, j, rc = -ENOMEM;

        down(&sem_add_pages);

        if (npages > page_pools.epp_max_pages - page_pools.epp_total_pages)
                npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
        if (npages == 0) {
                rc = 0;
                goto out;
        }

        page_pools.epp_st_adds++;

        npools = npages_to_npools(npages);
        OBD_ALLOC(pools, npools * sizeof(*pools));
        if (pools == NULL)
                goto out;

        for (i = 0; i < npools; i++) {
                OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
                if (pools[i] == NULL)
                        goto out_pools;

                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
                        pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
                                                     CFS_ALLOC_HIGH);
                        if (pools[i][j] == NULL)
                                goto out_pools;

                        alloced++;
                }
        }

        enc_insert_pool(pools, npools, npages);
        CDEBUG(D_SEC, "add %d pages into enc page pools\n", npages);
        rc = 0;

out_pools:
        /* enc_insert_pool() NULLs out every page and pool array it has
         * taken over, so this cleans up only the leftovers */
        enc_cleanup_pools(pools, npools);
        OBD_FREE(pools, npools * sizeof(*pools));
out:
        if (rc) {
                page_pools.epp_st_failadds++;
                CERROR("Failed to pre-allocate %d enc pages\n", npages);
        }

        up(&sem_add_pages);
        return rc;
}

/*
 * both "max bulk rpcs inflight" and "lnet MTU" are tunable; we use the
 * default fixed values initially.
 */
int sptlrpc_enc_pool_add_user(void)
{
        int page_plus = PTLRPC_MAX_BRW_PAGES * OSC_MAX_RIF_DEFAULT;
        int users, users_gone, shift;

        LASSERT(!in_interrupt());
        LASSERT(atomic_read(&page_pools.epp_users) >= 0);

        users_gone = atomic_dec_return(&page_pools.epp_users_gone);
        if (users_gone >= 0) {
                CWARN("%d users gone, skip\n", users_gone + 1);
                return 0;
        }
        atomic_inc(&page_pools.epp_users_gone);

        /*
         * prepare full pages for the first 2 users; 1/2 for the next 2
         * users; 1/4 for the next 4 users; 1/8 for the next 8 users;
         * 1/16 for the next 16 users; ...
         */
        users = atomic_add_return(1, &page_pools.epp_users);
        shift = fls(users - 1);
        shift = shift > 1 ? shift - 1 : 0;
        page_plus = page_plus >> shift;
        page_plus = page_plus > 2 ? page_plus : 2;
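
        /*
         * Worked example (illustrative numbers only; the real values of
         * PTLRPC_MAX_BRW_PAGES and OSC_MAX_RIF_DEFAULT are build-time
         * constants): suppose page_plus starts at 256 * 8 = 2048. Then
         * users 1-2 get fls(0)/fls(1) <= 1 -> shift 0 -> 2048 pages;
         * users 3-4 get fls(2)/fls(3) == 2 -> shift 1 -> 1024 pages;
         * users 5-8 get fls(4..7) == 3 -> shift 2 -> 512 pages; and so
         * on, matching the comment above.
         */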

        /* best effort: failure to pre-allocate here is not fatal, more
         * pages can still be added on demand in get_pages(). */
        enc_pools_add_pages(page_plus);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);

int sptlrpc_enc_pool_del_user(void)
{
        atomic_inc(&page_pools.epp_users_gone);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);

/*
 * allocate the requested pages atomically, i.e. all-or-nothing: the
 * caller may block here waiting for free pages, but never gets a
 * partial allocation.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        cfs_waitlink_t  waitlink;
        cfs_time_t      tick1 = 0, tick2;
        int             p_idx, g_idx;
        int             i;

        LASSERT(desc->bd_max_iov > 0);
        LASSERT(desc->bd_max_iov <= page_pools.epp_total_pages);

        /* resent bulk, enc pages might have been allocated previously */
        if (desc->bd_enc_pages != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_pages,
                  desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        if (desc->bd_enc_pages == NULL)
                return -ENOMEM;

        spin_lock(&page_pools.epp_lock);
again:
        page_pools.epp_st_reqs++;

        if (unlikely(page_pools.epp_free_pages < desc->bd_max_iov)) {
                if (tick1 == 0)
                        tick1 = cfs_time_current();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_max_iov;

                if (++page_pools.epp_waitqlen > page_pools.epp_st_max_wqlen)
                        page_pools.epp_st_max_wqlen = page_pools.epp_waitqlen;

                /* we just wait if someone else is adding more pages, or
                 * the wait queue is not deep enough yet; otherwise try to
                 * add more pages to the pools ourselves.
                 *
                 * FIXME the policy for detecting resource shortage and
                 * growing the pool needs to be reconsidered. */
                if (page_pools.epp_adding || page_pools.epp_waitqlen < 2 ||
                    page_pools.epp_full) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        cfs_waitlink_init(&waitlink);
                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);

                        spin_unlock(&page_pools.epp_lock);
                        cfs_schedule();
                        spin_lock(&page_pools.epp_lock);
                } else {
                        page_pools.epp_adding = 1;

                        spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        spin_lock(&page_pools.epp_lock);

                        page_pools.epp_adding = 0;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_max_iov);
                LASSERT(page_pools.epp_waitqlen > 0);
                page_pools.epp_pages_short -= desc->bd_max_iov;
                page_pools.epp_waitqlen--;

                goto again;
        }

        /* record max wait time */
        if (unlikely(tick1 != 0)) {
                tick2 = cfs_time_current();
                if (tick2 - tick1 > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick2 - tick1;
        }

        /* proceed with the rest of allocation: pages are handed out from
         * the top of the free range downwards */
        page_pools.epp_free_pages -= desc->bd_max_iov;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_pages[i] = page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        spin_unlock(&page_pools.epp_lock);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
        int     p_idx, g_idx;
        int     i;

        if (desc->bd_enc_pages == NULL)
                return;
        if (desc->bd_max_iov == 0)
                return;

        spin_lock(&page_pools.epp_lock);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        LASSERT(page_pools.epp_free_pages + desc->bd_max_iov <=
                page_pools.epp_total_pages);
        LASSERT(page_pools.epp_pools[p_idx]);

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(desc->bd_enc_pages[i] != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

                page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_pages[i];

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        page_pools.epp_free_pages += desc->bd_max_iov;

        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(page_pools.epp_waitqlen > 0);
                LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
                cfs_waitq_broadcast(&page_pools.epp_waitq);
        }

        spin_unlock(&page_pools.epp_lock);

        OBD_FREE(desc->bd_enc_pages,
                 desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        desc->bd_enc_pages = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
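
/*
 * Typical caller lifecycle, as a hedged sketch (the surrounding calls
 * and error handling are hypothetical, not taken from an actual caller):
 *
 *   sptlrpc_enc_pool_add_user();              // e.g. once per OSC setup
 *   ...
 *   rc = sptlrpc_enc_pool_get_pages(desc);    // before each bulk transfer
 *   if (rc == 0) {
 *           // encrypt bd_iov data into desc->bd_enc_pages here
 *           sptlrpc_enc_pool_put_pages(desc); // when the transfer is done
 *   }
 *   ...
 *   sptlrpc_enc_pool_del_user();              // at OSC cleanup
 */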

int sptlrpc_enc_pool_init(void)
{
        /* constants */
        page_pools.epp_max_pages = num_physpages / 4;
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

        atomic_set(&page_pools.epp_users, 0);
        atomic_set(&page_pools.epp_users_gone, 0);

        cfs_waitq_init(&page_pools.epp_waitq);
        page_pools.epp_waitqlen = 0;
        page_pools.epp_pages_short = 0;

        page_pools.epp_adding = 0;
        page_pools.epp_full = 0;

        spin_lock_init(&page_pools.epp_lock);
        page_pools.epp_total_pages = 0;
        page_pools.epp_free_pages = 0;

        page_pools.epp_st_adds = 0;
        page_pools.epp_st_failadds = 0;
        page_pools.epp_st_reqs = 0;
        page_pools.epp_st_missings = 0;
        page_pools.epp_st_lowfree = 0;
        page_pools.epp_st_max_wqlen = 0;
        page_pools.epp_st_max_wait = 0;

        OBD_ALLOC(page_pools.epp_pools,
                  page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
        if (page_pools.epp_pools == NULL)
                return -ENOMEM;

        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
        unsigned long cleaned, npools;

        LASSERT(page_pools.epp_pools);
        LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

        npools = npages_to_npools(page_pools.epp_total_pages);
        cleaned = enc_cleanup_pools(page_pools.epp_pools, npools);
        LASSERT(cleaned == page_pools.epp_total_pages);

        OBD_FREE(page_pools.epp_pools,
                 page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
}

#else /* !__KERNEL__ */

int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        return 0;
}

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
}

int sptlrpc_enc_pool_init(void)
{
        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
}
#endif

/****************************************
 * Helpers to assist policy modules to  *
 * implement checksum functionality     *
 ****************************************/

static struct {
        char    *name;
        int      size;
} csum_types[] = {
        [BULK_CSUM_ALG_NULL]    = { "null",     0 },
        [BULK_CSUM_ALG_CRC32]   = { "crc32",    4 },
        [BULK_CSUM_ALG_MD5]     = { "md5",     16 },
        [BULK_CSUM_ALG_SHA1]    = { "sha1",    20 },
        [BULK_CSUM_ALG_SHA256]  = { "sha256",  32 },
        [BULK_CSUM_ALG_SHA384]  = { "sha384",  48 },
        [BULK_CSUM_ALG_SHA512]  = { "sha512",  64 },
};

const char * sptlrpc_bulk_csum_alg2name(__u32 csum_alg)
{
        if (csum_alg < BULK_CSUM_ALG_MAX)
                return csum_types[csum_alg].name;
        return "unknown_cksum";
}
EXPORT_SYMBOL(sptlrpc_bulk_csum_alg2name);

int bulk_sec_desc_size(__u32 csum_alg, int request, int read)
{
        int size = sizeof(struct ptlrpc_bulk_sec_desc);

        LASSERT(csum_alg < BULK_CSUM_ALG_MAX);

        /* read requests don't need extra checksum data */
        if (!(read && request))
                size += csum_types[csum_alg].size;

        return size;
}
EXPORT_SYMBOL(bulk_sec_desc_size);
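
/*
 * Example (using the sizes from csum_types above): for BULK_CSUM_ALG_MD5,
 * a read reply, write request, or write reply needs
 * sizeof(struct ptlrpc_bulk_sec_desc) + 16 bytes, while a read request
 * carries no checksum yet and needs only
 * sizeof(struct ptlrpc_bulk_sec_desc).
 */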

int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        int    size = msg->lm_buflens[offset];

        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        if (bsd == NULL) {
                CERROR("Invalid bulk sec desc: size %d\n", size);
                return -EINVAL;
        }

        if (lustre_msg_swabbed(msg)) {
                __swab32s(&bsd->bsd_version);
                __swab32s(&bsd->bsd_pad);
                __swab32s(&bsd->bsd_csum_alg);
                __swab32s(&bsd->bsd_priv_alg);
        }

        if (bsd->bsd_version != 0) {
                CERROR("Unexpected version %u\n", bsd->bsd_version);
                return -EPROTO;
        }

        if (bsd->bsd_csum_alg >= BULK_CSUM_ALG_MAX) {
                CERROR("Unsupported checksum algorithm %u\n",
                       bsd->bsd_csum_alg);
                return -EINVAL;
        }
        if (bsd->bsd_priv_alg >= BULK_PRIV_ALG_MAX) {
                CERROR("Unsupported cipher algorithm %u\n",
                       bsd->bsd_priv_alg);
                return -EINVAL;
        }

        if (size > sizeof(*bsd) &&
            size < sizeof(*bsd) + csum_types[bsd->bsd_csum_alg].size) {
                CERROR("Malformed checksum data: csum alg %u, size %d\n",
                       bsd->bsd_csum_alg, size);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);

#ifdef __KERNEL__
static
int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
{
        struct page *page;
        int off;
        char *ptr;
        __u32 crc32 = ~0;
        int len, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                page = desc->bd_iov[i].kiov_page;
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr = cfs_kmap(page) + off;
                len = desc->bd_iov[i].kiov_len;

                crc32 = crc32_le(crc32, ptr, len);

                cfs_kunmap(page);
        }

        /* note: no final bit inversion is applied; both peers use this
         * same routine, so the values still compare correctly */
        *((__u32 *) buf) = crc32;
        return 0;
}

static
int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        struct crypto_tfm *tfm;
        struct scatterlist *sl;
        int i, rc = 0;

        LASSERT(alg > BULK_CSUM_ALG_NULL &&
                alg < BULK_CSUM_ALG_MAX);

        if (alg == BULK_CSUM_ALG_CRC32)
                return do_bulk_checksum_crc32(desc, buf);

        tfm = crypto_alloc_tfm(csum_types[alg].name, 0);
        if (tfm == NULL) {
                CERROR("Unable to allocate tfm %s\n", csum_types[alg].name);
                return -ENOMEM;
        }

        OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
        if (sl == NULL) {
                rc = -ENOMEM;
                goto out_tfm;
        }

        for (i = 0; i < desc->bd_iov_count; i++) {
                sl[i].page = desc->bd_iov[i].kiov_page;
                sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                sl[i].length = desc->bd_iov[i].kiov_len;
        }

        crypto_digest_init(tfm);
        crypto_digest_update(tfm, sl, desc->bd_iov_count);
        crypto_digest_final(tfm, buf);

        OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);

out_tfm:
        crypto_free_tfm(tfm);
        return rc;
}

#else /* !__KERNEL__ */
static
int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        __u32 crc32 = ~0;
        int i;

        LASSERT(alg == BULK_CSUM_ALG_CRC32);

        for (i = 0; i < desc->bd_iov_count; i++) {
                char *ptr = desc->bd_iov[i].iov_base;
                int len = desc->bd_iov[i].iov_len;

                crc32 = crc32_le(crc32, ptr, len);
        }

        *((__u32 *) buf) = crc32;
        return 0;
}
#endif

/*
 * perform algorithm @alg checksum on @desc, store the result in
 * @bsd->bsd_csum. on failure, bsd_csum_alg is left as BULK_CSUM_ALG_NULL.
 */
static
int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
                       struct ptlrpc_bulk_sec_desc *bsd, int bsdsize)
{
        int rc;

        LASSERT(bsd);
        LASSERT(alg < BULK_CSUM_ALG_MAX);

        bsd->bsd_csum_alg = BULK_CSUM_ALG_NULL;

        if (alg == BULK_CSUM_ALG_NULL)
                return 0;

        LASSERT(bsdsize >= sizeof(*bsd) + csum_types[alg].size);

        rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
        if (rc == 0)
                bsd->bsd_csum_alg = alg;

        return rc;
}

static
int verify_bulk_csum(struct ptlrpc_bulk_desc *desc, int read,
                     struct ptlrpc_bulk_sec_desc *bsdv, int bsdvsize,
                     struct ptlrpc_bulk_sec_desc *bsdr, int bsdrsize)
{
        char *csum_p;
        char *buf = NULL;
        int   csum_size, rc = 0;

        LASSERT(bsdv);
        LASSERT(bsdv->bsd_csum_alg < BULK_CSUM_ALG_MAX);

        if (bsdr)
                bsdr->bsd_csum_alg = BULK_CSUM_ALG_NULL;

        if (bsdv->bsd_csum_alg == BULK_CSUM_ALG_NULL)
                return 0;

        /* for all supported algorithms */
        csum_size = csum_types[bsdv->bsd_csum_alg].size;

        if (bsdvsize < sizeof(*bsdv) + csum_size) {
                CERROR("verifier size %d too small, required %d\n",
                       bsdvsize, (int) sizeof(*bsdv) + csum_size);
                return -EINVAL;
        }

        if (bsdr) {
                LASSERT(bsdrsize >= sizeof(*bsdr) + csum_size);
                csum_p = (char *) bsdr->bsd_csum;
        } else {
                OBD_ALLOC(buf, csum_size);
                if (buf == NULL)
                        return -ENOMEM;
                csum_p = buf;
        }

        rc = do_bulk_checksum(desc, bsdv->bsd_csum_alg, csum_p);

        if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
                CERROR("BAD %s CHECKSUM (%s), data mutated during "
                       "transfer!\n", read ? "READ" : "WRITE",
                       csum_types[bsdv->bsd_csum_alg].name);
                rc = -EINVAL;
        } else {
                CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
                      read ? "read" : "write",
                      csum_types[bsdv->bsd_csum_alg].name);
        }

        if (bsdr) {
                bsdr->bsd_csum_alg = bsdv->bsd_csum_alg;
                memcpy(bsdr->bsd_csum, csum_p, csum_size);
        } else {
                LASSERT(buf);
                OBD_FREE(buf, csum_size);
        }

        return rc;
}

int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
                          __u32 alg, struct lustre_msg *rmsg, int roff)
{
        struct ptlrpc_bulk_sec_desc *bsdr;
        int    rsize, rc = 0;

        rsize = rmsg->lm_buflens[roff];
        bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(alg < BULK_CSUM_ALG_MAX);

        if (read)
                bsdr->bsd_csum_alg = alg;
        else {
                rc = generate_bulk_csum(desc, alg, bsdr, rsize);
                if (rc) {
                        CERROR("client bulk write: failed to perform "
                               "checksum: %d\n", rc);
                }
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_cli_request);

int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
                        struct lustre_msg *rmsg, int roff,
                        struct lustre_msg *vmsg, int voff)
{
        struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
        int    rsize, vsize;

        rsize = rmsg->lm_buflens[roff];
        vsize = vmsg->lm_buflens[voff];
        bsdr = lustre_msg_buf(rmsg, roff, 0);
        bsdv = lustre_msg_buf(vmsg, voff, 0);

        if (bsdv == NULL || vsize < sizeof(*bsdv)) {
                CERROR("Invalid checksum verifier from server: size %d\n",
                       vsize);
                return -EINVAL;
        }

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(vsize >= sizeof(*bsdv));

        if (bsdr->bsd_csum_alg != bsdv->bsd_csum_alg) {
                CERROR("bulk %s: checksum algorithm mismatch: client "
                       "requested %s but server replied with %s; trying "
                       "the server's algorithm for verification\n",
                       read ? "read" : "write",
                       csum_types[bsdr->bsd_csum_alg].name,
                       csum_types[bsdv->bsd_csum_alg].name);
        }

        if (read)
                return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
        else {
                char *cli, *srv, *new = NULL;
                int csum_size = csum_types[bsdr->bsd_csum_alg].size;

                LASSERT(bsdr->bsd_csum_alg < BULK_CSUM_ALG_MAX);
                if (bsdr->bsd_csum_alg == BULK_CSUM_ALG_NULL)
                        return 0;

                if (vsize < sizeof(*bsdv) + csum_size) {
                        CERROR("verifier size %d too small, required %d\n",
                               vsize, (int) sizeof(*bsdv) + csum_size);
                        return -EINVAL;
                }

                cli = (char *) (bsdr + 1);
                srv = (char *) (bsdv + 1);

                if (!memcmp(cli, srv, csum_size)) {
                        /* checksum confirmed */
                        CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
                              csum_types[bsdr->bsd_csum_alg].name);
                        return 0;
                }

                /* checksum mismatch: re-compute a new one, compare it with
                 * the others, and give out the proper warning. */
                OBD_ALLOC(new, csum_size);
                if (new == NULL)
                        return -ENOMEM;

                do_bulk_checksum(desc, bsdr->bsd_csum_alg, new);

                if (!memcmp(new, srv, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "on the client after we checksummed them\n",
                               csum_types[bsdr->bsd_csum_alg].name);
                } else if (!memcmp(new, cli, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit\n",
                               csum_types[bsdr->bsd_csum_alg].name);
                } else {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit, and the current page contents "
                               "don't match the originals and what the server "
                               "received\n",
                               csum_types[bsdr->bsd_csum_alg].name);
                }
                OBD_FREE(new, csum_size);

                return -EINVAL;
        }
}
EXPORT_SYMBOL(bulk_csum_cli_reply);

int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
                  struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
                  struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
{
        int    rc;

        LASSERT(vsize >= sizeof(*bsdv));
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(bsdv && bsdr);

        if (read) {
                rc = generate_bulk_csum(desc, bsdv->bsd_csum_alg, bsdr, rsize);
                if (rc)
                        CERROR("bulk read: server failed to generate %s "
                               "checksum: %d\n",
                               csum_types[bsdv->bsd_csum_alg].name, rc);
        } else
                rc = verify_bulk_csum(desc, 0, bsdv, vsize, bsdr, rsize);

        return rc;
}
EXPORT_SYMBOL(bulk_csum_svc);

/****************************************
 * Helpers to assist policy modules to  *
 * implement encryption functionality   *
 ****************************************/

/*
 * NOTE: These algorithms must be stream ciphers!
 */
static struct {
        char    *name;
        __u32    flags;
} priv_types[] = {
        [BULK_PRIV_ALG_NULL]   = { "null", 0   },
        [BULK_PRIV_ALG_ARC4]   = { "arc4", 0   },
};

const char * sptlrpc_bulk_priv_alg2name(__u32 priv_alg)
{
        if (priv_alg < BULK_PRIV_ALG_MAX)
                return priv_types[priv_alg].name;
        return "unknown_priv";
}
EXPORT_SYMBOL(sptlrpc_bulk_priv_alg2name);

__u32 sptlrpc_bulk_priv_alg2flags(__u32 priv_alg)
{
        if (priv_alg < BULK_PRIV_ALG_MAX)
                return priv_types[priv_alg].flags;
        return 0;
}
EXPORT_SYMBOL(sptlrpc_bulk_priv_alg2flags);