lustre/ptlrpc/sec_bulk.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2006-2007 Cluster File Systems, Inc.
5  *   Author: Eric Mei <ericm@clusterfs.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #ifndef EXPORT_SYMTAB
24 #define EXPORT_SYMTAB
25 #endif
26 #define DEBUG_SUBSYSTEM S_SEC
27
28 #include <libcfs/libcfs.h>
29 #ifndef __KERNEL__
30 #include <liblustre.h>
31 #include <libcfs/list.h>
32 #else
33 #include <linux/crypto.h>
34 #endif
35
36 #include <obd.h>
37 #include <obd_cksum.h>
38 #include <obd_class.h>
39 #include <obd_support.h>
40 #include <lustre_net.h>
41 #include <lustre_import.h>
42 #include <lustre_dlm.h>
43 #include <lustre_sec.h>
44
45 #include "ptlrpc_internal.h"
46
47 /****************************************
48  * bulk encryption page pools           *
49  ****************************************/
50
51 #ifdef __KERNEL__
52
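/*
 * each pool is one memory page used as an array of cfs_page_t pointers,
 * so a single pool can track PTRS_PER_PAGE pages.
 */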
53 #define PTRS_PER_PAGE   (CFS_PAGE_SIZE / sizeof(void *))
54 #define PAGES_PER_POOL  (PTRS_PER_PAGE)
55
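/*
 * the idle index ranges from 0 (busy) to IDLE_IDX_MAX (fully idle);
 * IDLE_IDX_WEIGHT is the weight given to the previous value when the
 * index is updated as a moving average.
 */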
56 #define IDLE_IDX_MAX            (100)
57 #define IDLE_IDX_WEIGHT         (3)
58
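/* seconds without any pool access after which the pools count as fully idle */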
59 #define CACHE_QUIESCENCE_PERIOD (20)
60
61 static struct ptlrpc_enc_page_pool {
62         /*
63          * constants
64          */
65         unsigned long    epp_max_pages;   /* maximum pages the pools can hold, const */
66         unsigned int     epp_max_pools;   /* number of pools, const */
67
68         /*
69          * wait queue for threads blocked on lack of free pages.
70          */
71         cfs_waitq_t      epp_waitq;       /* waiting threads */
72         unsigned int     epp_waitqlen;    /* wait queue length */
73         unsigned long    epp_pages_short; /* # of pages wanted by queued users */
74         unsigned int     epp_growing:1;   /* during adding pages */
75
76         /*
77          * index of how idle the pools are, from 0 to IDLE_IDX_MAX.
78          * it is updated each time pages are taken from the pools,
79          * not on a timer, so the index may still be low after the
80          * system has been idle for a while if there has been no
81          * activity in the pools.
82          */
83         unsigned long    epp_idle_idx;
84
85         /* time of the last shrink due to memory pressure */
86         long             epp_last_shrink;
87         long             epp_last_access;
88
89         /*
90          * in-pool pages bookkeeping
91          */
92         spinlock_t       epp_lock;        /* protect following fields */
93         unsigned long    epp_total_pages; /* total pages in pools */
94         unsigned long    epp_free_pages;  /* current pages available */
95
96         /*
97          * statistics
98          */
99         unsigned int     epp_st_grows;          /* # of grows */
100         unsigned int     epp_st_grow_fails;     /* # of add pages failures */
101         unsigned int     epp_st_shrinks;        /* # of shrinks */
102         unsigned long    epp_st_access;         /* # of accesses */
103         unsigned long    epp_st_missings;       /* # of cache misses */
104         unsigned long    epp_st_lowfree;        /* lowest free pages reached */
105         unsigned long    epp_st_max_wqlen;      /* highest waitqueue length */
106         cfs_time_t       epp_st_max_wait;       /* in jiffies */
107         /*
108          * pointers to pools
109          */
110         cfs_page_t    ***epp_pools;
111 } page_pools;
112
113 /*
114  * memory shrinker
115  */
116 const int pools_shrinker_seeks = DEFAULT_SEEKS;
117 static struct shrinker *pools_shrinker = NULL;
118
119
120 /*
121  * /proc/fs/lustre/sptlrpc/encrypt_page_pools
122  */
123 int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
124                                int *eof, void *data)
125 {
126         int     rc;
127
128         spin_lock(&page_pools.epp_lock);
129
130         rc = snprintf(page, count,
131                       "physical pages:          %lu\n"
132                       "pages per pool:          %lu\n"
133                       "max pages:               %lu\n"
134                       "max pools:               %u\n"
135                       "total pages:             %lu\n"
136                       "total free:              %lu\n"
137                       "idle index:              %lu/100\n"
138                       "last shrink:             %lds\n"
139                       "last access:             %lds\n"
140                       "grows:                   %u\n"
141                       "grows failure:           %u\n"
142                       "shrinks:                 %u\n"
143                       "cache access:            %lu\n"
144                       "cache missing:           %lu\n"
145                       "low free mark:           %lu\n"
146                       "max waitqueue depth:     %lu\n"
147                       "max wait time:           "CFS_TIME_T"/%u\n"
148                       ,
149                       num_physpages,
150                       PAGES_PER_POOL,
151                       page_pools.epp_max_pages,
152                       page_pools.epp_max_pools,
153                       page_pools.epp_total_pages,
154                       page_pools.epp_free_pages,
155                       page_pools.epp_idle_idx,
156                       cfs_time_current_sec() - page_pools.epp_last_shrink,
157                       cfs_time_current_sec() - page_pools.epp_last_access,
158                       page_pools.epp_st_grows,
159                       page_pools.epp_st_grow_fails,
160                       page_pools.epp_st_shrinks,
161                       page_pools.epp_st_access,
162                       page_pools.epp_st_missings,
163                       page_pools.epp_st_lowfree,
164                       page_pools.epp_st_max_wqlen,
165                       page_pools.epp_st_max_wait, HZ
166                      );
167
168         spin_unlock(&page_pools.epp_lock);
169         return rc;
170 }
171
172 static void enc_pools_release_free_pages(long npages)
173 {
174         int     p_idx, g_idx;
175
176         LASSERT(npages <= page_pools.epp_free_pages);
177
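        /* free pages occupy slots [0, epp_free_pages) across the pools;
         * walk backward from the last free slot and free @npages of them */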
178         p_idx = (page_pools.epp_free_pages - 1) / PAGES_PER_POOL;
179         g_idx = (page_pools.epp_free_pages - 1) % PAGES_PER_POOL;
180         LASSERT(page_pools.epp_pools[p_idx]);
181
182         page_pools.epp_free_pages -= npages;
183         page_pools.epp_total_pages -= npages;
184
185         while (npages-- > 0) {
186                 LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
187
188                 cfs_free_page(page_pools.epp_pools[p_idx][g_idx]);
189                 page_pools.epp_pools[p_idx][g_idx] = NULL;
190
191                 if (g_idx-- == 0) {
192                         p_idx--;
193                         g_idx = PAGES_PER_POOL - 1;
194
195                         LASSERT(page_pools.epp_pools[p_idx]);
196                 }
197         }
198 }
199
200 /*
201  * may be called frequently just to query the free count (@nr_to_scan == 0)
202  */
203 static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
204 {
205         unsigned long   ret;
206
207         spin_lock(&page_pools.epp_lock);
208
209         if (nr_to_scan) {
210                 if (nr_to_scan > page_pools.epp_free_pages)
211                         nr_to_scan = page_pools.epp_free_pages;
212
213                 enc_pools_release_free_pages(nr_to_scan);
214                 CDEBUG(D_SEC, "released %d pages, %ld left\n",
215                        nr_to_scan, page_pools.epp_free_pages);
216
217                 page_pools.epp_st_shrinks++;
218                 page_pools.epp_last_shrink = cfs_time_current_sec();
219         }
220
221         /*
222          * try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool
223          */
224         if (page_pools.epp_free_pages <= PTLRPC_MAX_BRW_PAGES) {
225                 ret = 0;
226                 goto out_unlock;
227         }
228
229         /*
230          * if no pool access for a long time, we consider it's fully idle
231          */
232         if (cfs_time_current_sec() - page_pools.epp_last_access >
233             CACHE_QUIESCENCE_PERIOD)
234                 page_pools.epp_idle_idx = IDLE_IDX_MAX;
235
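        /* offer to free a number of pages proportional to how idle the pools
         * are, but never go below the PTLRPC_MAX_BRW_PAGES reserve */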
236         LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
237         ret = (page_pools.epp_free_pages * page_pools.epp_idle_idx /
238                IDLE_IDX_MAX);
239         if (page_pools.epp_free_pages - ret < PTLRPC_MAX_BRW_PAGES)
240                 ret = page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES;
241
242 out_unlock:
243         spin_unlock(&page_pools.epp_lock);
244         return ret;
245 }
246
247 static inline
248 int npages_to_npools(unsigned long npages)
249 {
250         return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
251 }
252
253 /*
254  * return how many pages cleaned up.
255  */
256 static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
257 {
258         unsigned long cleaned = 0;
259         int           i, j;
260
261         for (i = 0; i < npools; i++) {
262                 if (pools[i]) {
263                         for (j = 0; j < PAGES_PER_POOL; j++) {
264                                 if (pools[i][j]) {
265                                         cfs_free_page(pools[i][j]);
266                                         cleaned++;
267                                 }
268                         }
269                         OBD_FREE(pools[i], CFS_PAGE_SIZE);
270                         pools[i] = NULL;
271                 }
272         }
273
274         return cleaned;
275 }
276
277 /*
278  * merge the @npools pools pointed to by @pools, which contain @npages new
279  * pages, into the current pools.
280  *
281  * we could avoid most of the memory copying with some tricks, but we take
282  * the simplest approach to keep the code clear; this path is rarely called.
283  */
284 static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
285 {
286         int     freeslot;
287         int     op_idx, np_idx, og_idx, ng_idx;
288         int     cur_npools, end_npools;
289
290         LASSERT(npages > 0);
291         LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
292         LASSERT(npages_to_npools(npages) == npools);
293
294         spin_lock(&page_pools.epp_lock);
295
296         /*
297          * (1) fill all the free slots of current pools.
298          */
299         /* free slots are those left by rented-out pages, plus the extra
300          * slots with index >= epp_total_pages at the tail of the last pool. */
301         freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
302         if (freeslot != 0)
303                 freeslot = PAGES_PER_POOL - freeslot;
304         freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;
305
306         op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
307         og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
308         np_idx = npools - 1;
309         ng_idx = (npages - 1) % PAGES_PER_POOL;
310
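        /* op_/og_idx walk forward through the free slots of the existing
         * pools while np_/ng_idx walk backward from the last new page, so
         * the pages remaining in @pools stay packed at the front */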
311         while (freeslot) {
312                 LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
313                 LASSERT(pools[np_idx][ng_idx] != NULL);
314
315                 page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
316                 pools[np_idx][ng_idx] = NULL;
317
318                 freeslot--;
319
320                 if (++og_idx == PAGES_PER_POOL) {
321                         op_idx++;
322                         og_idx = 0;
323                 }
324                 if (--ng_idx < 0) {
325                         if (np_idx == 0)
326                                 break;
327                         np_idx--;
328                         ng_idx = PAGES_PER_POOL - 1;
329                 }
330         }
331
332         /*
333          * (2) add pools if needed.
334          */
335         cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
336                      PAGES_PER_POOL;
337         end_npools = (page_pools.epp_total_pages + npages + PAGES_PER_POOL -1) /
338                      PAGES_PER_POOL;
339         LASSERT(end_npools <= page_pools.epp_max_pools);
340
341         np_idx = 0;
342         while (cur_npools < end_npools) {
343                 LASSERT(page_pools.epp_pools[cur_npools] == NULL);
344                 LASSERT(np_idx < npools);
345                 LASSERT(pools[np_idx] != NULL);
346
347                 page_pools.epp_pools[cur_npools++] = pools[np_idx];
348                 pools[np_idx++] = NULL;
349         }
350
351         page_pools.epp_total_pages += npages;
352         page_pools.epp_free_pages += npages;
353         page_pools.epp_st_lowfree = page_pools.epp_free_pages;
354
355         CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
356                page_pools.epp_total_pages);
357
358         spin_unlock(&page_pools.epp_lock);
359 }
360
361 static int enc_pools_add_pages(int npages)
362 {
363         static DECLARE_MUTEX(sem_add_pages);
364         cfs_page_t   ***pools;
365         int             npools, alloced = 0;
366         int             i, j, rc = -ENOMEM;
367
368         if (npages < PTLRPC_MAX_BRW_PAGES)
369                 npages = PTLRPC_MAX_BRW_PAGES;
370
371         down(&sem_add_pages);
372
373         if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
374                 npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
375         LASSERT(npages > 0);
376
377         page_pools.epp_st_grows++;
378
379         npools = npages_to_npools(npages);
380         OBD_ALLOC(pools, npools * sizeof(*pools));
381         if (pools == NULL)
382                 goto out;
383
384         for (i = 0; i < npools; i++) {
385                 OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
386                 if (pools[i] == NULL)
387                         goto out_pools;
388
389                 for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
390                         pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
391                                                      CFS_ALLOC_HIGH);
392                         if (pools[i][j] == NULL)
393                                 goto out_pools;
394
395                         alloced++;
396                 }
397         }
398
399         enc_pools_insert(pools, npools, npages);
400         CDEBUG(D_SEC, "added %d pages into pools\n", npages);
401         rc = 0;
402
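        /* on success enc_pools_insert() has taken over the pages and pool
         * arrays it needed and zeroed their pointers, so the cleanup below
         * only frees whatever is left over */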
403 out_pools:
404         enc_pools_cleanup(pools, npools);
405         OBD_FREE(pools, npools * sizeof(*pools));
406 out:
407         if (rc) {
408                 page_pools.epp_st_grow_fails++;
409                 CERROR("Failed to allocate %d enc pages\n", npages);
410         }
411
412         up(&sem_add_pages);
413         return rc;
414 }
415
416 static inline void enc_pools_wakeup(void)
417 {
418         if (unlikely(page_pools.epp_waitqlen)) {
419                 LASSERT(page_pools.epp_waitqlen > 0);
420                 LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
421                 cfs_waitq_broadcast(&page_pools.epp_waitq);
422         }
423 }
424
425 static int enc_pools_should_grow(int page_needed, long now)
426 {
427         /* don't grow if someone else is growing the pools right now,
428          * or the pools have already reached full capacity
429          */
430         if (page_pools.epp_growing ||
431             page_pools.epp_total_pages == page_pools.epp_max_pages)
432                 return 0;
433
434         /* if the total number of pages is not enough, we need to grow */
435         if (page_pools.epp_total_pages < page_needed)
436                 return 1;
437
438         /* if we just shrank because memory was tight, wait a while
439          * before growing again.
440          */
441         if (now - page_pools.epp_last_shrink < 2)
442                 return 0;
443
444         /*
445          * perhaps we should also consider other factors here, such as
446          * the wait queue length, the idle index, etc.
447          */
448
449         /* in all other cases, grow the pools */
450         return 1;
451 }
452
453 /*
454  * allocate all of @desc's pages from the pools in one go.
455  */
456 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
457 {
458         cfs_waitlink_t  waitlink;
459         unsigned long   this_idle = -1;
460         cfs_time_t      tick = 0;
461         long            now;
462         int             p_idx, g_idx;
463         int             i;
464
465         LASSERT(desc->bd_max_iov > 0);
466         LASSERT(desc->bd_max_iov <= page_pools.epp_max_pages);
467
468         /* resent bulk, enc pages might have been allocated previously */
469         if (desc->bd_enc_pages != NULL)
470                 return 0;
471
472         OBD_ALLOC(desc->bd_enc_pages,
473                   desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
474         if (desc->bd_enc_pages == NULL)
475                 return -ENOMEM;
476
477         spin_lock(&page_pools.epp_lock);
478
479         page_pools.epp_st_access++;
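        /*
         * if the pools are short of pages we either grow them (when allowed)
         * or sleep on the waitqueue until pages are returned, then retry
         * from the "again" label below.
         */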
480 again:
481         if (unlikely(page_pools.epp_free_pages < desc->bd_max_iov)) {
482                 if (tick == 0)
483                         tick = cfs_time_current();
484
485                 now = cfs_time_current_sec();
486
487                 page_pools.epp_st_missings++;
488                 page_pools.epp_pages_short += desc->bd_max_iov;
489
490                 if (enc_pools_should_grow(desc->bd_max_iov, now)) {
491                         page_pools.epp_growing = 1;
492
493                         spin_unlock(&page_pools.epp_lock);
494                         enc_pools_add_pages(page_pools.epp_pages_short / 2);
495                         spin_lock(&page_pools.epp_lock);
496
497                         page_pools.epp_growing = 0;
498                 } else {
499                         if (++page_pools.epp_waitqlen >
500                             page_pools.epp_st_max_wqlen)
501                                 page_pools.epp_st_max_wqlen =
502                                                 page_pools.epp_waitqlen;
503
504                         set_current_state(TASK_UNINTERRUPTIBLE);
505                         cfs_waitlink_init(&waitlink);
506                         cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
507
508                         spin_unlock(&page_pools.epp_lock);
509                         cfs_schedule();
510                         spin_lock(&page_pools.epp_lock);
511
512                         LASSERT(page_pools.epp_waitqlen > 0);
513                         page_pools.epp_waitqlen--;
514                 }
515
516                 LASSERT(page_pools.epp_pages_short >= desc->bd_max_iov);
517                 page_pools.epp_pages_short -= desc->bd_max_iov;
518
519                 this_idle = 0;
520                 goto again;
521         }
522
523         /* record max wait time */
524         if (unlikely(tick != 0)) {
525                 tick = cfs_time_current() - tick;
526                 if (tick > page_pools.epp_st_max_wait)
527                         page_pools.epp_st_max_wait = tick;
528         }
529
530         /* proceed with rest of allocation */
531         page_pools.epp_free_pages -= desc->bd_max_iov;
532
533         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
534         g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
535
536         for (i = 0; i < desc->bd_max_iov; i++) {
537                 LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
538                 desc->bd_enc_pages[i] = page_pools.epp_pools[p_idx][g_idx];
539                 page_pools.epp_pools[p_idx][g_idx] = NULL;
540
541                 if (++g_idx == PAGES_PER_POOL) {
542                         p_idx++;
543                         g_idx = 0;
544                 }
545         }
546
547         if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
548                 page_pools.epp_st_lowfree = page_pools.epp_free_pages;
549
550         /*
551          * new idle index = (old * weight + new) / (weight + 1)
552          */
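        /* this_idle is the instantaneous free-page ratio scaled to
         * IDLE_IDX_MAX; it was forced to 0 above if we had to wait */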
553         if (this_idle == -1) {
554                 this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
555                             page_pools.epp_total_pages;
556         }
557         page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
558                                    this_idle) /
559                                   (IDLE_IDX_WEIGHT + 1);
560
561         page_pools.epp_last_access = cfs_time_current_sec();
562
563         spin_unlock(&page_pools.epp_lock);
564         return 0;
565 }
566 EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
567
568 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
569 {
570         int     p_idx, g_idx;
571         int     i;
572
573         if (desc->bd_enc_pages == NULL)
574                 return;
575         if (desc->bd_max_iov == 0)
576                 return;
577
578         spin_lock(&page_pools.epp_lock);
579
580         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
581         g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
582
583         LASSERT(page_pools.epp_free_pages + desc->bd_max_iov <=
584                 page_pools.epp_total_pages);
585         LASSERT(page_pools.epp_pools[p_idx]);
586
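        /* slot the pages back in right after the currently free pages */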
587         for (i = 0; i < desc->bd_max_iov; i++) {
588                 LASSERT(desc->bd_enc_pages[i] != NULL);
589                 LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
590                 LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
591
592                 page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_pages[i];
593
594                 if (++g_idx == PAGES_PER_POOL) {
595                         p_idx++;
596                         g_idx = 0;
597                 }
598         }
599
600         page_pools.epp_free_pages += desc->bd_max_iov;
601
602         enc_pools_wakeup();
603
604         spin_unlock(&page_pools.epp_lock);
605
606         OBD_FREE(desc->bd_enc_pages,
607                  desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
608         desc->bd_enc_pages = NULL;
609 }
610 EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
611
612 /*
613  * we don't do much for add_user/del_user anymore, except adding some
614  * initial pages in add_user() if the pools are currently empty; the rest
615  * is handled by the pools' self-adaptation.
616  */
617 int sptlrpc_enc_pool_add_user(void)
618 {
619         int     need_grow = 0;
620
621         spin_lock(&page_pools.epp_lock);
622         if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
623                 page_pools.epp_growing = 1;
624                 need_grow = 1;
625         }
626         spin_unlock(&page_pools.epp_lock);
627
628         if (need_grow) {
629                 enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES);
630
631                 spin_lock(&page_pools.epp_lock);
632                 page_pools.epp_growing = 0;
633                 enc_pools_wakeup();
634                 spin_unlock(&page_pools.epp_lock);
635         }
636         return 0;
637 }
638 EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
639
640 int sptlrpc_enc_pool_del_user(void)
641 {
642         return 0;
643 }
644 EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);
645
646 static inline void enc_pools_alloc(void)
647 {
648         LASSERT(page_pools.epp_max_pools);
649         /*
650          * on a system with huge memory but a small page size this might
651          * lead to a high-order allocation.  that is not common, and we
652          * assume memory is not too fragmented at module load time.
653          */
654         OBD_ALLOC(page_pools.epp_pools,
655                   page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
656 }
657
658 static inline void enc_pools_free(void)
659 {
660         LASSERT(page_pools.epp_max_pools);
661         LASSERT(page_pools.epp_pools);
662
663         OBD_FREE(page_pools.epp_pools,
664                  page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
665 }
666
667 int sptlrpc_enc_pool_init(void)
668 {
669         /*
670          * maximum capacity is 1/8 of total physical memory.
671          * is 1/8 a good ratio?
672          */
673         page_pools.epp_max_pages = num_physpages / 8;
674         page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
675
676         cfs_waitq_init(&page_pools.epp_waitq);
677         page_pools.epp_waitqlen = 0;
678         page_pools.epp_pages_short = 0;
679
680         page_pools.epp_growing = 0;
681
682         page_pools.epp_idle_idx = 0;
683         page_pools.epp_last_shrink = cfs_time_current_sec();
684         page_pools.epp_last_access = cfs_time_current_sec();
685
686         spin_lock_init(&page_pools.epp_lock);
687         page_pools.epp_total_pages = 0;
688         page_pools.epp_free_pages = 0;
689
690         page_pools.epp_st_grows = 0;
691         page_pools.epp_st_grow_fails = 0;
692         page_pools.epp_st_shrinks = 0;
693         page_pools.epp_st_access = 0;
694         page_pools.epp_st_missings = 0;
695         page_pools.epp_st_lowfree = 0;
696         page_pools.epp_st_max_wqlen = 0;
697         page_pools.epp_st_max_wait = 0;
698
699         enc_pools_alloc();
700         if (page_pools.epp_pools == NULL)
701                 return -ENOMEM;
702
703         pools_shrinker = set_shrinker(pools_shrinker_seeks, enc_pools_shrink);
704         if (pools_shrinker == NULL) {
705                 enc_pools_free();
706                 return -ENOMEM;
707         }
708
709         return 0;
710 }
711
712 void sptlrpc_enc_pool_fini(void)
713 {
714         unsigned long cleaned, npools;
715
716         LASSERT(pools_shrinker);
717         LASSERT(page_pools.epp_pools);
718         LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
719
720         remove_shrinker(pools_shrinker);
721
722         npools = npages_to_npools(page_pools.epp_total_pages);
723         cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
724         LASSERT(cleaned == page_pools.epp_total_pages);
725
726         enc_pools_free();
727 }
728
729 #else /* !__KERNEL__ */
730
731 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
732 {
733         return 0;
734 }
735
736 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
737 {
738 }
739
740 int sptlrpc_enc_pool_init(void)
741 {
742         return 0;
743 }
744
745 void sptlrpc_enc_pool_fini(void)
746 {
747 }
748 #endif
749
750 /****************************************
751  * Helpers for policy modules to        *
752  * implement checksum functionality     *
753  ****************************************/
754
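/* each entry is { human-readable name, crypto tfm name, digest size in bytes } */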
755 static struct sptlrpc_hash_type hash_types[] = {
756         [BULK_HASH_ALG_NULL]    = { "null",     "null",         0 },
757         [BULK_HASH_ALG_ADLER32] = { "adler32",  "adler32",      4 },
758         [BULK_HASH_ALG_CRC32]   = { "crc32",    "crc32",        4 },
759         [BULK_HASH_ALG_MD5]     = { "md5",      "md5",          16 },
760         [BULK_HASH_ALG_SHA1]    = { "sha1",     "sha1",         20 },
761         [BULK_HASH_ALG_SHA256]  = { "sha256",   "sha256",       32 },
762         [BULK_HASH_ALG_SHA384]  = { "sha384",   "sha384",       48 },
763         [BULK_HASH_ALG_SHA512]  = { "sha512",   "sha512",       64 },
764         [BULK_HASH_ALG_WP256]   = { "wp256",    "wp256",        32 },
765         [BULK_HASH_ALG_WP384]   = { "wp384",    "wp384",        48 },
766         [BULK_HASH_ALG_WP512]   = { "wp512",    "wp512",        64 },
767 };
768
769 const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg)
770 {
771         struct sptlrpc_hash_type *ht;
772
773         if (hash_alg < BULK_HASH_ALG_MAX) {
774                 ht = &hash_types[hash_alg];
775                 if (ht->sht_tfm_name)
776                         return ht;
777         }
778         return NULL;
779 }
780 EXPORT_SYMBOL(sptlrpc_get_hash_type);
781
782 const char * sptlrpc_get_hash_name(__u8 hash_alg)
783 {
784         const struct sptlrpc_hash_type *ht;
785
786         ht = sptlrpc_get_hash_type(hash_alg);
787         if (ht)
788                 return ht->sht_name;
789         else
790                 return "unknown";
791 }
792 EXPORT_SYMBOL(sptlrpc_get_hash_name);
793
794 int bulk_sec_desc_size(__u8 hash_alg, int request, int read)
795 {
796         int size = sizeof(struct ptlrpc_bulk_sec_desc);
797
798         LASSERT(hash_alg < BULK_HASH_ALG_MAX);
799
800         /* read requests don't need extra data */
801         if (!(read && request))
802                 size += hash_types[hash_alg].sht_size;
803
804         return size;
805 }
806 EXPORT_SYMBOL(bulk_sec_desc_size);
807
808 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset)
809 {
810         struct ptlrpc_bulk_sec_desc *bsd;
811         int    size = msg->lm_buflens[offset];
812
813         bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
814         if (bsd == NULL) {
815                 CERROR("Invalid bulk sec desc: size %d\n", size);
816                 return -EINVAL;
817         }
818
819         /* nothing to swab */
820
821         if (unlikely(bsd->bsd_version != 0)) {
822                 CERROR("Unexpected version %u\n", bsd->bsd_version);
823                 return -EPROTO;
824         }
825
826         if (unlikely(bsd->bsd_flags != 0)) {
827                 CERROR("Unexpected flags %x\n", bsd->bsd_flags);
828                 return -EPROTO;
829         }
830
831         if (unlikely(!sptlrpc_get_hash_type(bsd->bsd_hash_alg))) {
832                 CERROR("Unsupported checksum algorithm %u\n",
833                        bsd->bsd_hash_alg);
834                 return -EINVAL;
835         }
836
837         if (unlikely(!sptlrpc_get_ciph_type(bsd->bsd_ciph_alg))) {
838                 CERROR("Unsupported cipher algorithm %u\n",
839                        bsd->bsd_ciph_alg);
840                 return -EINVAL;
841         }
842
843         if (unlikely(size > sizeof(*bsd)) &&
844             size < sizeof(*bsd) + hash_types[bsd->bsd_hash_alg].sht_size) {
845                 CERROR("Mal-formed checksum data: csum alg %u, size %d\n",
846                        bsd->bsd_hash_alg, size);
847                 return -EINVAL;
848         }
849
850         return 0;
851 }
852 EXPORT_SYMBOL(bulk_sec_desc_unpack);
853
854 #ifdef __KERNEL__
855
856 #ifdef HAVE_ADLER
857 static int do_bulk_checksum_adler32(struct ptlrpc_bulk_desc *desc, void *buf)
858 {
859         struct page    *page;
860         int             off;
861         char           *ptr;
862         __u32           csum = 1;
863         int             len, i;
864
865         for (i = 0; i < desc->bd_iov_count; i++) {
866                 page = desc->bd_iov[i].kiov_page;
867                 off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
868                 ptr = cfs_kmap(page) + off;
869                 len = desc->bd_iov[i].kiov_len;
870
871                 csum = adler32(csum, ptr, len);
872
873                 cfs_kunmap(page);
874         }
875
876         csum = cpu_to_le32(csum);
877         memcpy(buf, &csum, sizeof(csum));
878         return 0;
879 }
880 #endif
881
882 static int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
883 {
884         struct page    *page;
885         int             off;
886         char           *ptr;
887         __u32           crc32 = ~0;
888         int             len, i;
889
890         for (i = 0; i < desc->bd_iov_count; i++) {
891                 page = desc->bd_iov[i].kiov_page;
892                 off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
893                 ptr = cfs_kmap(page) + off;
894                 len = desc->bd_iov[i].kiov_len;
895
896                 crc32 = crc32_le(crc32, ptr, len);
897
898                 cfs_kunmap(page);
899         }
900
901         crc32 = cpu_to_le32(crc32);
902         memcpy(buf, &crc32, sizeof(crc32));
903         return 0;
904 }
905
906 static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
907 {
908         struct hash_desc    hdesc;
909         struct scatterlist *sl;
910         int i, rc = 0, bytes = 0;
911
912         LASSERT(alg > BULK_HASH_ALG_NULL &&
913                 alg < BULK_HASH_ALG_MAX);
914
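        /* adler32 and crc32 have dedicated helpers above; all other
         * algorithms go through the crypto hash API over a scatterlist
         * built from the bulk pages */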
915         switch (alg) {
916         case BULK_HASH_ALG_ADLER32:
917 #ifdef HAVE_ADLER
918                 return do_bulk_checksum_adler32(desc, buf);
919 #else
920                 CERROR("Adler32 not supported\n");
921                 return -EINVAL;
922 #endif
923         case BULK_HASH_ALG_CRC32:
924                 return do_bulk_checksum_crc32(desc, buf);
925         }
926
927         hdesc.tfm = ll_crypto_alloc_hash(hash_types[alg].sht_tfm_name, 0, 0);
928         if (hdesc.tfm == NULL) {
929                 CERROR("Unable to allocate TFM %s\n", hash_types[alg].sht_name);
930                 return -ENOMEM;
931         }
932         hdesc.flags = 0;
933
934         OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
935         if (sl == NULL) {
936                 rc = -ENOMEM;
937                 goto out_tfm;
938         }
939
940         for (i = 0; i < desc->bd_iov_count; i++) {
941                 sl[i].page = desc->bd_iov[i].kiov_page;
942                 sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
943                 sl[i].length = desc->bd_iov[i].kiov_len;
944                 bytes += desc->bd_iov[i].kiov_len;
945         }
946
947         ll_crypto_hash_init(&hdesc);
948         ll_crypto_hash_update(&hdesc, sl, bytes);
949         ll_crypto_hash_final(&hdesc, buf);
950
951         OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);
952
953 out_tfm:
954         ll_crypto_free_hash(hdesc.tfm);
955         return rc;
956 }
957
958 #else /* !__KERNEL__ */
959
960 static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
961 {
962         __u32   csum32;
963         int     i;
964
965         LASSERT(alg == BULK_HASH_ALG_ADLER32 || alg == BULK_HASH_ALG_CRC32);
966
967         if (alg == BULK_HASH_ALG_ADLER32)
968                 csum32 = 1;
969         else
970                 csum32 = ~0;
971
972         for (i = 0; i < desc->bd_iov_count; i++) {
973                 char *ptr = desc->bd_iov[i].iov_base;
974                 int len = desc->bd_iov[i].iov_len;
975
976                 switch (alg) {
977                 case BULK_HASH_ALG_ADLER32:
978 #ifdef HAVE_ADLER
979                         csum32 = adler32(csum32, ptr, len);
980 #else
981                         CERROR("Adler32 not supported\n");
982                         return -EINVAL;
983 #endif
984                         break;
985                 case BULK_HASH_ALG_CRC32:
986                         csum32 = crc32_le(csum32, ptr, len);
987                         break;
988                 }
989         }
990
991         csum32 = cpu_to_le32(csum32);
992         memcpy(buf, &csum32, sizeof(csum32));
993         return 0;
994 }
995
996 #endif /* __KERNEL__ */
997
998 /*
999  * compute the @alg checksum of @desc and store the result in @bsd->bsd_csum.
1000  * if anything goes wrong, @bsd->bsd_hash_alg is left as BULK_HASH_ALG_NULL.
1001  */
1002 static
1003 int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
1004                        struct ptlrpc_bulk_sec_desc *bsd, int bsdsize)
1005 {
1006         int rc;
1007
1008         LASSERT(bsd);
1009         LASSERT(alg < BULK_HASH_ALG_MAX);
1010
1011         bsd->bsd_hash_alg = BULK_HASH_ALG_NULL;
1012
1013         if (alg == BULK_HASH_ALG_NULL)
1014                 return 0;
1015
1016         LASSERT(bsdsize >= sizeof(*bsd) + hash_types[alg].sht_size);
1017
1018         rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
1019         if (rc == 0)
1020                 bsd->bsd_hash_alg = alg;
1021
1022         return rc;
1023 }
1024
1025 static
1026 int verify_bulk_csum(struct ptlrpc_bulk_desc *desc, int read,
1027                      struct ptlrpc_bulk_sec_desc *bsdv, int bsdvsize,
1028                      struct ptlrpc_bulk_sec_desc *bsdr, int bsdrsize)
1029 {
1030         char *csum_p;
1031         char *buf = NULL;
1032         int   csum_size, rc = 0;
1033
1034         LASSERT(bsdv);
1035         LASSERT(bsdv->bsd_hash_alg < BULK_HASH_ALG_MAX);
1036
1037         if (bsdr)
1038                 bsdr->bsd_hash_alg = BULK_HASH_ALG_NULL;
1039
1040         if (bsdv->bsd_hash_alg == BULK_HASH_ALG_NULL)
1041                 return 0;
1042
1043         /* for all supported algorithms */
1044         csum_size = hash_types[bsdv->bsd_hash_alg].sht_size;
1045
1046         if (bsdvsize < sizeof(*bsdv) + csum_size) {
1047                 CERROR("verifier size %d too small, require %d\n",
1048                        bsdvsize, (int) sizeof(*bsdv) + csum_size);
1049                 return -EINVAL;
1050         }
1051
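        /* when a reply descriptor @bsdr is supplied the recomputed checksum
         * is written into it for the peer; otherwise use a scratch buffer */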
1052         if (bsdr) {
1053                 LASSERT(bsdrsize >= sizeof(*bsdr) + csum_size);
1054                 csum_p = (char *) bsdr->bsd_csum;
1055         } else {
1056                 OBD_ALLOC(buf, csum_size);
1057                 if (buf == NULL)
1058                         return -EINVAL;
1059                 csum_p = buf;
1060         }
1061
1062         rc = do_bulk_checksum(desc, bsdv->bsd_hash_alg, csum_p);
1063
1064         if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
1065                 CERROR("BAD %s CHECKSUM (%s), data mutated during "
1066                        "transfer!\n", read ? "READ" : "WRITE",
1067                        hash_types[bsdv->bsd_hash_alg].sht_name);
1068                 rc = -EINVAL;
1069         } else {
1070                 CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
1071                       read ? "read" : "write",
1072                       hash_types[bsdv->bsd_hash_alg].sht_name);
1073         }
1074
1075         if (bsdr) {
1076                 bsdr->bsd_hash_alg = bsdv->bsd_hash_alg;
1077                 memcpy(bsdr->bsd_csum, csum_p, csum_size);
1078         } else {
1079                 LASSERT(buf);
1080                 OBD_FREE(buf, csum_size);
1081         }
1082
1083         return rc;
1084 }
1085
1086 int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
1087                           __u32 alg, struct lustre_msg *rmsg, int roff)
1088 {
1089         struct ptlrpc_bulk_sec_desc *bsdr;
1090         int    rsize, rc = 0;
1091
1092         rsize = rmsg->lm_buflens[roff];
1093         bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
1094
1095         LASSERT(bsdr);
1096         LASSERT(rsize >= sizeof(*bsdr));
1097         LASSERT(alg < BULK_HASH_ALG_MAX);
1098
1099         if (read) {
1100                 bsdr->bsd_hash_alg = alg;
1101         } else {
1102                 rc = generate_bulk_csum(desc, alg, bsdr, rsize);
1103                 if (rc)
1104                         CERROR("bulk write: client failed to compute "
1105                                "checksum: %d\n", rc);
1106
1107                 /* For sending we only compute the wrong checksum instead
1108                  * of corrupting the data so it is still correct on a redo */
1109                 if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
1110                     bsdr->bsd_hash_alg != BULK_HASH_ALG_NULL)
1111                         bsdr->bsd_csum[0] ^= 0x1;
1112         }
1113
1114         return rc;
1115 }
1116 EXPORT_SYMBOL(bulk_csum_cli_request);
1117
1118 int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
1119                         struct lustre_msg *rmsg, int roff,
1120                         struct lustre_msg *vmsg, int voff)
1121 {
1122         struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
1123         int    rsize, vsize;
1124
1125         rsize = rmsg->lm_buflens[roff];
1126         vsize = vmsg->lm_buflens[voff];
1127         bsdr = lustre_msg_buf(rmsg, roff, 0);
1128         bsdv = lustre_msg_buf(vmsg, voff, 0);
1129
1130         if (bsdv == NULL || vsize < sizeof(*bsdv)) {
1131                 CERROR("Invalid checksum verifier from server: size %d\n",
1132                        vsize);
1133                 return -EINVAL;
1134         }
1135
1136         LASSERT(bsdr);
1137         LASSERT(rsize >= sizeof(*bsdr));
1138         LASSERT(vsize >= sizeof(*bsdv));
1139
1140         if (bsdr->bsd_hash_alg != bsdv->bsd_hash_alg) {
1141                 CERROR("bulk %s: checksum algorithm mismatch: client request "
1142                        "%s but server reply with %s. try to use the new one "
1143                        "for checksum verification\n",
1144                        read ? "read" : "write",
1145                        hash_types[bsdr->bsd_hash_alg].sht_name,
1146                        hash_types[bsdv->bsd_hash_alg].sht_name);
1147         }
1148
1149         if (read)
1150                 return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
1151         else {
1152                 char *cli, *srv, *new = NULL;
1153                 int csum_size = hash_types[bsdr->bsd_hash_alg].sht_size;
1154
1155                 LASSERT(bsdr->bsd_hash_alg < BULK_HASH_ALG_MAX);
1156                 if (bsdr->bsd_hash_alg == BULK_HASH_ALG_NULL)
1157                         return 0;
1158
1159                 if (vsize < sizeof(*bsdv) + csum_size) {
1160                         CERROR("verifier size %d too small, require %d\n",
1161                                vsize, (int) sizeof(*bsdv) + csum_size);
1162                         return -EINVAL;
1163                 }
1164
1165                 cli = (char *) (bsdr + 1);
1166                 srv = (char *) (bsdv + 1);
1167
1168                 if (!memcmp(cli, srv, csum_size)) {
1169                         /* checksum confirmed */
1170                         CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
1171                                hash_types[bsdr->bsd_hash_alg].sht_name);
1172                         return 0;
1173                 }
1174
1175                 /* checksum mismatch: recompute a fresh checksum, compare it
1176                  * with the others, and emit the appropriate warning. */
1177                 OBD_ALLOC(new, csum_size);
1178                 if (new == NULL)
1179                         return -ENOMEM;
1180
1181                 do_bulk_checksum(desc, bsdr->bsd_hash_alg, new);
1182
1183                 if (!memcmp(new, srv, csum_size)) {
1184                         CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
1185                                "on the client after we checksummed them\n",
1186                                hash_types[bsdr->bsd_hash_alg].sht_name);
1187                 } else if (!memcmp(new, cli, csum_size)) {
1188                         CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
1189                                "in transit\n",
1190                                hash_types[bsdr->bsd_hash_alg].sht_name);
1191                 } else {
1192                         CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
1193                                "in transit, and the current page contents "
1194                                "don't match the originals and what the server "
1195                                "received\n",
1196                                hash_types[bsdr->bsd_hash_alg].sht_name);
1197                 }
1198                 OBD_FREE(new, csum_size);
1199
1200                 return -EINVAL;
1201         }
1202 }
1203 EXPORT_SYMBOL(bulk_csum_cli_reply);
1204
1205 #ifdef __KERNEL__
1206 static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
1207 {
1208         char           *ptr;
1209         unsigned int    off, i;
1210
1211         for (i = 0; i < desc->bd_iov_count; i++) {
1212                 if (desc->bd_iov[i].kiov_len == 0)
1213                         continue;
1214
1215                 ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
1216                 off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
1217                 ptr[off] ^= 0x1;
1218                 cfs_kunmap(desc->bd_iov[i].kiov_page);
1219                 return;
1220         }
1221 }
1222 #else
1223 static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
1224 {
1225 }
1226 #endif /* __KERNEL__ */
1227
1228 int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
1229                   struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
1230                   struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
1231 {
1232         int    rc;
1233
1234         LASSERT(vsize >= sizeof(*bsdv));
1235         LASSERT(rsize >= sizeof(*bsdr));
1236         LASSERT(bsdv && bsdr);
1237
1238         if (read) {
1239                 rc = generate_bulk_csum(desc, bsdv->bsd_hash_alg, bsdr, rsize);
1240                 if (rc)
1241                         CERROR("bulk read: server failed to generate %s "
1242                                "checksum: %d\n",
1243                                hash_types[bsdv->bsd_hash_alg].sht_name, rc);
1244
1245                 /* corrupt the data after we compute the checksum, to
1246                  * simulate an OST->client data error */
1247                 if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
1248                         corrupt_bulk_data(desc);
1249         } else {
1250                 rc = verify_bulk_csum(desc, 0, bsdv, vsize, bsdr, rsize);
1251         }
1252
1253         return rc;
1254 }
1255 EXPORT_SYMBOL(bulk_csum_svc);
1256
1257 /****************************************
1258  * Helpers for policy modules to        *
1259  * implement encryption functionality   *
1260  ****************************************/
1261
1262 /* FIXME */
1263 #ifndef __KERNEL__
1264 #define CRYPTO_TFM_MODE_ECB     (0)
1265 #define CRYPTO_TFM_MODE_CBC     (1)
1266 #endif
1267
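/* each entry is { name, crypto tfm name, tfm flags, iv size, key size in bytes } */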
1268 static struct sptlrpc_ciph_type cipher_types[] = {
1269         [BULK_CIPH_ALG_NULL]    = {
1270                 "null",         "null",       0,                   0,  0
1271         },
1272         [BULK_CIPH_ALG_ARC4]    = {
1273                 "arc4",         "ecb(arc4)",       0, 0,  16
1274         },
1275         [BULK_CIPH_ALG_AES128]  = {
1276                 "aes128",       "cbc(aes)",        0, 16, 16
1277         },
1278         [BULK_CIPH_ALG_AES192]  = {
1279                 "aes192",       "cbc(aes)",        0, 16, 24
1280         },
1281         [BULK_CIPH_ALG_AES256]  = {
1282                 "aes256",       "cbc(aes)",        0, 16, 32
1283         },
1284         [BULK_CIPH_ALG_CAST128] = {
1285                 "cast128",      "cbc(cast5)",      0, 8,  16
1286         },
1287         [BULK_CIPH_ALG_CAST256] = {
1288                 "cast256",      "cbc(cast6)",      0, 16, 32
1289         },
1290         [BULK_CIPH_ALG_TWOFISH128] = {
1291                 "twofish128",   "cbc(twofish)",    0, 16, 16
1292         },
1293         [BULK_CIPH_ALG_TWOFISH256] = {
1294                 "twofish256",   "cbc(twofish)",    0, 16, 32
1295         },
1296 };
1297
1298 const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg)
1299 {
1300         struct sptlrpc_ciph_type *ct;
1301
1302         if (ciph_alg < BULK_CIPH_ALG_MAX) {
1303                 ct = &cipher_types[ciph_alg];
1304                 if (ct->sct_tfm_name)
1305                         return ct;
1306         }
1307         return NULL;
1308 }
1309 EXPORT_SYMBOL(sptlrpc_get_ciph_type);
1310
1311 const char *sptlrpc_get_ciph_name(__u8 ciph_alg)
1312 {
1313         const struct sptlrpc_ciph_type *ct;
1314
1315         ct = sptlrpc_get_ciph_type(ciph_alg);
1316         if (ct)
1317                 return ct->sct_name;
1318         else
1319                 return "unknown";
1320 }
1321 EXPORT_SYMBOL(sptlrpc_get_ciph_name);