1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2006-2007 Cluster File Systems, Inc.
5  *   Author: Eric Mei <ericm@clusterfs.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #ifndef EXPORT_SYMTAB
24 #define EXPORT_SYMTAB
25 #endif
26 #define DEBUG_SUBSYSTEM S_SEC
27
28 #include <libcfs/libcfs.h>
29 #ifndef __KERNEL__
30 #include <liblustre.h>
31 #include <libcfs/list.h>
32 #else
33 #include <linux/crypto.h>
34 #endif
35
36 #include <obd.h>
37 #include <obd_class.h>
38 #include <obd_support.h>
39 #include <lustre_net.h>
40 #include <lustre_import.h>
41 #include <lustre_dlm.h>
42 #include <lustre_sec.h>
43
44 #include "ptlrpc_internal.h"
45
46 /****************************************
47  * bulk encryption page pools           *
48  ****************************************/
49
50 #ifdef __KERNEL__
51
52 #define PTRS_PER_PAGE   (CFS_PAGE_SIZE / sizeof(void *))
53 #define PAGES_PER_POOL  (PTRS_PER_PAGE)
54
55 #define IDLE_IDX_MAX            (100)
56 #define IDLE_IDX_WEIGHT         (3)
57
58 #define CACHE_QUIESCENCE_PERIOD (20)
59
60 static struct ptlrpc_enc_page_pool {
61         /*
62          * constants
63          */
64         unsigned long    epp_max_pages;   /* max pages the pools can hold, const */
65         unsigned int     epp_max_pools;   /* number of pools, const */
66
67         /*
68          * wait queue in case of not enough free pages.
69          */
70         cfs_waitq_t      epp_waitq;       /* waiting threads */
71         unsigned int     epp_waitqlen;    /* wait queue length */
72         unsigned long    epp_pages_short; /* # of pages wanted by queued users */
73         unsigned int     epp_growing:1;   /* during adding pages */
74
75         /*
76          * indicates how idle the pools are, from 0 to IDLE_IDX_MAX.
77          * this index is updated each time pages are taken from the
78          * pools rather than on a timer, so even if the system has been
79          * idle for a while the idle_idx may still be low as long as no
80          * pool activity has occurred.
81          */
82         unsigned long    epp_idle_idx;
83
84         /* last shrink time due to memory pressure */
85         long             epp_last_shrink;
86         long             epp_last_access;
87
88         /*
89          * in-pool pages bookkeeping
90          */
91         spinlock_t       epp_lock;        /* protect following fields */
92         unsigned long    epp_total_pages; /* total pages in pools */
93         unsigned long    epp_free_pages;  /* current pages available */
94
95         /*
96          * statistics
97          */
98         unsigned int     epp_st_grows;          /* # of grows */
99         unsigned int     epp_st_grow_fails;     /* # of add pages failures */
100         unsigned int     epp_st_shrinks;        /* # of shrinks */
101         unsigned long    epp_st_access;         /* # of accesses */
102         unsigned long    epp_st_missings;       /* # of cache misses */
103         unsigned long    epp_st_lowfree;        /* lowest free pages reached */
104         unsigned long    epp_st_max_wqlen;      /* highest waitqueue length */
105         cfs_time_t       epp_st_max_wait;       /* in jiffies */
106         /*
107          * pointers to pools
108          */
109         cfs_page_t    ***epp_pools;
110 } page_pools;
111
112 /*
113  * memory shrinker
114  */
115 const int pools_shrinker_seeks = DEFAULT_SEEKS;
116 static struct shrinker *pools_shrinker = NULL;
117
118
119 /*
120  * /proc/fs/lustre/sptlrpc/encrypt_page_pools
121  */
122 int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
123                                int *eof, void *data)
124 {
125         int     rc;
126
127         spin_lock(&page_pools.epp_lock);
128
129         rc = snprintf(page, count,
130                       "physical pages:          %lu\n"
131                       "pages per pool:          %lu\n"
132                       "max pages:               %lu\n"
133                       "max pools:               %u\n"
134                       "total pages:             %lu\n"
135                       "total free:              %lu\n"
136                       "idle index:              %lu/100\n"
137                       "last shrink:             %lds\n"
138                       "last access:             %lds\n"
139                       "grows:                   %u\n"
140                       "grows failure:           %u\n"
141                       "shrinks:                 %u\n"
142                       "cache access:            %lu\n"
143                       "cache missing:           %lu\n"
144                       "low free mark:           %lu\n"
145                       "max waitqueue depth:     %lu\n"
146                       "max wait time:           "CFS_TIME_T"/%u\n"
147                       ,
148                       num_physpages,
149                       PAGES_PER_POOL,
150                       page_pools.epp_max_pages,
151                       page_pools.epp_max_pools,
152                       page_pools.epp_total_pages,
153                       page_pools.epp_free_pages,
154                       page_pools.epp_idle_idx,
155                       cfs_time_current_sec() - page_pools.epp_last_shrink,
156                       cfs_time_current_sec() - page_pools.epp_last_access,
157                       page_pools.epp_st_grows,
158                       page_pools.epp_st_grow_fails,
159                       page_pools.epp_st_shrinks,
160                       page_pools.epp_st_access,
161                       page_pools.epp_st_missings,
162                       page_pools.epp_st_lowfree,
163                       page_pools.epp_st_max_wqlen,
164                       page_pools.epp_st_max_wait, HZ
165                      );
166
167         spin_unlock(&page_pools.epp_lock);
168         return rc;
169 }
170
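/*
 * give @npages pages back to the kernel, taken from the tail of the
 * free-page region. called with epp_lock held; @npages must not exceed
 * epp_free_pages.
 */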
171 static void enc_pools_release_free_pages(long npages)
172 {
173         int     p_idx, g_idx;
174
175         LASSERT(npages <= page_pools.epp_free_pages);
176
177         p_idx = (page_pools.epp_free_pages - 1) / PAGES_PER_POOL;
178         g_idx = (page_pools.epp_free_pages - 1) % PAGES_PER_POOL;
179         LASSERT(page_pools.epp_pools[p_idx]);
180
181         page_pools.epp_free_pages -= npages;
182         page_pools.epp_total_pages -= npages;
183
184         while (npages-- > 0) {
185                 LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
186
187                 cfs_free_page(page_pools.epp_pools[p_idx][g_idx]);
188                 page_pools.epp_pools[p_idx][g_idx] = NULL;
189
190                 if (g_idx-- == 0) {
191                         p_idx--;
192                         g_idx = PAGES_PER_POOL - 1;
193
194                         LASSERT(page_pools.epp_pools[p_idx]);
195                 }
196         }
197 }
198
199 /*
200  * may be called frequently just for a query (@nr_to_scan == 0)
201  */
202 static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
203 {
204         unsigned long   ret;
205
206         spin_lock(&page_pools.epp_lock);
207
208         if (nr_to_scan) {
209                 if (nr_to_scan > page_pools.epp_free_pages)
210                         nr_to_scan = page_pools.epp_free_pages;
211
212                 enc_pools_release_free_pages(nr_to_scan);
213                 CDEBUG(D_SEC, "released %d pages, %ld left\n",
214                        nr_to_scan, page_pools.epp_free_pages);
215
216                 page_pools.epp_st_shrinks++;
217                 page_pools.epp_last_shrink = cfs_time_current_sec();
218         }
219
220         /*
221          * try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool
222          */
223         if (page_pools.epp_free_pages <= PTLRPC_MAX_BRW_PAGES) {
224                 ret = 0;
225                 goto out_unlock;
226         }
227
228         /*
229          * if the pools have not been accessed for a long time, consider them fully idle
230          */
231         if (cfs_time_current_sec() - page_pools.epp_last_access >
232             CACHE_QUIESCENCE_PERIOD)
233                 page_pools.epp_idle_idx = IDLE_IDX_MAX;
234
235         LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
236         ret = (page_pools.epp_free_pages * page_pools.epp_idle_idx /
237                IDLE_IDX_MAX);
238         if (page_pools.epp_free_pages - ret < PTLRPC_MAX_BRW_PAGES)
239                 ret = page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES;
240
241 out_unlock:
242         spin_unlock(&page_pools.epp_lock);
243         return ret;
244 }
245
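/* number of pools needed to hold @npages pages, rounded up */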
246 static inline
247 int npages_to_npools(unsigned long npages)
248 {
249         return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
250 }
251
252 /*
253  * return how many pages were cleaned up.
254  */
255 static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
256 {
257         unsigned long cleaned = 0;
258         int           i, j;
259
260         for (i = 0; i < npools; i++) {
261                 if (pools[i]) {
262                         for (j = 0; j < PAGES_PER_POOL; j++) {
263                                 if (pools[i][j]) {
264                                         cfs_free_page(pools[i][j]);
265                                         cleaned++;
266                                 }
267                         }
268                         OBD_FREE(pools[i], CFS_PAGE_SIZE);
269                         pools[i] = NULL;
270                 }
271         }
272
273         return cleaned;
274 }
275
276 /*
277  * merge the @npools pools pointed to by @pools, containing @npages new
278  * pages, into the current pools.
279  *
280  * we could avoid most of the memory copying with some tricks, but we
281  * choose the simplest approach since this is not called frequently.
282  */
283 static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
284 {
285         int     freeslot;
286         int     op_idx, np_idx, og_idx, ng_idx;
287         int     cur_npools, end_npools;
288
289         LASSERT(npages > 0);
290         LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
291         LASSERT(npages_to_npools(npages) == npools);
292
293         spin_lock(&page_pools.epp_lock);
294
295         /*
296          * (1) fill all the free slots of current pools.
297          */
298         /* free slots are those left by rented pages, plus the extra ones
299          * with index >= epp_total_pages, located at the tail of the last pool. */
300         freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
301         if (freeslot != 0)
302                 freeslot = PAGES_PER_POOL - freeslot;
303         freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;
304
305         op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
306         og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
307         np_idx = npools - 1;
308         ng_idx = (npages - 1) % PAGES_PER_POOL;
309
310         while (freeslot) {
311                 LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
312                 LASSERT(pools[np_idx][ng_idx] != NULL);
313
314                 page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
315                 pools[np_idx][ng_idx] = NULL;
316
317                 freeslot--;
318
319                 if (++og_idx == PAGES_PER_POOL) {
320                         op_idx++;
321                         og_idx = 0;
322                 }
323                 if (--ng_idx < 0) {
324                         if (np_idx == 0)
325                                 break;
326                         np_idx--;
327                         ng_idx = PAGES_PER_POOL - 1;
328                 }
329         }
330
331         /*
332          * (2) add pools if needed.
333          */
334         cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
335                      PAGES_PER_POOL;
336         end_npools = (page_pools.epp_total_pages + npages + PAGES_PER_POOL -1) /
337                      PAGES_PER_POOL;
338         LASSERT(end_npools <= page_pools.epp_max_pools);
339
340         np_idx = 0;
341         while (cur_npools < end_npools) {
342                 LASSERT(page_pools.epp_pools[cur_npools] == NULL);
343                 LASSERT(np_idx < npools);
344                 LASSERT(pools[np_idx] != NULL);
345
346                 page_pools.epp_pools[cur_npools++] = pools[np_idx];
347                 pools[np_idx++] = NULL;
348         }
349
350         page_pools.epp_total_pages += npages;
351         page_pools.epp_free_pages += npages;
352         page_pools.epp_st_lowfree = page_pools.epp_free_pages;
353
354         CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
355                page_pools.epp_total_pages);
356
357         spin_unlock(&page_pools.epp_lock);
358 }
359
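/*
 * grow the pools by @npages pages (at least PTLRPC_MAX_BRW_PAGES, capped
 * by epp_max_pages). the allocation is done without epp_lock held and is
 * serialized by a local semaphore; the new pages are then merged into the
 * current pools by enc_pools_insert().
 */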
360 static int enc_pools_add_pages(int npages)
361 {
362         static DECLARE_MUTEX(sem_add_pages);
363         cfs_page_t   ***pools;
364         int             npools, alloced = 0;
365         int             i, j, rc = -ENOMEM;
366
367         if (npages < PTLRPC_MAX_BRW_PAGES)
368                 npages = PTLRPC_MAX_BRW_PAGES;
369
370         down(&sem_add_pages);
371
372         if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
373                 npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
374         LASSERT(npages > 0);
375
376         page_pools.epp_st_grows++;
377
378         npools = npages_to_npools(npages);
379         OBD_ALLOC(pools, npools * sizeof(*pools));
380         if (pools == NULL)
381                 goto out;
382
383         for (i = 0; i < npools; i++) {
384                 OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
385                 if (pools[i] == NULL)
386                         goto out_pools;
387
388                 for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
389                         pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
390                                                      CFS_ALLOC_HIGH);
391                         if (pools[i][j] == NULL)
392                                 goto out_pools;
393
394                         alloced++;
395                 }
396         }
397
398         enc_pools_insert(pools, npools, npages);
399         CDEBUG(D_SEC, "added %d pages into pools\n", npages);
400         rc = 0;
401
402 out_pools:
403         enc_pools_cleanup(pools, npools);
404         OBD_FREE(pools, npools * sizeof(*pools));
405 out:
406         if (rc) {
407                 page_pools.epp_st_grow_fails++;
408                 CERROR("Failed to allocate %d enc pages\n", npages);
409         }
410
411         up(&sem_add_pages);
412         return rc;
413 }
414
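/* wake up any threads waiting for free pages. called with epp_lock held. */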
415 static inline void enc_pools_wakeup(void)
416 {
417         if (unlikely(page_pools.epp_waitqlen)) {
418                 LASSERT(page_pools.epp_waitqlen > 0);
419                 LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
420                 cfs_waitq_broadcast(&page_pools.epp_waitq);
421         }
422 }
423
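/*
 * decide whether the pools should be grown to satisfy a request for
 * @page_needed pages; @now is the current time in seconds.
 * called with epp_lock held.
 */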
424 static int enc_pools_should_grow(int page_needed, long now)
425 {
426         /* don't grow if someone else is growing the pools right now,
427          * or the pools have reached their full capacity
428          */
429         if (page_pools.epp_growing ||
430             page_pools.epp_total_pages == page_pools.epp_max_pages)
431                 return 0;
432
433         /* if the total number of pages is not enough, we need to grow */
434         if (page_pools.epp_total_pages < page_needed)
435                 return 1;
436
437         /* if we just shrank due to memory pressure, we'd better
438          * wait a while to grow again.
439          */
440         if (now - page_pools.epp_last_shrink < 2)
441                 return 0;
442
443         /*
444          * perhaps we should also consider other factors here, such as
445          * the wait queue length, the idle index, etc.
446          */
447
448         /* grow the pools in all other cases */
449         return 1;
450 }
451
452 /*
453  * we allocate the requested pages atomically.
454  */
455 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
456 {
457         cfs_waitlink_t  waitlink;
458         unsigned long   this_idle = -1;
459         cfs_time_t      tick = 0;
460         long            now;
461         int             p_idx, g_idx;
462         int             i;
463
464         LASSERT(desc->bd_max_iov > 0);
465         LASSERT(desc->bd_max_iov <= page_pools.epp_max_pages);
466
467         /* resent bulk, enc pages might have been allocated previously */
468         if (desc->bd_enc_pages != NULL)
469                 return 0;
470
471         OBD_ALLOC(desc->bd_enc_pages,
472                   desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
473         if (desc->bd_enc_pages == NULL)
474                 return -ENOMEM;
475
476         spin_lock(&page_pools.epp_lock);
477
478         page_pools.epp_st_access++;
479 again:
480         if (unlikely(page_pools.epp_free_pages < desc->bd_max_iov)) {
481                 if (tick == 0)
482                         tick = cfs_time_current();
483
484                 now = cfs_time_current_sec();
485
486                 page_pools.epp_st_missings++;
487                 page_pools.epp_pages_short += desc->bd_max_iov;
488
489                 if (enc_pools_should_grow(desc->bd_max_iov, now)) {
490                         page_pools.epp_growing = 1;
491
492                         spin_unlock(&page_pools.epp_lock);
493                         enc_pools_add_pages(page_pools.epp_pages_short / 2);
494                         spin_lock(&page_pools.epp_lock);
495
496                         page_pools.epp_growing = 0;
497                 } else {
498                         if (++page_pools.epp_waitqlen >
499                             page_pools.epp_st_max_wqlen)
500                                 page_pools.epp_st_max_wqlen =
501                                                 page_pools.epp_waitqlen;
502
503                         set_current_state(TASK_UNINTERRUPTIBLE);
504                         cfs_waitlink_init(&waitlink);
505                         cfs_waitq_add(&page_pools.epp_waitq, &waitlink);
506
507                         spin_unlock(&page_pools.epp_lock);
508                         cfs_schedule();
509                         spin_lock(&page_pools.epp_lock);
510
511                         LASSERT(page_pools.epp_waitqlen > 0);
512                         page_pools.epp_waitqlen--;
513                 }
514
515                 LASSERT(page_pools.epp_pages_short >= desc->bd_max_iov);
516                 page_pools.epp_pages_short -= desc->bd_max_iov;
517
518                 this_idle = 0;
519                 goto again;
520         }
521
522         /* record max wait time */
523         if (unlikely(tick != 0)) {
524                 tick = cfs_time_current() - tick;
525                 if (tick > page_pools.epp_st_max_wait)
526                         page_pools.epp_st_max_wait = tick;
527         }
528
529         /* proceed with rest of allocation */
530         page_pools.epp_free_pages -= desc->bd_max_iov;
531
532         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
533         g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
534
535         for (i = 0; i < desc->bd_max_iov; i++) {
536                 LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
537                 desc->bd_enc_pages[i] = page_pools.epp_pools[p_idx][g_idx];
538                 page_pools.epp_pools[p_idx][g_idx] = NULL;
539
540                 if (++g_idx == PAGES_PER_POOL) {
541                         p_idx++;
542                         g_idx = 0;
543                 }
544         }
545
546         if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
547                 page_pools.epp_st_lowfree = page_pools.epp_free_pages;
548
549         /*
550          * new idle index = (old * weight + new) / (weight + 1)
551          */
552         if (this_idle == -1) {
553                 this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
554                             page_pools.epp_total_pages;
555         }
556         page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
557                                    this_idle) /
558                                   (IDLE_IDX_WEIGHT + 1);
559
560         page_pools.epp_last_access = cfs_time_current_sec();
561
562         spin_unlock(&page_pools.epp_lock);
563         return 0;
564 }
565 EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
566
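/*
 * return the encryption pages of @desc to the pools, wake up any waiting
 * threads and free the page array.
 */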
567 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
568 {
569         int     p_idx, g_idx;
570         int     i;
571
572         if (desc->bd_enc_pages == NULL)
573                 return;
574         if (desc->bd_max_iov == 0)
575                 return;
576
577         spin_lock(&page_pools.epp_lock);
578
579         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
580         g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
581
582         LASSERT(page_pools.epp_free_pages + desc->bd_max_iov <=
583                 page_pools.epp_total_pages);
584         LASSERT(page_pools.epp_pools[p_idx]);
585
586         for (i = 0; i < desc->bd_max_iov; i++) {
587                 LASSERT(desc->bd_enc_pages[i] != NULL);
588                 LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
589                 LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
590
591                 page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_pages[i];
592
593                 if (++g_idx == PAGES_PER_POOL) {
594                         p_idx++;
595                         g_idx = 0;
596                 }
597         }
598
599         page_pools.epp_free_pages += desc->bd_max_iov;
600
601         enc_pools_wakeup();
602
603         spin_unlock(&page_pools.epp_lock);
604
605         OBD_FREE(desc->bd_enc_pages,
606                  desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
607         desc->bd_enc_pages = NULL;
608 }
609 EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
610
611 /*
612  * we no longer do much in add_user/del_user, except adding some initial
613  * pages in add_user() if the current pools are empty; the rest is handled
614  * by the pools' self-adaptation.
615  */
616 int sptlrpc_enc_pool_add_user(void)
617 {
618         int     need_grow = 0;
619
620         spin_lock(&page_pools.epp_lock);
621         if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
622                 page_pools.epp_growing = 1;
623                 need_grow = 1;
624         }
625         spin_unlock(&page_pools.epp_lock);
626
627         if (need_grow) {
628                 enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES);
629
630                 spin_lock(&page_pools.epp_lock);
631                 page_pools.epp_growing = 0;
632                 enc_pools_wakeup();
633                 spin_unlock(&page_pools.epp_lock);
634         }
635         return 0;
636 }
637 EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);
638
639 int sptlrpc_enc_pool_del_user(void)
640 {
641         return 0;
642 }
643 EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);
644
645 static inline void enc_pools_alloc(void)
646 {
647         LASSERT(page_pools.epp_max_pools);
648         /*
649          * on a system with huge memory but a small page size this might
650          * require a high-order allocation, but that is uncommon and memory
651          * should not be too fragmented at module load time.
652          */
653         OBD_ALLOC(page_pools.epp_pools,
654                   page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
655 }
656
657 static inline void enc_pools_free(void)
658 {
659         LASSERT(page_pools.epp_max_pools);
660         LASSERT(page_pools.epp_pools);
661
662         OBD_FREE(page_pools.epp_pools,
663                  page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
664 }
665
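/*
 * set up the page pool bookkeeping and register the memory shrinker.
 * no pages are allocated up front; the pools grow on demand up to 1/8 of
 * physical memory.
 */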
666 int sptlrpc_enc_pool_init(void)
667 {
668         /*
669          * maximum capacity is 1/8 of total physical memory.
670          * is 1/8 a good number?
671          */
672         page_pools.epp_max_pages = num_physpages / 8;
673         page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
674
675         cfs_waitq_init(&page_pools.epp_waitq);
676         page_pools.epp_waitqlen = 0;
677         page_pools.epp_pages_short = 0;
678
679         page_pools.epp_growing = 0;
680
681         page_pools.epp_idle_idx = 0;
682         page_pools.epp_last_shrink = cfs_time_current_sec();
683         page_pools.epp_last_access = cfs_time_current_sec();
684
685         spin_lock_init(&page_pools.epp_lock);
686         page_pools.epp_total_pages = 0;
687         page_pools.epp_free_pages = 0;
688
689         page_pools.epp_st_grows = 0;
690         page_pools.epp_st_grow_fails = 0;
691         page_pools.epp_st_shrinks = 0;
692         page_pools.epp_st_access = 0;
693         page_pools.epp_st_missings = 0;
694         page_pools.epp_st_lowfree = 0;
695         page_pools.epp_st_max_wqlen = 0;
696         page_pools.epp_st_max_wait = 0;
697
698         enc_pools_alloc();
699         if (page_pools.epp_pools == NULL)
700                 return -ENOMEM;
701
702         pools_shrinker = set_shrinker(pools_shrinker_seeks, enc_pools_shrink);
703         if (pools_shrinker == NULL) {
704                 enc_pools_free();
705                 return -ENOMEM;
706         }
707
708         return 0;
709 }
710
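/*
 * unregister the shrinker and release all pool pages; every page must have
 * been returned to the pools by this time.
 */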
711 void sptlrpc_enc_pool_fini(void)
712 {
713         unsigned long cleaned, npools;
714
715         LASSERT(pools_shrinker);
716         LASSERT(page_pools.epp_pools);
717         LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);
718
719         remove_shrinker(pools_shrinker);
720
721         npools = npages_to_npools(page_pools.epp_total_pages);
722         cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
723         LASSERT(cleaned == page_pools.epp_total_pages);
724
725         enc_pools_free();
726 }
727
728 #else /* !__KERNEL__ */
729
730 int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
731 {
732         return 0;
733 }
734
735 void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
736 {
737 }
738
739 int sptlrpc_enc_pool_init(void)
740 {
741         return 0;
742 }
743
744 void sptlrpc_enc_pool_fini(void)
745 {
746 }
747 #endif
748
749 /****************************************
750  * Helpers to assist policy modules to  *
751  * implement checksum functionality     *
752  ****************************************/
753
754 static struct sptlrpc_hash_type hash_types[] = {
755         [BULK_HASH_ALG_NULL]    = { "null",     "null",         0 },
756         [BULK_HASH_ALG_ADLER32] = { "adler32",  "adler32",      4 },
757         [BULK_HASH_ALG_CRC32]   = { "crc32",    "crc32",        4 },
758         [BULK_HASH_ALG_MD5]     = { "md5",      "md5",          16 },
759         [BULK_HASH_ALG_SHA1]    = { "sha1",     "sha1",         20 },
760         [BULK_HASH_ALG_SHA256]  = { "sha256",   "sha256",       32 },
761         [BULK_HASH_ALG_SHA384]  = { "sha384",   "sha384",       48 },
762         [BULK_HASH_ALG_SHA512]  = { "sha512",   "sha512",       64 },
763         [BULK_HASH_ALG_WP256]   = { "wp256",    "wp256",        32 },
764         [BULK_HASH_ALG_WP384]   = { "wp384",    "wp384",        48 },
765         [BULK_HASH_ALG_WP512]   = { "wp512",    "wp512",        64 },
766 };
767
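/* return the hash type descriptor for @hash_alg, or NULL if unsupported */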
768 const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg)
769 {
770         struct sptlrpc_hash_type *ht;
771
772         if (hash_alg < BULK_HASH_ALG_MAX) {
773                 ht = &hash_types[hash_alg];
774                 if (ht->sht_tfm_name)
775                         return ht;
776         }
777         return NULL;
778 }
779 EXPORT_SYMBOL(sptlrpc_get_hash_type);
780
781 const char * sptlrpc_get_hash_name(__u8 hash_alg)
782 {
783         const struct sptlrpc_hash_type *ht;
784
785         ht = sptlrpc_get_hash_type(hash_alg);
786         if (ht)
787                 return ht->sht_name;
788         else
789                 return "unknown";
790 }
791 EXPORT_SYMBOL(sptlrpc_get_hash_name);
792
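/*
 * return the buffer size needed for a bulk security descriptor using
 * @hash_alg; read requests carry no checksum payload.
 */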
793 int bulk_sec_desc_size(__u8 hash_alg, int request, int read)
794 {
795         int size = sizeof(struct ptlrpc_bulk_sec_desc);
796
797         LASSERT(hash_alg < BULK_HASH_ALG_MAX);
798
799         /* read requests don't need extra data */
800         if (!(read && request))
801                 size += hash_types[hash_alg].sht_size;
802
803         return size;
804 }
805 EXPORT_SYMBOL(bulk_sec_desc_size);
806
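/*
 * sanity check the bulk security descriptor at buffer @offset of @msg:
 * version, flags, hash and cipher algorithms, and overall size.
 */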
807 int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset)
808 {
809         struct ptlrpc_bulk_sec_desc *bsd;
810         int    size = msg->lm_buflens[offset];
811
812         bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
813         if (bsd == NULL) {
814                 CERROR("Invalid bulk sec desc: size %d\n", size);
815                 return -EINVAL;
816         }
817
818         /* nothing to swab */
819
820         if (unlikely(bsd->bsd_version != 0)) {
821                 CERROR("Unexpected version %u\n", bsd->bsd_version);
822                 return -EPROTO;
823         }
824
825         if (unlikely(bsd->bsd_flags != 0)) {
826                 CERROR("Unexpected flags %x\n", bsd->bsd_flags);
827                 return -EPROTO;
828         }
829
830         if (unlikely(!sptlrpc_get_hash_type(bsd->bsd_hash_alg))) {
831                 CERROR("Unsupported checksum algorithm %u\n",
832                        bsd->bsd_hash_alg);
833                 return -EINVAL;
834         }
835
836         if (unlikely(!sptlrpc_get_ciph_type(bsd->bsd_ciph_alg))) {
837                 CERROR("Unsupported cipher algorithm %u\n",
838                        bsd->bsd_ciph_alg);
839                 return -EINVAL;
840         }
841
842         if (unlikely(size > sizeof(*bsd) &&
843             size < sizeof(*bsd) + hash_types[bsd->bsd_hash_alg].sht_size)) {
844                 CERROR("Mal-formed checksum data: csum alg %u, size %d\n",
845                        bsd->bsd_hash_alg, size);
846                 return -EINVAL;
847         }
848
849         return 0;
850 }
851 EXPORT_SYMBOL(bulk_sec_desc_unpack);
852
853 #ifdef __KERNEL__
854
855 #ifdef HAVE_ADLER
856 static int do_bulk_checksum_adler32(struct ptlrpc_bulk_desc *desc, void *buf)
857 {
858         struct page    *page;
859         int             off;
860         char           *ptr;
861         __u32           adler32 = 1;
862         int             len, i;
863
864         for (i = 0; i < desc->bd_iov_count; i++) {
865                 page = desc->bd_iov[i].kiov_page;
866                 off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
867                 ptr = cfs_kmap(page) + off;
868                 len = desc->bd_iov[i].kiov_len;
869
870                 adler32 = zlib_adler32(adler32, ptr, len);
871
872                 cfs_kunmap(page);
873         }
874
875         adler32 = cpu_to_le32(adler32);
876         memcpy(buf, &adler32, sizeof(adler32));
877         return 0;
878 }
879 #endif
880
881 static int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
882 {
883         struct page    *page;
884         int             off;
885         char           *ptr;
886         __u32           crc32 = ~0;
887         int             len, i;
888
889         for (i = 0; i < desc->bd_iov_count; i++) {
890                 page = desc->bd_iov[i].kiov_page;
891                 off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
892                 ptr = cfs_kmap(page) + off;
893                 len = desc->bd_iov[i].kiov_len;
894
895                 crc32 = crc32_le(crc32, ptr, len);
896
897                 cfs_kunmap(page);
898         }
899
900         crc32 = cpu_to_le32(crc32);
901         memcpy(buf, &crc32, sizeof(crc32));
902         return 0;
903 }
904
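/*
 * compute the @alg checksum over all pages of @desc and store it in @buf.
 * adler32 and crc32 are computed inline; other algorithms go through the
 * crypto hash API with a scatterlist built over the bulk pages.
 */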
905 static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
906 {
907         struct hash_desc    hdesc;
908         struct scatterlist *sl;
909         int i, rc = 0, bytes = 0;
910
911         LASSERT(alg > BULK_HASH_ALG_NULL &&
912                 alg < BULK_HASH_ALG_MAX);
913
914         switch (alg) {
915         case BULK_HASH_ALG_ADLER32:
916 #ifdef HAVE_ADLER
917                 return do_bulk_checksum_adler32(desc, buf);
918 #else
919                 CERROR("Adler32 not supported\n");
920                 return -EINVAL;
921 #endif
922         case BULK_HASH_ALG_CRC32:
923                 return do_bulk_checksum_crc32(desc, buf);
924         }
925
926         hdesc.tfm = ll_crypto_alloc_hash(hash_types[alg].sht_tfm_name, 0, 0);
927         if (hdesc.tfm == NULL) {
928                 CERROR("Unable to allocate TFM %s\n", hash_types[alg].sht_name);
929                 return -ENOMEM;
930         }
931         hdesc.flags = 0;
932
933         OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
934         if (sl == NULL) {
935                 rc = -ENOMEM;
936                 goto out_tfm;
937         }
938
939         for (i = 0; i < desc->bd_iov_count; i++) {
940                 sl[i].page = desc->bd_iov[i].kiov_page;
941                 sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
942                 sl[i].length = desc->bd_iov[i].kiov_len;
943                 bytes += desc->bd_iov[i].kiov_len;
944         }
945
946         ll_crypto_hash_init(&hdesc);
947         ll_crypto_hash_update(&hdesc, sl, bytes);
948         ll_crypto_hash_final(&hdesc, buf);
949
950         OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);
951
952 out_tfm:
953         ll_crypto_free_hash(hdesc.tfm);
954         return rc;
955 }
956
957 #else /* !__KERNEL__ */
958
959 static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
960 {
961         __u32   csum32;
962         int     i;
963
964         LASSERT(alg == BULK_HASH_ALG_ADLER32 || alg == BULK_HASH_ALG_CRC32);
965
966         if (alg == BULK_HASH_ALG_ADLER32)
967                 csum32 = 1;
968         else
969                 csum32 = ~0;
970
971         for (i = 0; i < desc->bd_iov_count; i++) {
972                 char *ptr = desc->bd_iov[i].iov_base;
973                 int len = desc->bd_iov[i].iov_len;
974
975                 switch (alg) {
976                 case BULK_HASH_ALG_ADLER32:
977 #ifdef HAVE_ADLER
978                         csum32 = zlib_adler32(csum32, ptr, len);
979 #else
980                         CERROR("Adler32 not supported\n");
981                         return -EINVAL;
982 #endif
983                         break;
984                 case BULK_HASH_ALG_CRC32:
985                         csum32 = crc32_le(csum32, ptr, len);
986                         break;
987                 }
988         }
989
990         csum32 = cpu_to_le32(csum32);
991         memcpy(buf, &csum32, sizeof(csum32));
992         return 0;
993 }
994
995 #endif /* __KERNEL__ */
996
997 /*
998  * compute the @alg checksum of @desc and store the result in @bsd->bsd_csum.
999  * if anything goes wrong, leave @bsd->bsd_hash_alg as BULK_HASH_ALG_NULL.
1000  */
1001 static
1002 int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
1003                        struct ptlrpc_bulk_sec_desc *bsd, int bsdsize)
1004 {
1005         int rc;
1006
1007         LASSERT(bsd);
1008         LASSERT(alg < BULK_HASH_ALG_MAX);
1009
1010         bsd->bsd_hash_alg = BULK_HASH_ALG_NULL;
1011
1012         if (alg == BULK_HASH_ALG_NULL)
1013                 return 0;
1014
1015         LASSERT(bsdsize >= sizeof(*bsd) + hash_types[alg].sht_size);
1016
1017         rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
1018         if (rc == 0)
1019                 bsd->bsd_hash_alg = alg;
1020
1021         return rc;
1022 }
1023
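/*
 * recompute the checksum of @desc and compare it against the one carried
 * in @bsdv. if @bsdr is given, the algorithm and the freshly computed
 * checksum are copied into it for the reply.
 */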
1024 static
1025 int verify_bulk_csum(struct ptlrpc_bulk_desc *desc, int read,
1026                      struct ptlrpc_bulk_sec_desc *bsdv, int bsdvsize,
1027                      struct ptlrpc_bulk_sec_desc *bsdr, int bsdrsize)
1028 {
1029         char *csum_p;
1030         char *buf = NULL;
1031         int   csum_size, rc = 0;
1032
1033         LASSERT(bsdv);
1034         LASSERT(bsdv->bsd_hash_alg < BULK_HASH_ALG_MAX);
1035
1036         if (bsdr)
1037                 bsdr->bsd_hash_alg = BULK_HASH_ALG_NULL;
1038
1039         if (bsdv->bsd_hash_alg == BULK_HASH_ALG_NULL)
1040                 return 0;
1041
1042         /* for all supported algorithms */
1043         csum_size = hash_types[bsdv->bsd_hash_alg].sht_size;
1044
1045         if (bsdvsize < sizeof(*bsdv) + csum_size) {
1046                 CERROR("verifier size %d too small, require %d\n",
1047                        bsdvsize, (int) sizeof(*bsdv) + csum_size);
1048                 return -EINVAL;
1049         }
1050
1051         if (bsdr) {
1052                 LASSERT(bsdrsize >= sizeof(*bsdr) + csum_size);
1053                 csum_p = (char *) bsdr->bsd_csum;
1054         } else {
1055                 OBD_ALLOC(buf, csum_size);
1056                 if (buf == NULL)
1057                         return -EINVAL;
1058                 csum_p = buf;
1059         }
1060
1061         rc = do_bulk_checksum(desc, bsdv->bsd_hash_alg, csum_p);
1062
1063         if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
1064                 CERROR("BAD %s CHECKSUM (%s), data mutated during "
1065                        "transfer!\n", read ? "READ" : "WRITE",
1066                        hash_types[bsdv->bsd_hash_alg].sht_name);
1067                 rc = -EINVAL;
1068         } else {
1069                 CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
1070                       read ? "read" : "write",
1071                       hash_types[bsdv->bsd_hash_alg].sht_name);
1072         }
1073
1074         if (bsdr) {
1075                 bsdr->bsd_hash_alg = bsdv->bsd_hash_alg;
1076                 memcpy(bsdr->bsd_csum, csum_p, csum_size);
1077         } else {
1078                 LASSERT(buf);
1079                 OBD_FREE(buf, csum_size);
1080         }
1081
1082         return rc;
1083 }
1084
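/*
 * client side, request path: for a bulk read just record @alg in the
 * request descriptor (the server computes the checksum); for a bulk write
 * compute the checksum of the outgoing pages now.
 */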
1085 int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
1086                           __u32 alg, struct lustre_msg *rmsg, int roff)
1087 {
1088         struct ptlrpc_bulk_sec_desc *bsdr;
1089         int    rsize, rc = 0;
1090
1091         rsize = rmsg->lm_buflens[roff];
1092         bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));
1093
1094         LASSERT(bsdr);
1095         LASSERT(rsize >= sizeof(*bsdr));
1096         LASSERT(alg < BULK_HASH_ALG_MAX);
1097
1098         if (read) {
1099                 bsdr->bsd_hash_alg = alg;
1100         } else {
1101                 rc = generate_bulk_csum(desc, alg, bsdr, rsize);
1102                 if (rc)
1103                         CERROR("bulk write: client failed to compute "
1104                                "checksum: %d\n", rc);
1105
1106                 /* for sends we only compute a wrong checksum instead of
1107                  * corrupting the data, so it is still correct on a redo */
1108                 if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
1109                     bsdr->bsd_hash_alg != BULK_HASH_ALG_NULL)
1110                         bsdr->bsd_csum[0] ^= 0x1;
1111         }
1112
1113         return rc;
1114 }
1115 EXPORT_SYMBOL(bulk_csum_cli_request);
1116
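/*
 * client side, reply path: for a bulk read verify the received pages
 * against the server's checksum; for a bulk write compare the checksum we
 * sent with the one the server computed to locate any corruption.
 */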
1117 int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
1118                         struct lustre_msg *rmsg, int roff,
1119                         struct lustre_msg *vmsg, int voff)
1120 {
1121         struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
1122         int    rsize, vsize;
1123
1124         rsize = rmsg->lm_buflens[roff];
1125         vsize = vmsg->lm_buflens[voff];
1126         bsdr = lustre_msg_buf(rmsg, roff, 0);
1127         bsdv = lustre_msg_buf(vmsg, voff, 0);
1128
1129         if (bsdv == NULL || vsize < sizeof(*bsdv)) {
1130                 CERROR("Invalid checksum verifier from server: size %d\n",
1131                        vsize);
1132                 return -EINVAL;
1133         }
1134
1135         LASSERT(bsdr);
1136         LASSERT(rsize >= sizeof(*bsdr));
1137         LASSERT(vsize >= sizeof(*bsdv));
1138
1139         if (bsdr->bsd_hash_alg != bsdv->bsd_hash_alg) {
1140                 CERROR("bulk %s: checksum algorithm mismatch: client request "
1141                        "%s but server reply with %s. try to use the new one "
1142                        "for checksum verification\n",
1143                        read ? "read" : "write",
1144                        hash_types[bsdr->bsd_hash_alg].sht_name,
1145                        hash_types[bsdv->bsd_hash_alg].sht_name);
1146         }
1147
1148         if (read)
1149                 return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
1150         else {
1151                 char *cli, *srv, *new = NULL;
1152                 int csum_size = hash_types[bsdr->bsd_hash_alg].sht_size;
1153
1154                 LASSERT(bsdr->bsd_hash_alg < BULK_HASH_ALG_MAX);
1155                 if (bsdr->bsd_hash_alg == BULK_HASH_ALG_NULL)
1156                         return 0;
1157
1158                 if (vsize < sizeof(*bsdv) + csum_size) {
1159                         CERROR("verifier size %d too small, require %d\n",
1160                                vsize, (int) sizeof(*bsdv) + csum_size);
1161                         return -EINVAL;
1162                 }
1163
1164                 cli = (char *) (bsdr + 1);
1165                 srv = (char *) (bsdv + 1);
1166
1167                 if (!memcmp(cli, srv, csum_size)) {
1168                         /* checksum confirmed */
1169                         CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
1170                                hash_types[bsdr->bsd_hash_alg].sht_name);
1171                         return 0;
1172                 }
1173
1174                 /* checksum mismatch: recompute the checksum and compare it
1175                  * with the others so we can give a precise warning. */
1176                 OBD_ALLOC(new, csum_size);
1177                 if (new == NULL)
1178                         return -ENOMEM;
1179
1180                 do_bulk_checksum(desc, bsdr->bsd_hash_alg, new);
1181
1182                 if (!memcmp(new, srv, csum_size)) {
1183                         CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
1184                                "on the client after we checksummed them\n",
1185                                hash_types[bsdr->bsd_hash_alg].sht_name);
1186                 } else if (!memcmp(new, cli, csum_size)) {
1187                         CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
1188                                "in transit\n",
1189                                hash_types[bsdr->bsd_hash_alg].sht_name);
1190                 } else {
1191                         CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
1192                                "in transit, and the current page contents "
1193                                "don't match the originals and what the server "
1194                                "received\n",
1195                                hash_types[bsdr->bsd_hash_alg].sht_name);
1196                 }
1197                 OBD_FREE(new, csum_size);
1198
1199                 return -EINVAL;
1200         }
1201 }
1202 EXPORT_SYMBOL(bulk_csum_cli_reply);
1203
1204 #ifdef __KERNEL__
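/*
 * flip one bit in the first non-empty iov of @desc, to simulate data
 * corruption for OBD_FAIL testing.
 */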
1205 static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
1206 {
1207         char           *ptr;
1208         unsigned int    off, i;
1209
1210         for (i = 0; i < desc->bd_iov_count; i++) {
1211                 if (desc->bd_iov[i].kiov_len == 0)
1212                         continue;
1213
1214                 ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
1215                 off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
1216                 ptr[off] ^= 0x1;
1217                 cfs_kunmap(desc->bd_iov[i].kiov_page);
1218                 return;
1219         }
1220 }
1221 #else
1222 static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
1223 {
1224 }
1225 #endif /* __KERNEL__ */
1226
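/*
 * server side: for a bulk read generate the checksum of the outgoing pages
 * into @bsdr (optionally corrupting the data afterwards for fault
 * injection); for a bulk write verify the received pages against the
 * client's checksum in @bsdv.
 */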
1227 int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
1228                   struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
1229                   struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
1230 {
1231         int    rc;
1232
1233         LASSERT(vsize >= sizeof(*bsdv));
1234         LASSERT(rsize >= sizeof(*bsdr));
1235         LASSERT(bsdv && bsdr);
1236
1237         if (read) {
1238                 rc = generate_bulk_csum(desc, bsdv->bsd_hash_alg, bsdr, rsize);
1239                 if (rc)
1240                         CERROR("bulk read: server failed to generate %s "
1241                                "checksum: %d\n",
1242                                hash_types[bsdv->bsd_hash_alg].sht_name, rc);
1243
1244                 /* corrupt the data after we compute the checksum, to
1245                  * simulate an OST->client data error */
1246                 if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
1247                         corrupt_bulk_data(desc);
1248         } else {
1249                 rc = verify_bulk_csum(desc, 0, bsdv, vsize, bsdr, rsize);
1250         }
1251
1252         return rc;
1253 }
1254 EXPORT_SYMBOL(bulk_csum_svc);
1255
1256 /****************************************
1257  * Helpers to assist policy modules to  *
1258  * implement encryption functionality   *
1259  ****************************************/
1260
1261 /* FIXME */
1262 #ifndef __KERNEL__
1263 #define CRYPTO_TFM_MODE_ECB     (0)
1264 #define CRYPTO_TFM_MODE_CBC     (1)
1265 #endif
1266
1267 static struct sptlrpc_ciph_type cipher_types[] = {
1268         [BULK_CIPH_ALG_NULL]    = {
1269                 "null",         "null",       0,                   0,  0
1270         },
1271         [BULK_CIPH_ALG_ARC4]    = {
1272                 "arc4",         "ecb(arc4)",       0, 0,  16
1273         },
1274         [BULK_CIPH_ALG_AES128]  = {
1275                 "aes128",       "cbc(aes)",        0, 16, 16
1276         },
1277         [BULK_CIPH_ALG_AES192]  = {
1278                 "aes192",       "cbc(aes)",        0, 16, 24
1279         },
1280         [BULK_CIPH_ALG_AES256]  = {
1281                 "aes256",       "cbc(aes)",        0, 16, 32
1282         },
1283         [BULK_CIPH_ALG_CAST128] = {
1284                 "cast128",      "cbc(cast5)",      0, 8,  16
1285         },
1286         [BULK_CIPH_ALG_CAST256] = {
1287                 "cast256",      "cbc(cast6)",      0, 16, 32
1288         },
1289         [BULK_CIPH_ALG_TWOFISH128] = {
1290                 "twofish128",   "cbc(twofish)",    0, 16, 16
1291         },
1292         [BULK_CIPH_ALG_TWOFISH256] = {
1293                 "twofish256",   "cbc(twofish)",    0, 16, 32
1294         },
1295 };
1296
1297 const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg)
1298 {
1299         struct sptlrpc_ciph_type *ct;
1300
1301         if (ciph_alg < BULK_CIPH_ALG_MAX) {
1302                 ct = &cipher_types[ciph_alg];
1303                 if (ct->sct_tfm_name)
1304                         return ct;
1305         }
1306         return NULL;
1307 }
1308 EXPORT_SYMBOL(sptlrpc_get_ciph_type);
1309
1310 const char *sptlrpc_get_ciph_name(__u8 ciph_alg)
1311 {
1312         const struct sptlrpc_ciph_type *ct;
1313
1314         ct = sptlrpc_get_ciph_type(ciph_alg);
1315         if (ct)
1316                 return ct->sct_name;
1317         else
1318                 return "unknown";
1319 }
1320 EXPORT_SYMBOL(sptlrpc_get_ciph_name);