/* lustre/ptlrpc/sec_bulk.c */
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2006-2007 Cluster File Systems, Inc.
 *   Author: Eric Mei <ericm@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
#include <zlib.h>
#else
#include <linux/crypto.h>
#include <linux/zutil.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"

/****************************************
 * bulk encryption page pools           *
 ****************************************/

#ifdef __KERNEL__

#define PTRS_PER_PAGE   (CFS_PAGE_SIZE / sizeof(void *))
#define PAGES_PER_POOL  (PTRS_PER_PAGE)
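
/*
 * A worked example of the pool geometry (assuming the common case of
 * 4 KB pages and 8-byte pointers): PTRS_PER_PAGE = 4096 / 8 = 512, so
 * each pool is a single page holding 512 page pointers, i.e. one pool
 * tracks up to 2 MB worth of bulk encryption pages.
 */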

#define IDLE_IDX_MAX            (100)
#define IDLE_IDX_WEIGHT         (3)

#define CACHE_QUIESCENCE_PERIOD (20)

static struct ptlrpc_enc_page_pool {
        /*
         * constants
         */
        unsigned long    epp_max_pages;   /* maximum pages we can hold, const */
        unsigned int     epp_max_pools;   /* number of pools, const */

        /*
         * wait queue in case of not enough free pages.
         */
        cfs_waitq_t      epp_waitq;       /* waiting threads */
        unsigned int     epp_waitqlen;    /* wait queue length */
        unsigned long    epp_pages_short; /* # of pages wanted by queued users */
        unsigned int     epp_growing:1;   /* during adding pages */

        /*
         * how idle the pools are, from 0 to IDLE_IDX_MAX. this is updated
         * each time pages are taken from the pools, not by elapsed time,
         * so even if the system has been idle for a while the index may
         * still be low if there was no pool activity.
         */
        unsigned long    epp_idle_idx;

        /* last shrink time due to memory pressure */
        long             epp_last_shrink;
        long             epp_last_access;

        /*
         * in-pool pages bookkeeping
         */
        spinlock_t       epp_lock;        /* protect following fields */
        unsigned long    epp_total_pages; /* total pages in pools */
        unsigned long    epp_free_pages;  /* current pages available */

        /*
         * statistics
         */
        unsigned int     epp_st_grows;          /* # of grows */
        unsigned int     epp_st_grow_fails;     /* # of add pages failures */
        unsigned int     epp_st_shrinks;        /* # of shrinks */
        unsigned long    epp_st_access;         /* # of accesses */
        unsigned long    epp_st_missings;       /* # of cache misses */
        unsigned long    epp_st_lowfree;        /* lowest free pages reached */
        unsigned long    epp_st_max_wqlen;      /* highest waitqueue length */
        cfs_time_t       epp_st_max_wait;       /* in jiffies */
        /*
         * pointers to pools
         */
        cfs_page_t    ***epp_pools;
} page_pools;

/*
 * memory shrinker
 */
const int pools_shrinker_seeks = DEFAULT_SEEKS;
static struct shrinker *pools_shrinker = NULL;


/*
 * /proc/fs/lustre/sptlrpc/encrypt_page_pools
 */
int sptlrpc_proc_read_enc_pool(char *page, char **start, off_t off, int count,
                               int *eof, void *data)
{
        int     rc;

        spin_lock(&page_pools.epp_lock);

        rc = snprintf(page, count,
                      "physical pages:          %lu\n"
                      "pages per pool:          %lu\n"
                      "max pages:               %lu\n"
                      "max pools:               %u\n"
                      "total pages:             %lu\n"
                      "total free:              %lu\n"
                      "idle index:              %lu/100\n"
                      "last shrink:             %lds\n"
                      "last access:             %lds\n"
                      "grows:                   %u\n"
                      "grow failures:           %u\n"
                      "shrinks:                 %u\n"
                      "cache access:            %lu\n"
                      "cache misses:            %lu\n"
                      "low free mark:           %lu\n"
                      "max waitqueue depth:     %lu\n"
                      "max wait time:           "CFS_TIME_T"/%u\n"
                      ,
                      num_physpages,
                      PAGES_PER_POOL,
                      page_pools.epp_max_pages,
                      page_pools.epp_max_pools,
                      page_pools.epp_total_pages,
                      page_pools.epp_free_pages,
                      page_pools.epp_idle_idx,
                      cfs_time_current_sec() - page_pools.epp_last_shrink,
                      cfs_time_current_sec() - page_pools.epp_last_access,
                      page_pools.epp_st_grows,
                      page_pools.epp_st_grow_fails,
                      page_pools.epp_st_shrinks,
                      page_pools.epp_st_access,
                      page_pools.epp_st_missings,
                      page_pools.epp_st_lowfree,
                      page_pools.epp_st_max_wqlen,
                      page_pools.epp_st_max_wait, HZ
                     );

        spin_unlock(&page_pools.epp_lock);
        return rc;
}
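
/*
 * Sample of what a read of the proc file above might look like (all
 * values are illustrative only):
 *
 *   $ cat /proc/fs/lustre/sptlrpc/encrypt_page_pools
 *   physical pages:          515524
 *   pages per pool:          512
 *   max pages:               64440
 *   total free:              1024
 *   idle index:              80/100
 *   ...
 */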

static void enc_pools_release_free_pages(long npages)
{
        int     p_idx, g_idx;

        LASSERT(npages <= page_pools.epp_free_pages);

        p_idx = (page_pools.epp_free_pages - 1) / PAGES_PER_POOL;
        g_idx = (page_pools.epp_free_pages - 1) % PAGES_PER_POOL;
        LASSERT(page_pools.epp_pools[p_idx]);

        page_pools.epp_free_pages -= npages;
        page_pools.epp_total_pages -= npages;

        while (npages-- > 0) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);

                cfs_free_page(page_pools.epp_pools[p_idx][g_idx]);
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (g_idx-- == 0) {
                        p_idx--;
                        g_idx = PAGES_PER_POOL - 1;

                        LASSERT(page_pools.epp_pools[p_idx]);
                }
        }
}
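
/*
 * Index math example (assuming PAGES_PER_POOL == 512): with
 * epp_free_pages == 1000, the last free page lives in pool
 * p_idx = 999 / 512 = 1 at slot g_idx = 999 % 512 = 487, and the
 * release loop above walks backwards from there toward pool 0.
 */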

/*
 * could be called frequently for query (@nr_to_scan == 0)
 */
static int enc_pools_shrink(int nr_to_scan, unsigned int gfp_mask)
{
        unsigned long   ret;

        spin_lock(&page_pools.epp_lock);

        if (nr_to_scan) {
                if (nr_to_scan > page_pools.epp_free_pages)
                        nr_to_scan = page_pools.epp_free_pages;

                enc_pools_release_free_pages(nr_to_scan);
                CDEBUG(D_SEC, "released %d pages, %ld left\n",
                       nr_to_scan, page_pools.epp_free_pages);

                page_pools.epp_st_shrinks++;
                page_pools.epp_last_shrink = cfs_time_current_sec();
        }

        /*
         * try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool
         */
        if (page_pools.epp_free_pages <= PTLRPC_MAX_BRW_PAGES) {
                ret = 0;
                goto out_unlock;
        }

        /*
         * if there has been no pool access for a long time, consider
         * the pools fully idle.
         */
        if (cfs_time_current_sec() - page_pools.epp_last_access >
            CACHE_QUIESCENCE_PERIOD)
                page_pools.epp_idle_idx = IDLE_IDX_MAX;

        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
        ret = (page_pools.epp_free_pages * page_pools.epp_idle_idx /
               IDLE_IDX_MAX);
        if (page_pools.epp_free_pages - ret < PTLRPC_MAX_BRW_PAGES)
                ret = page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES;

out_unlock:
        spin_unlock(&page_pools.epp_lock);
        return ret;
}
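
/*
 * Shrink-target example (illustrative numbers): with 1024 free pages
 * and epp_idle_idx == 50, a query reports 1024 * 50 / 100 = 512
 * shrinkable pages, clamped so that at least PTLRPC_MAX_BRW_PAGES
 * free pages always remain in the pools.
 */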

static inline
int npages_to_npools(unsigned long npages)
{
        return (int) ((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
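
/* e.g. with PAGES_PER_POOL == 512: 512 pages -> 1 pool, 513 pages -> 2 pools */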

/*
 * return how many pages cleaned up.
 */
static unsigned long enc_pools_cleanup(cfs_page_t ***pools, int npools)
{
        unsigned long cleaned = 0;
        int           i, j;

        for (i = 0; i < npools; i++) {
                if (pools[i]) {
                        for (j = 0; j < PAGES_PER_POOL; j++) {
                                if (pools[i][j]) {
                                        cfs_free_page(pools[i][j]);
                                        cleaned++;
                                }
                        }
                        OBD_FREE(pools[i], CFS_PAGE_SIZE);
                        pools[i] = NULL;
                }
        }

        return cleaned;
}

/*
 * merge the @npools pools pointed to by @pools, which contain @npages new
 * pages, into the current pools.
 *
 * we could avoid most of the memory copying with some tricks, but we
 * choose the simplest way to avoid complexity; this is not called
 * frequently.
 */
static void enc_pools_insert(cfs_page_t ***pools, int npools, int npages)
{
        int     freeslot;
        int     op_idx, np_idx, og_idx, ng_idx;
        int     cur_npools, end_npools;

        LASSERT(npages > 0);
        LASSERT(page_pools.epp_total_pages+npages <= page_pools.epp_max_pages);
        LASSERT(npages_to_npools(npages) == npools);

        spin_lock(&page_pools.epp_lock);

        /*
         * (1) fill all the free slots of current pools.
         */
        /* free slots are those left by borrowed pages, plus the extra ones
         * with index >= epp_total_pages, located at the tail of last pool. */
        freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
        if (freeslot != 0)
                freeslot = PAGES_PER_POOL - freeslot;
        freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;

        op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        np_idx = npools - 1;
        ng_idx = (npages - 1) % PAGES_PER_POOL;

        while (freeslot) {
                LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
                LASSERT(pools[np_idx][ng_idx] != NULL);

                page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
                pools[np_idx][ng_idx] = NULL;

                freeslot--;

                if (++og_idx == PAGES_PER_POOL) {
                        op_idx++;
                        og_idx = 0;
                }
                if (--ng_idx < 0) {
                        if (np_idx == 0)
                                break;
                        np_idx--;
                        ng_idx = PAGES_PER_POOL - 1;
                }
        }

        /*
         * (2) add pools if needed.
         */
        cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        end_npools = (page_pools.epp_total_pages + npages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        LASSERT(end_npools <= page_pools.epp_max_pools);

        np_idx = 0;
        while (cur_npools < end_npools) {
                LASSERT(page_pools.epp_pools[cur_npools] == NULL);
                LASSERT(np_idx < npools);
                LASSERT(pools[np_idx] != NULL);

                page_pools.epp_pools[cur_npools++] = pools[np_idx];
                pools[np_idx++] = NULL;
        }

        page_pools.epp_total_pages += npages;
        page_pools.epp_free_pages += npages;
        page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
               page_pools.epp_total_pages);

        spin_unlock(&page_pools.epp_lock);
}
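
/*
 * Free-slot example (assuming PAGES_PER_POOL == 512): with
 * epp_total_pages == 1000 and epp_free_pages == 900, the tail of the
 * last pool holds 512 - (1000 % 512) = 24 unused slots and 100 pages
 * are currently borrowed, so enc_pools_insert() sees 124 free slots
 * to fill before it has to link in whole new pools.
 */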

static int enc_pools_add_pages(int npages)
{
        static DECLARE_MUTEX(sem_add_pages);
        cfs_page_t   ***pools;
        int             npools, alloced = 0;
        int             i, j, rc = -ENOMEM;

        if (npages < PTLRPC_MAX_BRW_PAGES)
                npages = PTLRPC_MAX_BRW_PAGES;

        down(&sem_add_pages);

        if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
                npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
        LASSERT(npages > 0);

        page_pools.epp_st_grows++;

        npools = npages_to_npools(npages);
        OBD_ALLOC(pools, npools * sizeof(*pools));
        if (pools == NULL)
                goto out;

        for (i = 0; i < npools; i++) {
                OBD_ALLOC(pools[i], CFS_PAGE_SIZE);
                if (pools[i] == NULL)
                        goto out_pools;

                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
                        pools[i][j] = cfs_alloc_page(CFS_ALLOC_IO |
                                                     CFS_ALLOC_HIGH);
                        if (pools[i][j] == NULL)
                                goto out_pools;

                        alloced++;
                }
        }

        enc_pools_insert(pools, npools, npages);
        CDEBUG(D_SEC, "added %d pages into pools\n", npages);
        rc = 0;

out_pools:
        enc_pools_cleanup(pools, npools);
        OBD_FREE(pools, npools * sizeof(*pools));
out:
        if (rc) {
                page_pools.epp_st_grow_fails++;
                CERROR("Failed to allocate %d enc pages\n", npages);
        }

        up(&sem_add_pages);
        return rc;
}

static inline void enc_pools_wakeup(void)
{
        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(page_pools.epp_waitqlen > 0);
                LASSERT(cfs_waitq_active(&page_pools.epp_waitq));
                cfs_waitq_broadcast(&page_pools.epp_waitq);
        }
}

static int enc_pools_should_grow(int page_needed, long now)
{
        /* don't grow if someone else is growing the pools right now,
         * or the pools have reached full capacity
         */
        if (page_pools.epp_growing ||
            page_pools.epp_total_pages == page_pools.epp_max_pages)
                return 0;

        /* if the total number of pages is not enough, we need to grow */
        if (page_pools.epp_total_pages < page_needed)
                return 1;

        /* if we just shrank due to memory pressure, we'd better
         * wait a while before growing again.
         */
        if (now - page_pools.epp_last_shrink < 2)
                return 0;

        /*
         * we might also want to consider other factors here, such as
         * wait queue length, idle index, etc.
         */

        /* grow the pools in any other cases */
        return 1;
}

/*
 * we allocate the requested pages atomically.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        cfs_waitlink_t  waitlink;
        unsigned long   this_idle = -1;
        cfs_time_t      tick = 0;
        long            now;
        int             p_idx, g_idx;
        int             i;

        LASSERT(desc->bd_max_iov > 0);
        LASSERT(desc->bd_max_iov <= page_pools.epp_max_pages);

        /* resent bulk, enc pages might have been allocated previously */
        if (desc->bd_enc_pages != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_pages,
                  desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        if (desc->bd_enc_pages == NULL)
                return -ENOMEM;

        spin_lock(&page_pools.epp_lock);

        page_pools.epp_st_access++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_max_iov)) {
                if (tick == 0)
                        tick = cfs_time_current();

                now = cfs_time_current_sec();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_max_iov;

                if (enc_pools_should_grow(desc->bd_max_iov, now)) {
                        page_pools.epp_growing = 1;

                        spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        spin_lock(&page_pools.epp_lock);

                        page_pools.epp_growing = 0;
                } else {
                        if (++page_pools.epp_waitqlen >
                            page_pools.epp_st_max_wqlen)
                                page_pools.epp_st_max_wqlen =
                                                page_pools.epp_waitqlen;

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        cfs_waitlink_init(&waitlink);
                        cfs_waitq_add(&page_pools.epp_waitq, &waitlink);

                        spin_unlock(&page_pools.epp_lock);
                        cfs_schedule();
                        spin_lock(&page_pools.epp_lock);

                        LASSERT(page_pools.epp_waitqlen > 0);
                        page_pools.epp_waitqlen--;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_max_iov);
                page_pools.epp_pages_short -= desc->bd_max_iov;

                this_idle = 0;
                goto again;
        }

        /* record max wait time */
        if (unlikely(tick != 0)) {
                tick = cfs_time_current() - tick;
                if (tick > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick;
        }

        /* proceed with rest of allocation */
        page_pools.epp_free_pages -= desc->bd_max_iov;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_pages[i] = page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        /*
         * new idle index = (old * weight + new) / (weight + 1)
         */
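        /*
         * e.g. (illustrative numbers) with IDLE_IDX_WEIGHT == 3, an old
         * index of 100 and an instantaneous idleness of 20 give
         * (100 * 3 + 20) / 4 = 80, so the index decays gradually toward
         * current utilization instead of jumping.
         */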
        if (this_idle == -1) {
                this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
                            page_pools.epp_total_pages;
        }
        page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
                                   this_idle) /
                                  (IDLE_IDX_WEIGHT + 1);

        page_pools.epp_last_access = cfs_time_current_sec();

        spin_unlock(&page_pools.epp_lock);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
        int     p_idx, g_idx;
        int     i;

        if (desc->bd_enc_pages == NULL)
                return;
        if (desc->bd_max_iov == 0)
                return;

        spin_lock(&page_pools.epp_lock);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        LASSERT(page_pools.epp_free_pages + desc->bd_max_iov <=
                page_pools.epp_total_pages);
        LASSERT(page_pools.epp_pools[p_idx]);

        for (i = 0; i < desc->bd_max_iov; i++) {
                LASSERT(desc->bd_enc_pages[i] != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

                page_pools.epp_pools[p_idx][g_idx] = desc->bd_enc_pages[i];

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        page_pools.epp_free_pages += desc->bd_max_iov;

        enc_pools_wakeup();

        spin_unlock(&page_pools.epp_lock);

        OBD_FREE(desc->bd_enc_pages,
                 desc->bd_max_iov * sizeof(*desc->bd_enc_pages));
        desc->bd_enc_pages = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);
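
/*
 * Typical caller pattern (a sketch, not code from this file): a bulk
 * security policy grabs encryption pages before the transfer and
 * returns them when it completes:
 *
 *      rc = sptlrpc_enc_pool_get_pages(desc);
 *      if (rc)
 *              return rc;
 *      ... encrypt desc->bd_iov into desc->bd_enc_pages, do transfer ...
 *      sptlrpc_enc_pool_put_pages(desc);
 */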

/*
 * add_user/del_user don't do much anymore, except that add_user() adds
 * some initial pages if the pools are currently empty; the rest is
 * handled by the pools' self-adaptation.
 */
int sptlrpc_enc_pool_add_user(void)
{
        int     need_grow = 0;

        spin_lock(&page_pools.epp_lock);
        if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
                page_pools.epp_growing = 1;
                need_grow = 1;
        }
        spin_unlock(&page_pools.epp_lock);

        if (need_grow) {
                enc_pools_add_pages(PTLRPC_MAX_BRW_PAGES);

                spin_lock(&page_pools.epp_lock);
                page_pools.epp_growing = 0;
                enc_pools_wakeup();
                spin_unlock(&page_pools.epp_lock);
        }
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);

int sptlrpc_enc_pool_del_user(void)
{
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);

static inline void enc_pools_alloc(void)
{
        LASSERT(page_pools.epp_max_pools);
        /*
         * on a system with huge memory but a small page size this might
         * lead to a high-order allocation. but that's uncommon, and we
         * assume memory is not too fragmented at module load time.
         */
        OBD_ALLOC(page_pools.epp_pools,
                  page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
}

static inline void enc_pools_free(void)
{
        LASSERT(page_pools.epp_max_pools);
        LASSERT(page_pools.epp_pools);

        OBD_FREE(page_pools.epp_pools,
                 page_pools.epp_max_pools * sizeof(*page_pools.epp_pools));
}

int sptlrpc_enc_pool_init(void)
{
        /*
         * maximum capacity is 1/8 of total physical memory.
         * is 1/8 a good number?
         */
        page_pools.epp_max_pages = num_physpages / 8;
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

        cfs_waitq_init(&page_pools.epp_waitq);
        page_pools.epp_waitqlen = 0;
        page_pools.epp_pages_short = 0;

        page_pools.epp_growing = 0;

        page_pools.epp_idle_idx = 0;
        page_pools.epp_last_shrink = cfs_time_current_sec();
        page_pools.epp_last_access = cfs_time_current_sec();

        spin_lock_init(&page_pools.epp_lock);
        page_pools.epp_total_pages = 0;
        page_pools.epp_free_pages = 0;

        page_pools.epp_st_grows = 0;
        page_pools.epp_st_grow_fails = 0;
        page_pools.epp_st_shrinks = 0;
        page_pools.epp_st_access = 0;
        page_pools.epp_st_missings = 0;
        page_pools.epp_st_lowfree = 0;
        page_pools.epp_st_max_wqlen = 0;
        page_pools.epp_st_max_wait = 0;

        enc_pools_alloc();
        if (page_pools.epp_pools == NULL)
                return -ENOMEM;

        pools_shrinker = set_shrinker(pools_shrinker_seeks, enc_pools_shrink);
        if (pools_shrinker == NULL) {
                enc_pools_free();
                return -ENOMEM;
        }

        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
        unsigned long cleaned, npools;

        LASSERT(pools_shrinker);
        LASSERT(page_pools.epp_pools);
        LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

        remove_shrinker(pools_shrinker);

        npools = npages_to_npools(page_pools.epp_total_pages);
        cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
        LASSERT(cleaned == page_pools.epp_total_pages);

        enc_pools_free();
}

#else /* !__KERNEL__ */

int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        return 0;
}

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
}

int sptlrpc_enc_pool_init(void)
{
        return 0;
}

void sptlrpc_enc_pool_fini(void)
{
}
#endif

/****************************************
 * Helpers to assist policy modules to  *
 * implement checksum functionality     *
 ****************************************/

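/* each entry is { sht_name, sht_tfm_name, sht_size (bytes) }; the field
 * names are inferred from how the table is used below */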
static struct sptlrpc_hash_type hash_types[] = {
        [BULK_HASH_ALG_NULL]    = { "null",     "null",         0 },
        [BULK_HASH_ALG_ADLER32] = { "adler32",  "adler32",      4 },
        [BULK_HASH_ALG_CRC32]   = { "crc32",    "crc32",        4 },
        [BULK_HASH_ALG_MD5]     = { "md5",      "md5",          16 },
        [BULK_HASH_ALG_SHA1]    = { "sha1",     "sha1",         20 },
        [BULK_HASH_ALG_SHA256]  = { "sha256",   "sha256",       32 },
        [BULK_HASH_ALG_SHA384]  = { "sha384",   "sha384",       48 },
        [BULK_HASH_ALG_SHA512]  = { "sha512",   "sha512",       64 },
        [BULK_HASH_ALG_WP256]   = { "wp256",    "wp256",        32 },
        [BULK_HASH_ALG_WP384]   = { "wp384",    "wp384",        48 },
        [BULK_HASH_ALG_WP512]   = { "wp512",    "wp512",        64 },
};

const struct sptlrpc_hash_type *sptlrpc_get_hash_type(__u8 hash_alg)
{
        struct sptlrpc_hash_type *ht;

        if (hash_alg < BULK_HASH_ALG_MAX) {
                ht = &hash_types[hash_alg];
                if (ht->sht_tfm_name)
                        return ht;
        }
        return NULL;
}
EXPORT_SYMBOL(sptlrpc_get_hash_type);

const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
        const struct sptlrpc_hash_type *ht;

        ht = sptlrpc_get_hash_type(hash_alg);
        if (ht)
                return ht->sht_name;
        else
                return "unknown";
}
EXPORT_SYMBOL(sptlrpc_get_hash_name);

int bulk_sec_desc_size(__u8 hash_alg, int request, int read)
{
        int size = sizeof(struct ptlrpc_bulk_sec_desc);

        LASSERT(hash_alg < BULK_HASH_ALG_MAX);

        /* read requests don't need extra data */
        if (!(read && request))
                size += hash_types[hash_alg].sht_size;

        return size;
}
EXPORT_SYMBOL(bulk_sec_desc_size);
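
/*
 * e.g. an MD5-checksummed bulk write request carries
 * sizeof(struct ptlrpc_bulk_sec_desc) + 16 bytes of checksum, while a
 * read request needs no checksum space until the reply.
 */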

int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        int    size = msg->lm_buflens[offset];

        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        if (bsd == NULL) {
                CERROR("Invalid bulk sec desc: size %d\n", size);
                return -EINVAL;
        }

        /* nothing to swab */

        if (unlikely(bsd->bsd_version != 0)) {
                CERROR("Unexpected version %u\n", bsd->bsd_version);
                return -EPROTO;
        }

        if (unlikely(bsd->bsd_flags != 0)) {
                CERROR("Unexpected flags %x\n", bsd->bsd_flags);
                return -EPROTO;
        }

        if (unlikely(!sptlrpc_get_hash_type(bsd->bsd_hash_alg))) {
                CERROR("Unsupported checksum algorithm %u\n",
                       bsd->bsd_hash_alg);
                return -EINVAL;
        }

        if (unlikely(!sptlrpc_get_ciph_type(bsd->bsd_ciph_alg))) {
                CERROR("Unsupported cipher algorithm %u\n",
                       bsd->bsd_ciph_alg);
                return -EINVAL;
        }

        if (unlikely(size > sizeof(*bsd) &&
                     size < sizeof(*bsd) +
                            hash_types[bsd->bsd_hash_alg].sht_size)) {
                CERROR("Malformed checksum data: csum alg %u, size %d\n",
                       bsd->bsd_hash_alg, size);
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);

#ifdef __KERNEL__

static int do_bulk_checksum_adler32(struct ptlrpc_bulk_desc *desc, void *buf)
{
        struct page    *page;
        int             off;
        char           *ptr;
        __u32           adler32 = 1;
        int             len, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                page = desc->bd_iov[i].kiov_page;
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr = cfs_kmap(page) + off;
                len = desc->bd_iov[i].kiov_len;

                adler32 = zlib_adler32(adler32, ptr, len);

                cfs_kunmap(page);
        }

        adler32 = cpu_to_le32(adler32);
        memcpy(buf, &adler32, sizeof(adler32));
        return 0;
}

static int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
{
        struct page    *page;
        int             off;
        char           *ptr;
        __u32           crc32 = ~0;
        int             len, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                page = desc->bd_iov[i].kiov_page;
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr = cfs_kmap(page) + off;
                len = desc->bd_iov[i].kiov_len;

                crc32 = crc32_le(crc32, ptr, len);

                cfs_kunmap(page);
        }

        crc32 = cpu_to_le32(crc32);
        memcpy(buf, &crc32, sizeof(crc32));
        return 0;
}

static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        struct crypto_tfm *tfm;
        struct scatterlist *sl;
        int i, rc = 0;

        LASSERT(alg > BULK_HASH_ALG_NULL &&
                alg < BULK_HASH_ALG_MAX);

        if (alg == BULK_HASH_ALG_ADLER32)
                return do_bulk_checksum_adler32(desc, buf);
        if (alg == BULK_HASH_ALG_CRC32)
                return do_bulk_checksum_crc32(desc, buf);

        tfm = crypto_alloc_tfm(hash_types[alg].sht_tfm_name, 0);
        if (tfm == NULL) {
                CERROR("Unable to allocate TFM %s\n", hash_types[alg].sht_name);
                return -ENOMEM;
        }

        OBD_ALLOC(sl, sizeof(*sl) * desc->bd_iov_count);
        if (sl == NULL) {
                rc = -ENOMEM;
                goto out_tfm;
        }

        for (i = 0; i < desc->bd_iov_count; i++) {
                sl[i].page = desc->bd_iov[i].kiov_page;
                sl[i].offset = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                sl[i].length = desc->bd_iov[i].kiov_len;
        }

        crypto_digest_init(tfm);
        crypto_digest_update(tfm, sl, desc->bd_iov_count);
        crypto_digest_final(tfm, buf);

        OBD_FREE(sl, sizeof(*sl) * desc->bd_iov_count);

out_tfm:
        crypto_free_tfm(tfm);
        return rc;
}

#else /* !__KERNEL__ */

static int do_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u32 alg, void *buf)
{
        __u32   csum32;
        int     i;

        LASSERT(alg == BULK_HASH_ALG_ADLER32 || alg == BULK_HASH_ALG_CRC32);

        if (alg == BULK_HASH_ALG_ADLER32)
                csum32 = 1;
        else
                csum32 = ~0;

        for (i = 0; i < desc->bd_iov_count; i++) {
                char *ptr = desc->bd_iov[i].iov_base;
                int len = desc->bd_iov[i].iov_len;

                if (alg == BULK_HASH_ALG_ADLER32)
                        csum32 = zlib_adler32(csum32, ptr, len);
                else
                        csum32 = crc32_le(csum32, ptr, len);
        }

        *((__u32 *) buf) = csum32;
        return 0;
}

#endif

/*
 * perform algorithm @alg checksum on @desc, store result in @buf.
 * if anything goes wrong, leave bsd_hash_alg as BULK_HASH_ALG_NULL.
 */
static
int generate_bulk_csum(struct ptlrpc_bulk_desc *desc, __u32 alg,
                       struct ptlrpc_bulk_sec_desc *bsd, int bsdsize)
{
        int rc;

        LASSERT(bsd);
        LASSERT(alg < BULK_HASH_ALG_MAX);

        bsd->bsd_hash_alg = BULK_HASH_ALG_NULL;

        if (alg == BULK_HASH_ALG_NULL)
                return 0;

        LASSERT(bsdsize >= sizeof(*bsd) + hash_types[alg].sht_size);

        rc = do_bulk_checksum(desc, alg, bsd->bsd_csum);
        if (rc == 0)
                bsd->bsd_hash_alg = alg;

        return rc;
}

static
int verify_bulk_csum(struct ptlrpc_bulk_desc *desc, int read,
                     struct ptlrpc_bulk_sec_desc *bsdv, int bsdvsize,
                     struct ptlrpc_bulk_sec_desc *bsdr, int bsdrsize)
{
        char *csum_p;
        char *buf = NULL;
        int   csum_size, rc = 0;

        LASSERT(bsdv);
        LASSERT(bsdv->bsd_hash_alg < BULK_HASH_ALG_MAX);

        if (bsdr)
                bsdr->bsd_hash_alg = BULK_HASH_ALG_NULL;

        if (bsdv->bsd_hash_alg == BULK_HASH_ALG_NULL)
                return 0;

        /* for all supported algorithms */
        csum_size = hash_types[bsdv->bsd_hash_alg].sht_size;

        if (bsdvsize < sizeof(*bsdv) + csum_size) {
                CERROR("verifier size %d too small, require %d\n",
                       bsdvsize, (int) sizeof(*bsdv) + csum_size);
                return -EINVAL;
        }

        if (bsdr) {
                LASSERT(bsdrsize >= sizeof(*bsdr) + csum_size);
                csum_p = (char *) bsdr->bsd_csum;
        } else {
                OBD_ALLOC(buf, csum_size);
                if (buf == NULL)
                        return -ENOMEM;
                csum_p = buf;
        }

        rc = do_bulk_checksum(desc, bsdv->bsd_hash_alg, csum_p);

        if (memcmp(bsdv->bsd_csum, csum_p, csum_size)) {
                CERROR("BAD %s CHECKSUM (%s), data mutated during "
                       "transfer!\n", read ? "READ" : "WRITE",
                       hash_types[bsdv->bsd_hash_alg].sht_name);
                rc = -EINVAL;
        } else {
                CDEBUG(D_SEC, "bulk %s checksum (%s) verified\n",
                       read ? "read" : "write",
                       hash_types[bsdv->bsd_hash_alg].sht_name);
        }

        if (bsdr) {
                bsdr->bsd_hash_alg = bsdv->bsd_hash_alg;
                memcpy(bsdr->bsd_csum, csum_p, csum_size);
        } else {
                LASSERT(buf);
                OBD_FREE(buf, csum_size);
        }

        return rc;
}

int bulk_csum_cli_request(struct ptlrpc_bulk_desc *desc, int read,
                          __u32 alg, struct lustre_msg *rmsg, int roff)
{
        struct ptlrpc_bulk_sec_desc *bsdr;
        int    rsize, rc = 0;

        rsize = rmsg->lm_buflens[roff];
        bsdr = lustre_msg_buf(rmsg, roff, sizeof(*bsdr));

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(alg < BULK_HASH_ALG_MAX);

        if (read) {
                bsdr->bsd_hash_alg = alg;
        } else {
                rc = generate_bulk_csum(desc, alg, bsdr, rsize);
                if (rc)
                        CERROR("bulk write: client failed to compute "
                               "checksum: %d\n", rc);

                /* when sending we only compute a wrong checksum instead
                 * of corrupting the data, so it is still correct on a redo */
                if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
                    bsdr->bsd_hash_alg != BULK_HASH_ALG_NULL)
                        bsdr->bsd_csum[0] ^= 0x1;
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_cli_request);

int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
                        struct lustre_msg *rmsg, int roff,
                        struct lustre_msg *vmsg, int voff)
{
        struct ptlrpc_bulk_sec_desc *bsdv, *bsdr;
        int    rsize, vsize;

        rsize = rmsg->lm_buflens[roff];
        vsize = vmsg->lm_buflens[voff];
        bsdr = lustre_msg_buf(rmsg, roff, 0);
        bsdv = lustre_msg_buf(vmsg, voff, 0);

        if (bsdv == NULL || vsize < sizeof(*bsdv)) {
                CERROR("Invalid checksum verifier from server: size %d\n",
                       vsize);
                return -EINVAL;
        }

        LASSERT(bsdr);
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(vsize >= sizeof(*bsdv));

        if (bsdr->bsd_hash_alg != bsdv->bsd_hash_alg) {
                CERROR("bulk %s: checksum algorithm mismatch: client "
                       "requested %s but server replied with %s. will try "
                       "the new algorithm for checksum verification\n",
                       read ? "read" : "write",
                       hash_types[bsdr->bsd_hash_alg].sht_name,
                       hash_types[bsdv->bsd_hash_alg].sht_name);
        }

        if (read)
                return verify_bulk_csum(desc, 1, bsdv, vsize, NULL, 0);
        else {
                char *cli, *srv, *new = NULL;
                int csum_size = hash_types[bsdr->bsd_hash_alg].sht_size;

                LASSERT(bsdr->bsd_hash_alg < BULK_HASH_ALG_MAX);
                if (bsdr->bsd_hash_alg == BULK_HASH_ALG_NULL)
                        return 0;

                if (vsize < sizeof(*bsdv) + csum_size) {
                        CERROR("verifier size %d too small, require %d\n",
                               vsize, (int) sizeof(*bsdv) + csum_size);
                        return -EINVAL;
                }

                cli = (char *) (bsdr + 1);
                srv = (char *) (bsdv + 1);

                if (!memcmp(cli, srv, csum_size)) {
                        /* checksum confirmed */
                        CDEBUG(D_SEC, "bulk write checksum (%s) confirmed\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                        return 0;
                }

                /* checksum mismatch: re-compute a fresh checksum, compare
                 * it against both, and give the proper warning. */
                OBD_ALLOC(new, csum_size);
                if (new == NULL)
                        return -ENOMEM;

                do_bulk_checksum(desc, bsdr->bsd_hash_alg, new);

                if (!memcmp(new, srv, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "on the client after we checksummed them\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                } else if (!memcmp(new, cli, csum_size)) {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                } else {
                        CERROR("BAD WRITE CHECKSUM (%s): pages were mutated "
                               "in transit, and the current page contents "
                               "don't match the originals or what the server "
                               "received\n",
                               hash_types[bsdr->bsd_hash_alg].sht_name);
                }
                OBD_FREE(new, csum_size);

                return -EINVAL;
        }
}
EXPORT_SYMBOL(bulk_csum_cli_reply);

#ifdef __KERNEL__
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
        char           *ptr;
        unsigned int    off, i;

        for (i = 0; i < desc->bd_iov_count; i++) {
                if (desc->bd_iov[i].kiov_len == 0)
                        continue;

                ptr = cfs_kmap(desc->bd_iov[i].kiov_page);
                off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
                ptr[off] ^= 0x1;
                cfs_kunmap(desc->bd_iov[i].kiov_page);
                return;
        }
}
#else
static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
}
#endif /* __KERNEL__ */

int bulk_csum_svc(struct ptlrpc_bulk_desc *desc, int read,
                  struct ptlrpc_bulk_sec_desc *bsdv, int vsize,
                  struct ptlrpc_bulk_sec_desc *bsdr, int rsize)
{
        int    rc;

        LASSERT(vsize >= sizeof(*bsdv));
        LASSERT(rsize >= sizeof(*bsdr));
        LASSERT(bsdv && bsdr);

        if (read) {
                rc = generate_bulk_csum(desc, bsdv->bsd_hash_alg, bsdr, rsize);
                if (rc)
                        CERROR("bulk read: server failed to generate %s "
                               "checksum: %d\n",
                               hash_types[bsdv->bsd_hash_alg].sht_name, rc);

                /* corrupt the data after we compute the checksum, to
                 * simulate an OST->client data error */
                if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        corrupt_bulk_data(desc);
        } else {
                rc = verify_bulk_csum(desc, 0, bsdv, vsize, bsdr, rsize);
        }

        return rc;
}
EXPORT_SYMBOL(bulk_csum_svc);

/****************************************
 * Helpers to assist policy modules to  *
 * implement encryption functionality   *
 ****************************************/

/* FIXME */
#ifndef __KERNEL__
#define CRYPTO_TFM_MODE_ECB     (0)
#define CRYPTO_TFM_MODE_CBC     (1)
#endif

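/* each entry is { sct_name, sct_tfm_name, cipher mode, IV size, key size },
 * sizes in bytes; the field layout is inferred from the AES entries and
 * from how sct_name/sct_tfm_name are used below */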
static struct sptlrpc_ciph_type cipher_types[] = {
        [BULK_CIPH_ALG_NULL]    = {
                "null",         "null",       0,                   0,  0
        },
        [BULK_CIPH_ALG_ARC4]    = {
                "arc4",         "arc4",       CRYPTO_TFM_MODE_ECB, 0,  16
        },
        [BULK_CIPH_ALG_AES128]  = {
                "aes128",       "aes",        CRYPTO_TFM_MODE_CBC, 16, 16
        },
        [BULK_CIPH_ALG_AES192]  = {
                "aes192",       "aes",        CRYPTO_TFM_MODE_CBC, 16, 24
        },
        [BULK_CIPH_ALG_AES256]  = {
                "aes256",       "aes",        CRYPTO_TFM_MODE_CBC, 16, 32
        },
        [BULK_CIPH_ALG_CAST128] = {
                "cast128",      "cast5",      CRYPTO_TFM_MODE_CBC, 8,  16
        },
        [BULK_CIPH_ALG_CAST256] = {
                "cast256",      "cast6",      CRYPTO_TFM_MODE_CBC, 16, 32
        },
        [BULK_CIPH_ALG_TWOFISH128] = {
                "twofish128",   "twofish",    CRYPTO_TFM_MODE_CBC, 16, 16
        },
        [BULK_CIPH_ALG_TWOFISH256] = {
                "twofish256",   "twofish",    CRYPTO_TFM_MODE_CBC, 16, 32
        },
};

const struct sptlrpc_ciph_type *sptlrpc_get_ciph_type(__u8 ciph_alg)
{
        struct sptlrpc_ciph_type *ct;

        if (ciph_alg < BULK_CIPH_ALG_MAX) {
                ct = &cipher_types[ciph_alg];
                if (ct->sct_tfm_name)
                        return ct;
        }
        return NULL;
}
EXPORT_SYMBOL(sptlrpc_get_ciph_type);

const char *sptlrpc_get_ciph_name(__u8 ciph_alg)
{
        const struct sptlrpc_ciph_type *ct;

        ct = sptlrpc_get_ciph_type(ciph_alg);
        if (ct)
                return ct->sct_name;
        else
                return "unknown";
}
EXPORT_SYMBOL(sptlrpc_get_ciph_name);