/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2004-2006 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef EXPORT_SYMTAB
#define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
#else
#include <linux/crypto.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"

static void sptlrpc_sec_destroy(struct ptlrpc_sec *sec);
static int sptlrpc_sec_destroy_ctx(struct ptlrpc_sec *sec,
                                   struct ptlrpc_cli_ctx *ctx);
static void sptlrpc_ctx_refresh(struct ptlrpc_cli_ctx *ctx);

/***********************************************
 * policy registration                         *
 ***********************************************/

static rwlock_t policy_lock = RW_LOCK_UNLOCKED;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
        NULL,
};

int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
{
        __u32 number = policy->sp_policy;

        LASSERT(policy->sp_name);
        LASSERT(policy->sp_cops);
        LASSERT(policy->sp_sops);

        if (number >= SPTLRPC_POLICY_MAX)
                return -EINVAL;

        write_lock(&policy_lock);
        if (unlikely(policies[number])) {
                write_unlock(&policy_lock);
                return -EALREADY;
        }
        policies[number] = policy;
        write_unlock(&policy_lock);

        CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_register_policy);

int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
{
        __u32 number = policy->sp_policy;

        LASSERT(number < SPTLRPC_POLICY_MAX);

        write_lock(&policy_lock);
        if (unlikely(policies[number] == NULL)) {
                write_unlock(&policy_lock);
                CERROR("%s: already unregistered\n", policy->sp_name);
                return -EINVAL;
        }

        LASSERT(policies[number] == policy);
        policies[number] = NULL;
        write_unlock(&policy_lock);

        CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_unregister_policy);
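
/*
 * Usage sketch (illustrative only, not taken from this file): a policy
 * module such as ptlrpc_gss would register itself at module load time
 * and unregister on unload. The initializer below is a hypothetical
 * fragment; only the fields asserted above (sp_name, sp_cops, sp_sops)
 * plus sp_policy are shown.
 *
 *      static struct ptlrpc_sec_policy my_policy = {
 *              .sp_name   = "my_policy",
 *              .sp_policy = SPTLRPC_POLICY_GSS,
 *              .sp_cops   = &my_cli_ops,
 *              .sp_sops   = &my_svc_ops,
 *      };
 *
 *      static int __init my_module_init(void)
 *      {
 *              return sptlrpc_register_policy(&my_policy);
 *      }
 *
 *      static void __exit my_module_exit(void)
 *      {
 *              sptlrpc_unregister_policy(&my_policy);
 *      }
 */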

static
struct ptlrpc_sec_policy * sptlrpc_flavor2policy(ptlrpc_sec_flavor_t flavor)
{
#ifdef CONFIG_KMOD
        static DECLARE_MUTEX(load_mutex);
#endif
        static atomic_t         loaded = ATOMIC_INIT(0);
        struct                  ptlrpc_sec_policy *policy;
        __u32                   number = SEC_FLAVOR_POLICY(flavor), flag = 0;

        if (number >= SPTLRPC_POLICY_MAX)
                return NULL;

#ifdef CONFIG_KMOD
again:
#endif
        read_lock(&policy_lock);
        policy = policies[number];
        if (policy && !try_module_get(policy->sp_owner))
                policy = NULL;
        if (policy == NULL)
                flag = atomic_read(&loaded);
        read_unlock(&policy_lock);

#ifdef CONFIG_KMOD
        /* on failure, try to load the gss module, but only once */
        if (unlikely(policy == NULL) &&
            number == SPTLRPC_POLICY_GSS && flag == 0) {
                mutex_down(&load_mutex);
                if (atomic_read(&loaded) == 0) {
                        if (request_module("ptlrpc_gss") != 0)
                                CERROR("Unable to load module ptlrpc_gss\n");
                        else
                                CWARN("module ptlrpc_gss loaded\n");

                        atomic_set(&loaded, 1);
                }
                mutex_up(&load_mutex);

                goto again;
        }
#endif

        return policy;
}

ptlrpc_sec_flavor_t sptlrpc_name2flavor(const char *name)
{
        if (!strcmp(name, "null"))
                return SPTLRPC_FLVR_NULL;
        if (!strcmp(name, "plain"))
                return SPTLRPC_FLVR_PLAIN;
        if (!strcmp(name, "krb5"))
                return SPTLRPC_FLVR_KRB5;
        if (!strcmp(name, "krb5i"))
                return SPTLRPC_FLVR_KRB5I;
        if (!strcmp(name, "krb5p"))
                return SPTLRPC_FLVR_KRB5P;

        return SPTLRPC_FLVR_INVALID;
}
EXPORT_SYMBOL(sptlrpc_name2flavor);

char *sptlrpc_flavor2name(ptlrpc_sec_flavor_t flavor)
{
        switch (flavor) {
        case SPTLRPC_FLVR_NULL:
                return "null";
        case SPTLRPC_FLVR_PLAIN:
                return "plain";
        case SPTLRPC_FLVR_KRB5:
                return "krb5";
        case SPTLRPC_FLVR_KRB5I:
                return "krb5i";
        case SPTLRPC_FLVR_KRB5P:
                return "krb5p";
        default:
                CERROR("invalid flavor 0x%x(p%u,s%u,v%u)\n", flavor,
                       SEC_FLAVOR_POLICY(flavor), SEC_FLAVOR_SUBPOLICY(flavor),
                       SEC_FLAVOR_SVC(flavor));
        }
        return "UNKNOWN";
}
EXPORT_SYMBOL(sptlrpc_flavor2name);
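
/*
 * Flavor encoding sketch: judging from the accessors used above, a
 * ptlrpc_sec_flavor_t packs a policy number, a sub-policy and a service
 * type, extracted with SEC_FLAVOR_POLICY(), SEC_FLAVOR_SUBPOLICY() and
 * SEC_FLAVOR_SVC(). The two helpers above round-trip names and flavors,
 * e.g. (illustrative):
 *
 *      ptlrpc_sec_flavor_t flvr = sptlrpc_name2flavor("krb5i");
 *
 *      LASSERT(flvr != SPTLRPC_FLVR_INVALID);
 *      CDEBUG(D_SEC, "krb5i => 0x%x => %s (policy %u, svc %u)\n",
 *             flvr, sptlrpc_flavor2name(flvr),
 *             SEC_FLAVOR_POLICY(flvr), SEC_FLAVOR_SVC(flvr));
 */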

/***********************************************
 * context helpers                             *
 * internal APIs                               *
 * cache management                            *
 ***********************************************/

static inline
unsigned long ctx_status(struct ptlrpc_cli_ctx *ctx)
{
        smp_mb();
        return (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK);
}

static inline
int ctx_is_uptodate(struct ptlrpc_cli_ctx *ctx)
{
        return (ctx_status(ctx) == PTLRPC_CTX_UPTODATE);
}

static inline
int ctx_is_refreshed(struct ptlrpc_cli_ctx *ctx)
{
        return (ctx_status(ctx) != 0);
}

static inline
int ctx_is_dead(struct ptlrpc_cli_ctx *ctx)
{
        smp_mb();
        return ((ctx->cc_flags & (PTLRPC_CTX_DEAD | PTLRPC_CTX_ERROR)) != 0);
}

static inline
int ctx_is_eternal(struct ptlrpc_cli_ctx *ctx)
{
        smp_mb();
        return ((ctx->cc_flags & PTLRPC_CTX_ETERNAL) != 0);
}

static
int ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount));

        if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
                cfs_time_t now = cfs_time_current_sec();

                smp_mb();
                clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

                if (ctx->cc_expire && cfs_time_aftereq(now, ctx->cc_expire))
                        CWARN("ctx %p(%u->%s): get expired (%lds exceeds)\n",
                              ctx, ctx->cc_vcred.vc_uid,
                              sec2target_str(ctx->cc_sec),
                              cfs_time_sub(now, ctx->cc_expire));
                else
                        CWARN("ctx %p(%u->%s): force to die (%lds remains)\n",
                              ctx, ctx->cc_vcred.vc_uid,
                              sec2target_str(ctx->cc_sec),
                              ctx->cc_expire == 0 ? 0 :
                              cfs_time_sub(ctx->cc_expire, now));

                return 1;
        }
        return 0;
}

static
void ctx_enhash(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
{
        set_bit(PTLRPC_CTX_HASHED_BIT, &ctx->cc_flags);
        atomic_inc(&ctx->cc_refcount);
        hlist_add_head(&ctx->cc_hash, hash);
}

static
void ctx_unhash(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
{
        LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT(test_bit(PTLRPC_CTX_HASHED_BIT, &ctx->cc_flags));
        LASSERT(!hlist_unhashed(&ctx->cc_hash));

        clear_bit(PTLRPC_CTX_HASHED_BIT, &ctx->cc_flags);

        if (atomic_dec_and_test(&ctx->cc_refcount)) {
                __hlist_del(&ctx->cc_hash);
                hlist_add_head(&ctx->cc_hash, freelist);
        } else
                hlist_del_init(&ctx->cc_hash);
}

/*
 * return 1 if the context is dead.
 */
static
int ctx_check_death(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
{
        if (unlikely(ctx_is_dead(ctx)))
                goto unhash;

        /* expire == 0 means never expire. a newly created gss context
         * also has 0 expiration while its upcall is still in progress
         */
        smp_mb();
        if (ctx->cc_expire == 0)
                return 0;

        /* check real expiration */
        smp_mb();
        if (cfs_time_after(ctx->cc_expire, cfs_time_current_sec()))
                return 0;

        ctx_expire(ctx);

unhash:
        if (freelist)
                ctx_unhash(ctx, freelist);

        return 1;
}

static inline
int ctx_check_death_locked(struct ptlrpc_cli_ctx *ctx,
                           struct hlist_head *freelist)
{
        LASSERT(ctx->cc_sec);
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
        LASSERT(test_bit(PTLRPC_CTX_HASHED_BIT, &ctx->cc_flags));

        return ctx_check_death(ctx, freelist);
}

static
int ctx_check_uptodate(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(ctx->cc_sec);
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        if (!ctx_check_death(ctx, NULL) && ctx_is_uptodate(ctx))
                return 1;
        return 0;
}

static inline
int ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
{
        /* a small optimization for the null policy */
        if (!ctx->cc_ops->match)
                return 1;

        return ctx->cc_ops->match(ctx, vcred);
}

static
void ctx_list_destroy(struct hlist_head *head)
{
        struct ptlrpc_cli_ctx *ctx;

        while (!hlist_empty(head)) {
                ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_hash);

                LASSERT(atomic_read(&ctx->cc_refcount) == 0);
                LASSERT(test_bit(PTLRPC_CTX_HASHED_BIT, &ctx->cc_flags) == 0);

                hlist_del_init(&ctx->cc_hash);
                sptlrpc_sec_destroy_ctx(ctx->cc_sec, ctx);
        }
}

static
void ctx_cache_gc(struct ptlrpc_sec *sec, struct hlist_head *freelist)
{
        struct ptlrpc_cli_ctx *ctx;
        struct hlist_node *pos, *next;
        int i;
        ENTRY;

        CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);

        for (i = 0; i < sec->ps_ccache_size; i++) {
                hlist_for_each_entry_safe(ctx, pos, next,
                                          &sec->ps_ccache[i], cc_hash)
                        ctx_check_death_locked(ctx, freelist);
        }

        sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
        EXIT;
}

/*
 * @uid: which user to flush; -1 means flush all users.
 * @grace: mark contexts DEAD but allow a graceful destroy, e.g.
 *         notifying the server side.
 * @force: also flush busy entries.
 *
 * return the number of busy contexts encountered.
 *
 * In any case, never touch "eternal" contexts.
 */
static
int ctx_cache_flush(struct ptlrpc_sec *sec, uid_t uid, int grace, int force)
{
        struct ptlrpc_cli_ctx *ctx;
        struct hlist_node *pos, *next;
        HLIST_HEAD(freelist);
        int i, busy = 0;
        ENTRY;

        might_sleep_if(grace);

        spin_lock(&sec->ps_lock);
        for (i = 0; i < sec->ps_ccache_size; i++) {
                hlist_for_each_entry_safe(ctx, pos, next,
                                          &sec->ps_ccache[i], cc_hash) {
                        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

                        if (ctx_is_eternal(ctx))
                                continue;
                        if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
                                continue;

                        if (atomic_read(&ctx->cc_refcount) > 1) {
                                busy++;
                                if (!force)
                                        continue;

                                CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
                                      "grace %d\n",
                                      atomic_read(&ctx->cc_refcount),
                                      ctx, ctx->cc_vcred.vc_uid,
                                      sec2target_str(ctx->cc_sec), grace);
                        }
                        ctx_unhash(ctx, &freelist);

                        set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
                        if (!grace)
                                clear_bit(PTLRPC_CTX_UPTODATE_BIT,
                                          &ctx->cc_flags);
                }
        }
        spin_unlock(&sec->ps_lock);

        ctx_list_destroy(&freelist);
        RETURN(busy);
}

static inline
unsigned int ctx_hash_index(struct ptlrpc_sec *sec, __u64 key)
{
        return (unsigned int) (key & (sec->ps_ccache_size - 1));
}
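
/*
 * Note: the mask above behaves as a cheap (key % ps_ccache_size) only if
 * ps_ccache_size is a power of two, which the cache allocator is assumed
 * to guarantee. Example: with ps_ccache_size == 64, uid 1000 maps to
 * bucket 1000 & 63 == 40, i.e. sec->ps_ccache[40].
 */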

/*
 * return the matched context. If it's newly created, we also give it the
 * first push to refresh. return NULL on error.
 */
static
struct ptlrpc_cli_ctx * ctx_cache_lookup(struct ptlrpc_sec *sec,
                                         struct vfs_cred *vcred,
                                         int create, int remove_dead)
{
        struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
        struct hlist_head *hash_head;
        struct hlist_node *pos, *next;
        HLIST_HEAD(freelist);
        unsigned int hash, gc = 0, found = 0;
        ENTRY;

        might_sleep();

        hash = ctx_hash_index(sec, (__u64) vcred->vc_uid);
        LASSERT(hash < sec->ps_ccache_size);
        hash_head = &sec->ps_ccache[hash];

retry:
        spin_lock(&sec->ps_lock);

        /* gc_next == 0 means never do gc */
        if (remove_dead && sec->ps_gc_next &&
            cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
                ctx_cache_gc(sec, &freelist);
                gc = 1;
        }

        hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_hash) {
                if (gc == 0 &&
                    ctx_check_death_locked(ctx, remove_dead ? &freelist : NULL))
                        continue;

                if (ctx_match(ctx, vcred)) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                if (new && new != ctx) {
                        /* lost the race, just free it */
                        hlist_add_head(&new->cc_hash, &freelist);
                        new = NULL;
                }

                /* hot node, move to head */
                if (hash_head->first != &ctx->cc_hash) {
                        __hlist_del(&ctx->cc_hash);
                        hlist_add_head(&ctx->cc_hash, hash_head);
                }
        } else {
                /* don't allocate for reverse sec */
                if (sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
                        spin_unlock(&sec->ps_lock);
                        RETURN(NULL);
                }

                if (new) {
                        ctx_enhash(new, hash_head);
                        ctx = new;
                } else if (create) {
                        spin_unlock(&sec->ps_lock);
                        new = sec->ps_policy->sp_cops->create_ctx(sec, vcred);
                        if (new) {
                                atomic_inc(&sec->ps_busy);
                                goto retry;
                        }
                } else
                        ctx = NULL;
        }

        /* hold a ref */
        if (ctx)
                atomic_inc(&ctx->cc_refcount);

        spin_unlock(&sec->ps_lock);

        /* the allocator of the context must give the first push to refresh */
        if (new) {
                LASSERT(new == ctx);
                sptlrpc_ctx_refresh(new);
        }

        ctx_list_destroy(&freelist);
        RETURN(ctx);
}

static inline
struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
{
        struct vfs_cred vcred;
        int create = 1, remove_dead = 1;

        LASSERT(sec);

        if (sec->ps_flags & (PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY)) {
                vcred.vc_uid = 0;
                vcred.vc_gid = 0;
                if (sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
                        create = 0;
                        remove_dead = 0;
                }
        } else {
                vcred.vc_uid = cfs_current()->uid;
                vcred.vc_gid = cfs_current()->gid;
        }

        if (sec->ps_policy->sp_cops->lookup_ctx)
                return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred);
        else
                return ctx_cache_lookup(sec, &vcred, create, remove_dead);
}

/**************************************************
 * client context APIs                            *
 **************************************************/

static
void sptlrpc_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        if (!ctx_is_refreshed(ctx) && ctx->cc_ops->refresh)
                ctx->cc_ops->refresh(ctx);
}

struct ptlrpc_cli_ctx *sptlrpc_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        atomic_inc(&ctx->cc_refcount);
        return ctx;
}
EXPORT_SYMBOL(sptlrpc_ctx_get);

void sptlrpc_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
{
        struct ptlrpc_sec *sec = ctx->cc_sec;

        LASSERT(sec);
        LASSERT(atomic_read(&ctx->cc_refcount));

        if (!atomic_dec_and_test(&ctx->cc_refcount))
                return;

        LASSERT(test_bit(PTLRPC_CTX_HASHED_BIT, &ctx->cc_flags) == 0);
        LASSERT(hlist_unhashed(&ctx->cc_hash));

        /* if async is required, we must clear the UPTODATE bit to prevent
         * extra rpcs during the destroy procedure.
         */
        if (!sync)
                clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

        /* destroy this context */
        if (!sptlrpc_sec_destroy_ctx(sec, ctx))
                return;

        CWARN("%s@%p: put last ctx, also destroy the sec\n",
              sec->ps_policy->sp_name, sec);

        sptlrpc_sec_destroy(sec);
}
EXPORT_SYMBOL(sptlrpc_ctx_put);
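
/*
 * Refcount usage sketch (illustrative): a temporary user pins a ctx with
 * sptlrpc_ctx_get() and drops it with sptlrpc_ctx_put(); the final put
 * destroys the context and possibly the owning sec, as above.
 *
 *      struct ptlrpc_cli_ctx *ctx = sptlrpc_ctx_get(req->rq_cli_ctx);
 *
 *      ... use ctx; it cannot be freed underneath us ...
 *
 *      sptlrpc_ctx_put(ctx, 1);    sync == 1 permits a graceful destroy
 */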

/*
 * mark a ctx as DEAD, and pull it out of the hash table.
 *
 * NOTE: the caller must hold at least 1 ref on the ctx.
 */
void sptlrpc_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(ctx->cc_sec);
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);

        ctx_expire(ctx);

        spin_lock(&ctx->cc_sec->ps_lock);

        if (test_and_clear_bit(PTLRPC_CTX_HASHED_BIT, &ctx->cc_flags)) {
                LASSERT(!hlist_unhashed(&ctx->cc_hash));
                LASSERT(atomic_read(&ctx->cc_refcount) > 1);

                hlist_del_init(&ctx->cc_hash);
                if (atomic_dec_and_test(&ctx->cc_refcount))
                        LBUG();
        }

        spin_unlock(&ctx->cc_sec->ps_lock);
}
EXPORT_SYMBOL(sptlrpc_ctx_expire);

void sptlrpc_ctx_replace(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *new)
{
        struct ptlrpc_cli_ctx *ctx;
        struct hlist_node *pos, *next;
        HLIST_HEAD(freelist);
        unsigned int hash;
        ENTRY;

        hash = ctx_hash_index(sec, (__u64) new->cc_vcred.vc_uid);
        LASSERT(hash < sec->ps_ccache_size);

        spin_lock(&sec->ps_lock);

        hlist_for_each_entry_safe(ctx, pos, next,
                                  &sec->ps_ccache[hash], cc_hash) {
                if (!ctx_match(ctx, &new->cc_vcred))
                        continue;

                ctx_expire(ctx);
                ctx_unhash(ctx, &freelist);
                break;
        }

        ctx_enhash(new, &sec->ps_ccache[hash]);
        atomic_inc(&sec->ps_busy);

        spin_unlock(&sec->ps_lock);

        ctx_list_destroy(&freelist);
        EXIT;
}
EXPORT_SYMBOL(sptlrpc_ctx_replace);

int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        ENTRY;

        LASSERT(!req->rq_cli_ctx);
        LASSERT(imp);

        if (imp->imp_sec == NULL) {
                CERROR("import %p (%s) with no sec pointer\n",
                       imp, ptlrpc_import_state_name(imp->imp_state));
                RETURN(-EACCES);
        }

        req->rq_cli_ctx = get_my_ctx(imp->imp_sec);

        if (!req->rq_cli_ctx) {
                CERROR("req %p: fail to get context from cache\n", req);
                RETURN(-ENOMEM);
        }

        RETURN(0);
}

void sptlrpc_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
{
        struct ptlrpc_request *req, *next;

        spin_lock(&ctx->cc_lock);
        list_for_each_entry_safe(req, next, &ctx->cc_req_list, rq_ctx_chain) {
                list_del_init(&req->rq_ctx_chain);
                ptlrpc_wake_client_req(req);
        }
        spin_unlock(&ctx->cc_lock);
}
EXPORT_SYMBOL(sptlrpc_ctx_wakeup);

int sptlrpc_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
{
        LASSERT(ctx->cc_ops);

        if (ctx->cc_ops->display == NULL)
                return 0;

        return ctx->cc_ops->display(ctx, buf, bufsize);
}

void sptlrpc_req_put_ctx(struct ptlrpc_request *req)
{
        ENTRY;

        LASSERT(req);
        LASSERT(req->rq_cli_ctx);

        /* the request might be asked to release its context early while
         * still on the context waiting list.
         */
        if (!list_empty(&req->rq_ctx_chain)) {
                spin_lock(&req->rq_cli_ctx->cc_lock);
                list_del_init(&req->rq_ctx_chain);
                spin_unlock(&req->rq_cli_ctx->cc_lock);
        }

        /* this could be called with a spinlock held, so use async mode */
        sptlrpc_ctx_put(req->rq_cli_ctx, 0);
        req->rq_cli_ctx = NULL;
        EXIT;
}

/*
 * the request must have a context. if we fail to get a new context,
 * just restore the old one
 */
int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        int rc;
        ENTRY;

        LASSERT(ctx);
        LASSERT(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags));

        /* make sure not on context waiting list */
        spin_lock(&ctx->cc_lock);
        list_del_init(&req->rq_ctx_chain);
        spin_unlock(&ctx->cc_lock);

        sptlrpc_ctx_get(ctx);
        sptlrpc_req_put_ctx(req);
        rc = sptlrpc_req_get_ctx(req);
        if (!rc) {
                LASSERT(req->rq_cli_ctx);
                LASSERT(req->rq_cli_ctx != ctx);
                sptlrpc_ctx_put(ctx, 1);
        } else {
                LASSERT(!req->rq_cli_ctx);
                req->rq_cli_ctx = ctx;
        }
        RETURN(rc);
}
EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);

static
int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
{
        smp_mb();
        if (ctx_is_refreshed(ctx))
                return 1;
        return 0;
}

static
int ctx_refresh_timeout(void *data)
{
        struct ptlrpc_request *req = data;
        int rc;

        /* conn_cnt is needed in expire_one_request */
        lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);

        rc = ptlrpc_expire_one_request(req);
        /* if we started recovery, we should mark this ctx dead; otherwise,
         * in case lgssd died, nobody would retire this ctx and subsequent
         * connect attempts would keep finding the same ctx, causing a
         * deadlock. there's an assumption that the expiry time of the
         * request is later than the context refresh expiry time.
         */
        if (rc == 0)
                ctx_expire(req->rq_cli_ctx);
        return rc;
}

static
void ctx_refresh_interrupt(void *data)
{
        /* do nothing */
}

/*
 * the status of a context may be changed by other threads at any time;
 * we allow this race. but once we return 0, the caller will assume the
 * context is up to date and keep using it until the affected rpc is done.
 *
 * @timeout:
 *    < 0  - don't wait
 *    = 0  - wait until success or a fatal error occurs
 *    > 0  - timeout value in seconds
 *
 * return 0 only if the context is up to date.
 */
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
{
        struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
        struct l_wait_info      lwi;
        int                     rc;
        ENTRY;

        LASSERT(ctx);

        /* special ctxs */
        if (ctx_is_eternal(ctx) || req->rq_ctx_init || req->rq_ctx_fini)
                RETURN(0);

        /* reverse ctxs, don't refresh */
        if (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE)
                RETURN(0);

        spin_lock(&ctx->cc_lock);
again:
        if (ctx_check_uptodate(ctx)) {
                if (!list_empty(&req->rq_ctx_chain))
                        list_del_init(&req->rq_ctx_chain);
                spin_unlock(&ctx->cc_lock);
                RETURN(0);
        }

        if (test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags)) {
                req->rq_err = 1;
                if (!list_empty(&req->rq_ctx_chain))
                        list_del_init(&req->rq_ctx_chain);
                spin_unlock(&ctx->cc_lock);
                RETURN(-EPERM);
        }

        /* This is subtle. For a resent message we have to keep the original
         * context to survive the following situation:
         *  1. the request is sent to the server
         *  2. recovery is kicked off
         *  3. recovery finishes, the request is marked as resent
         *  4. the request is resent
         *  5. the old reply from the server is received (because the xid
         *     is the same)
         *  6. the reply is verified (it has to succeed)
         *  7. the new reply from the server is received, lnet drops it
         *
         * Note we can't simply change the xid of a resent request because
         * the server relies on it for reply reconstruction.
         *
         * Commonly the original context should be uptodate because we
         * have a nice expiry time; and the server will keep its half of
         * the context because we hold at least one ref on the old context,
         * which prevents the context destroy RPC from being sent. So the
         * server can still accept the request and finish the RPC. Two cases:
         *  1. If the server side context has been trimmed, NO_CONTEXT will
         *     be returned, and gss_cli_ctx_verify/unseal will switch to the
         *     new context by force.
         *  2. The current context was never refreshed; then we are fine: we
         *     never actually sent a request with the old context anyway.
         */
        if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
            req->rq_reqmsg &&
            lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
                if (!list_empty(&req->rq_ctx_chain))
                        list_del_init(&req->rq_ctx_chain);
                spin_unlock(&ctx->cc_lock);
                RETURN(0);
        }

        if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
                spin_unlock(&ctx->cc_lock);

                /* don't have to, but we don't want to release it too soon */
                sptlrpc_ctx_get(ctx);

                rc = sptlrpc_req_replace_dead_ctx(req);
                if (rc) {
                        LASSERT(ctx == req->rq_cli_ctx);
                        CERROR("req %p: failed to replace dead ctx %p\n",
                                req, ctx);
                        req->rq_err = 1;
                        LASSERT(list_empty(&req->rq_ctx_chain));
                        sptlrpc_ctx_put(ctx, 1);
                        RETURN(-ENOMEM);
                }

                LASSERT(ctx != req->rq_cli_ctx);
                CWARN("req %p: replace dead ctx %p(%u->%s) => %p\n",
                      req, ctx, ctx->cc_vcred.vc_uid,
                      sec2target_str(ctx->cc_sec), req->rq_cli_ctx);

                sptlrpc_ctx_put(ctx, 1);
                ctx = req->rq_cli_ctx;
                LASSERT(list_empty(&req->rq_ctx_chain));

                spin_lock(&ctx->cc_lock);
                goto again;
        }

        /* Now we're sure this context is in the middle of an upcall; add
         * ourselves to the waiting list
         */
        if (list_empty(&req->rq_ctx_chain))
                list_add(&req->rq_ctx_chain, &ctx->cc_req_list);

        spin_unlock(&ctx->cc_lock);

        if (timeout < 0) {
                RETURN(-EWOULDBLOCK);
        }

        /* Clear any flags that may be present from previous sends */
        LASSERT(req->rq_receiving_reply == 0);
        spin_lock(&req->rq_lock);
        req->rq_err = 0;
        req->rq_timedout = 0;
        req->rq_resend = 0;
        req->rq_restart = 0;
        spin_unlock(&req->rq_lock);

        lwi = LWI_TIMEOUT_INTR(timeout == 0 ? LONG_MAX : timeout * HZ,
                               ctx_refresh_timeout, ctx_refresh_interrupt, req);
        rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);

        spin_lock(&ctx->cc_lock);
        /* five ways we can get here:
         * 1. successfully refreshed;
         * 2. someone else marked this ctx dead by force;
         * 3. interrupted;
         * 4. timed out, and we don't want to recover from the failure;
         * 5. timed out, and woken up when recovery finished;
         */
        if (!ctx_is_refreshed(ctx)) {
                /* timed out or interrupted */
                list_del_init(&req->rq_ctx_chain);
                spin_unlock(&ctx->cc_lock);

                LASSERT(rc != 0);
                RETURN(rc);
        }

        goto again;
}
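
/*
 * Caller sketch (illustrative), mapping the @timeout values documented
 * above:
 *
 *      rc = sptlrpc_req_refresh_ctx(req, -1);   poll: -EWOULDBLOCK if
 *                                               not yet up to date
 *      rc = sptlrpc_req_refresh_ctx(req, 0);    wait until refreshed or
 *                                               a fatal error occurs
 *      rc = sptlrpc_req_refresh_ctx(req, 30);   wait up to 30 seconds
 *
 * sptlrpc_import_check_ctx() below builds a dummy request for exactly
 * this purpose.
 */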

void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
{
        struct sec_flavor_config *conf;

        LASSERT(req->rq_import);
        LASSERT(req->rq_import->imp_sec);
        LASSERT(req->rq_cli_ctx);
        LASSERT(req->rq_cli_ctx->cc_sec);
        LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);

        /* special security flags according to opcode */
        switch (opcode) {
        case OST_READ:
                req->rq_bulk_read = 1;
                break;
        case OST_WRITE:
                req->rq_bulk_write = 1;
                break;
        case SEC_CTX_INIT:
                req->rq_ctx_init = 1;
                break;
        case SEC_CTX_FINI:
                req->rq_ctx_fini = 1;
                break;
        }

        req->rq_sec_flavor = req->rq_cli_ctx->cc_sec->ps_flavor;

        /* force SVC_NONE for context initiation rpc, SVC_AUTH for context
         * destruction rpc
         */
        if (unlikely(req->rq_ctx_init)) {
                req->rq_sec_flavor = SEC_MAKE_RPC_FLAVOR(
                                SEC_FLAVOR_POLICY(req->rq_sec_flavor),
                                SEC_FLAVOR_SUBPOLICY(req->rq_sec_flavor),
                                SEC_FLAVOR_SVC(SPTLRPC_SVC_NONE));
        } else if (unlikely(req->rq_ctx_fini)) {
                req->rq_sec_flavor = SEC_MAKE_RPC_FLAVOR(
                                SEC_FLAVOR_POLICY(req->rq_sec_flavor),
                                SEC_FLAVOR_SUBPOLICY(req->rq_sec_flavor),
                                SEC_FLAVOR_SVC(SPTLRPC_SVC_AUTH));
        }

        conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;

        /* set the user descriptor flag, except for ROOTONLY (which doesn't
         * need it) and null security (which can't carry it)
         */
        if ((conf->sfc_flags & PTLRPC_SEC_FL_ROOTONLY) == 0 &&
            req->rq_sec_flavor != SPTLRPC_FLVR_NULL)
                req->rq_sec_flavor |= SEC_FLAVOR_FL_USER;

        /* bulk security flag */
        if ((req->rq_bulk_read || req->rq_bulk_write) &&
            (conf->sfc_bulk_priv != BULK_PRIV_ALG_NULL ||
             conf->sfc_bulk_csum != BULK_CSUM_ALG_NULL))
                req->rq_sec_flavor |= SEC_FLAVOR_FL_BULK;
}

void sptlrpc_request_out_callback(struct ptlrpc_request *req)
{
        if (SEC_FLAVOR_SVC(req->rq_sec_flavor) != SPTLRPC_SVC_PRIV)
                return;

        LASSERT(req->rq_clrbuf);
        if (req->rq_pool || !req->rq_reqbuf)
                return;

        OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
        req->rq_reqbuf = NULL;
        req->rq_reqbuf_len = 0;
}

/*
 * check whether the current user has a valid context for an import.
 * may retry repeatedly on non-fatal errors.
 * return 0 on success, < 0 on failure
 */
int sptlrpc_import_check_ctx(struct obd_import *imp)
{
        struct ptlrpc_cli_ctx *ctx;
        struct ptlrpc_request *req = NULL;
        int rc;
        ENTRY;

        might_sleep();

        ctx = get_my_ctx(imp->imp_sec);
        if (!ctx)
                RETURN(1);

        if (ctx_is_eternal(ctx)) {
                sptlrpc_ctx_put(ctx, 1);
                RETURN(0);
        }

        OBD_ALLOC_PTR(req);
        if (!req)
                RETURN(-ENOMEM);

        spin_lock_init(&req->rq_lock);
        atomic_set(&req->rq_refcount, 10000);
        INIT_LIST_HEAD(&req->rq_ctx_chain);
        init_waitqueue_head(&req->rq_reply_waitq);
        req->rq_import = imp;
        req->rq_cli_ctx = ctx;

        rc = sptlrpc_req_refresh_ctx(req, 0);
        LASSERT(list_empty(&req->rq_ctx_chain));
        sptlrpc_ctx_put(req->rq_cli_ctx, 1);
        OBD_FREE_PTR(req);

        RETURN(rc);
}

int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        int rc = 0;
        ENTRY;

        LASSERT(ctx);
        LASSERT(ctx->cc_sec);
        LASSERT(req->rq_reqbuf || req->rq_clrbuf);

        /* we wrap the bulk request here because now we can be sure
         * the context is up to date.
         */
        if (req->rq_bulk) {
                rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
                if (rc)
                        RETURN(rc);
        }

        switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
        case SPTLRPC_SVC_NONE:
        case SPTLRPC_SVC_AUTH:
                LASSERT(ctx->cc_ops->sign);
                rc = ctx->cc_ops->sign(ctx, req);
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(ctx->cc_ops->seal);
                rc = ctx->cc_ops->seal(ctx, req);
                break;
        default:
                LBUG();
        }

        if (rc == 0) {
                LASSERT(req->rq_reqdata_len);
                LASSERT(req->rq_reqdata_len % 8 == 0);
                LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
        }

        RETURN(rc);
}

/*
 * rq_nob_received is the actual received data length
 */
int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        int rc;
        ENTRY;

        LASSERT(ctx);
        LASSERT(ctx->cc_sec);
        LASSERT(ctx->cc_ops);
        LASSERT(req->rq_repbuf);

        req->rq_repdata_len = req->rq_nob_received;

        if (req->rq_nob_received < sizeof(struct lustre_msg)) {
                CERROR("replied data length %d too small\n",
                       req->rq_nob_received);
                RETURN(-EPROTO);
        }

        if (req->rq_repbuf->lm_magic == LUSTRE_MSG_MAGIC_V1 ||
            req->rq_repbuf->lm_magic == LUSTRE_MSG_MAGIC_V1_SWABBED) {
                /* it must be null flavor, so our request should also be
                 * in null flavor */
                if (SEC_FLAVOR_POLICY(req->rq_sec_flavor) !=
                    SPTLRPC_POLICY_NULL) {
                        CERROR("request flavor is %x but reply with null\n",
                               req->rq_sec_flavor);
                        RETURN(-EPROTO);
                }
        } else {
                /* v2 message... */
                ptlrpc_sec_flavor_t tmpf = req->rq_repbuf->lm_secflvr;

                if (req->rq_repbuf->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED)
                        __swab32s(&tmpf);

                if (SEC_FLAVOR_POLICY(tmpf) !=
                    SEC_FLAVOR_POLICY(req->rq_sec_flavor)) {
                        CERROR("request policy %u while reply with %d\n",
                               SEC_FLAVOR_POLICY(req->rq_sec_flavor),
                               SEC_FLAVOR_POLICY(tmpf));
                        RETURN(-EPROTO);
                }

                if ((SEC_FLAVOR_POLICY(req->rq_sec_flavor) !=
                     SPTLRPC_POLICY_NULL) &&
                    lustre_unpack_msg(req->rq_repbuf, req->rq_nob_received))
                        RETURN(-EPROTO);
        }

        switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
        case SPTLRPC_SVC_NONE:
        case SPTLRPC_SVC_AUTH:
                LASSERT(ctx->cc_ops->verify);
                rc = ctx->cc_ops->verify(ctx, req);
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(ctx->cc_ops->unseal);
                rc = ctx->cc_ops->unseal(ctx, req);
                break;
        default:
                LBUG();
        }

        LASSERT(rc || req->rq_repmsg || req->rq_resend);
        RETURN(rc);
}

/**************************************************
 * security APIs                                  *
 **************************************************/

/*
 * let the policy module determine whether to take a reference
 * on the import or not.
 */
static
struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
                                       struct ptlrpc_svc_ctx *ctx,
                                       __u32 flavor,
                                       unsigned long flags)
{
        struct ptlrpc_sec_policy *policy;
        struct ptlrpc_sec *sec;
        ENTRY;

        flavor = SEC_FLAVOR_RPC(flavor);

        if (ctx) {
                LASSERT(imp->imp_dlm_fake == 1);

                CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
                       imp->imp_obd->obd_type->typ_name,
                       imp->imp_obd->obd_name,
                       sptlrpc_flavor2name(flavor));

                policy = sptlrpc_policy_get(ctx->sc_policy);
                flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
        } else {
                LASSERT(imp->imp_dlm_fake == 0);

                CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
                       imp->imp_obd->obd_type->typ_name,
                       imp->imp_obd->obd_name,
                       sptlrpc_flavor2name(flavor));

                policy = sptlrpc_flavor2policy(flavor);
                if (!policy) {
                        CERROR("invalid flavor 0x%x\n", flavor);
                        RETURN(NULL);
                }
        }

        sec = policy->sp_cops->create_sec(imp, ctx, flavor, flags);
        if (sec) {
                atomic_inc(&sec->ps_refcount);

                /* take 1 busy count on behalf of the sec itself,
                 * balanced in sptlrpc_sec_put()
                 */
                atomic_inc(&sec->ps_busy);
        } else
                sptlrpc_policy_put(policy);

        RETURN(sec);
}

static
void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
{
        struct ptlrpc_sec_policy *policy = sec->ps_policy;

        LASSERT(policy);
        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_busy) == 0);
        LASSERT(policy->sp_cops->destroy_sec);

        policy->sp_cops->destroy_sec(sec);
        sptlrpc_policy_put(policy);
}

static
void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
        struct ptlrpc_sec_policy *policy = sec->ps_policy;

        if (!atomic_dec_and_test(&sec->ps_refcount)) {
                sptlrpc_policy_put(policy);
                return;
        }

        ctx_cache_flush(sec, -1, 1, 1);

        if (atomic_dec_and_test(&sec->ps_busy))
                sptlrpc_sec_destroy(sec);
        else
                CWARN("delay to destroy %s@%p: busy contexts\n",
                      policy->sp_name, sec);
}

/*
 * return 1 if the caller should also destroy the sec structure;
 * normally returns 0
 */
static
int sptlrpc_sec_destroy_ctx(struct ptlrpc_sec *sec,
                            struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(sec == ctx->cc_sec);
        LASSERT(atomic_read(&sec->ps_busy));
        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
        LASSERT(hlist_unhashed(&ctx->cc_hash));
        LASSERT(list_empty(&ctx->cc_req_list));
        LASSERT(sec->ps_policy->sp_cops->destroy_ctx);

        sec->ps_policy->sp_cops->destroy_ctx(sec, ctx);

        if (atomic_dec_and_test(&sec->ps_busy)) {
                LASSERT(atomic_read(&sec->ps_refcount) == 0);
                return 1;
        }

        return 0;
}

/*
 * on successful completion, req->rq_reqmsg should point to the
 * right place.
 */
int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec_policy *policy;
        int rc;

        LASSERT(ctx);
        LASSERT(atomic_read(&ctx->cc_refcount));
        LASSERT(ctx->cc_sec);
        LASSERT(ctx->cc_sec->ps_policy);
        LASSERT(req->rq_reqmsg == NULL);

        policy = ctx->cc_sec->ps_policy;
        rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
        if (!rc) {
                LASSERT(req->rq_reqmsg);
                LASSERT(req->rq_reqbuf || req->rq_clrbuf);

                /* zero the preallocated buffer */
                if (req->rq_pool)
                        memset(req->rq_reqmsg, 0, msgsize);
        }

        return rc;
}

void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec_policy *policy;

        LASSERT(ctx);
        LASSERT(atomic_read(&ctx->cc_refcount));
        LASSERT(ctx->cc_sec);
        LASSERT(ctx->cc_sec->ps_policy);
        LASSERT(req->rq_reqbuf || req->rq_clrbuf);

        policy = ctx->cc_sec->ps_policy;
        policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
}

/*
 * NOTE: caller must guarantee the buffer size is enough for the enlargement
 */
void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
                                  int segment, int newsize)
{
        void   *src, *dst;
        int     oldsize, oldmsg_size, movesize;

        LASSERT(segment < msg->lm_bufcount);
        LASSERT(msg->lm_buflens[segment] <= newsize);

        if (msg->lm_buflens[segment] == newsize)
                return;

        /* nothing to do if we are enlarging the last segment */
        if (segment == msg->lm_bufcount - 1) {
                msg->lm_buflens[segment] = newsize;
                return;
        }

        oldsize = msg->lm_buflens[segment];

        src = lustre_msg_buf(msg, segment + 1, 0);
        msg->lm_buflens[segment] = newsize;
        dst = lustre_msg_buf(msg, segment + 1, 0);
        msg->lm_buflens[segment] = oldsize;

        /* move from segment + 1 to the end segment */
        LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
        oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
        LASSERT(movesize >= 0);

        if (movesize)
                memmove(dst, src, movesize);

        /* note we don't clear the area where the old data lived; it's not
         * secret */

        /* finally set the new segment size */
        msg->lm_buflens[segment] = newsize;
}
EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
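
/*
 * Worked example (illustrative; assumes the usual 8-byte rounding of v2
 * message buffers): for a message with lm_buflens = {128, 64, 256},
 *
 *      _sptlrpc_enlarge_msg_inplace(msg, 1, 96);
 *
 * shifts segment 2 up by 32 bytes and leaves lm_buflens = {128, 96, 256},
 * with the contents of segments 1 and 2 preserved. The caller must have
 * sized the underlying buffer for the growth, per the NOTE above.
 */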
1398
1399 /*
1400  * enlarge @segment of upper message req->rq_reqmsg to @newsize, all data
1401  * will be preserved after enlargement. this must be called after rq_reqmsg has
1402  * been intialized at least.
1403  *
1404  * caller's attention: upon return, rq_reqmsg and rq_reqlen might have
1405  * been changed.
1406  */
1407 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1408                                int segment, int newsize)
1409 {
1410         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1411         struct ptlrpc_sec_cops   *cops;
1412         struct lustre_msg        *msg = req->rq_reqmsg;
1413
1414         LASSERT(ctx);
1415         LASSERT(msg);
1416         LASSERT(msg->lm_bufcount > segment);
1417         LASSERT(msg->lm_buflens[segment] <= newsize);
1418
1419         if (msg->lm_buflens[segment] == newsize)
1420                 return 0;
1421
1422         cops = ctx->cc_sec->ps_policy->sp_cops;
1423         LASSERT(cops->enlarge_reqbuf);
1424         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1425 }
1426 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1427
1428 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1429 {
1430         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1431         struct ptlrpc_sec_policy *policy;
1432         ENTRY;
1433
1434         LASSERT(ctx);
1435         LASSERT(atomic_read(&ctx->cc_refcount));
1436         LASSERT(ctx->cc_sec);
1437         LASSERT(ctx->cc_sec->ps_policy);
1438
1439         if (req->rq_repbuf)
1440                 RETURN(0);
1441
1442         policy = ctx->cc_sec->ps_policy;
1443         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1444 }
1445
1446 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1447 {
1448         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1449         struct ptlrpc_sec_policy *policy;
1450         ENTRY;
1451
1452         LASSERT(ctx);
1453         LASSERT(atomic_read(&ctx->cc_refcount));
1454         LASSERT(ctx->cc_sec);
1455         LASSERT(ctx->cc_sec->ps_policy);
1456         LASSERT(req->rq_repbuf);
1457
1458         policy = ctx->cc_sec->ps_policy;
1459         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1460         EXIT;
1461 }
1462
1463 int sptlrpc_import_get_sec(struct obd_import *imp,
1464                            struct ptlrpc_svc_ctx *ctx,
1465                            __u32 flavor,
1466                            unsigned long flags)
1467 {
1468         struct obd_device *obd = imp->imp_obd;
1469         ENTRY;
1470
1471         LASSERT(obd);
1472         LASSERT(obd->obd_type);
1473
1474         /* old sec might be still there in reconnecting */
1475         if (imp->imp_sec)
1476                 RETURN(0);
1477
1478         imp->imp_sec = sptlrpc_sec_create(imp, ctx, flavor, flags);
1479         if (!imp->imp_sec)
1480                 RETURN(-EINVAL);
1481
1482         RETURN(0);
1483 }
1484
1485 void sptlrpc_import_put_sec(struct obd_import *imp)
1486 {
1487         if (imp->imp_sec == NULL)
1488                 return;
1489
1490         sptlrpc_sec_put(imp->imp_sec);
1491         imp->imp_sec = NULL;
1492 }
1493
1494 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1495 {
1496         if (imp == NULL || imp->imp_sec == NULL)
1497                 return;
1498
1499         /* use 'grace' mode, it's crutial see explain in
1500          * sptlrpc_req_refresh_ctx()
1501          */
1502         ctx_cache_flush(imp->imp_sec, 0, 1, 1);
1503 }
1504
1505 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1506 {
1507         if (imp == NULL || imp->imp_sec == NULL)
1508                 return;
1509
1510         ctx_cache_flush(imp->imp_sec, cfs_current()->uid, 1, 1);
1511 }
1512 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1513
1514 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1515 {
1516         if (imp == NULL || imp->imp_sec == NULL)
1517                 return;
1518
1519         ctx_cache_flush(imp->imp_sec, -1, 0, 1);
1520 }
1521 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1522
1523 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1524                                 struct ptlrpc_cli_ctx *ctx)
1525 {
1526         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1527
1528         if (!policy->sp_cops->install_rctx)
1529                 return 0;
1530         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1531 }
1532
1533 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1534                                 struct ptlrpc_svc_ctx *ctx)
1535 {
1536         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1537
1538         if (!policy->sp_sops->install_rctx)
1539                 return 0;
1540         return policy->sp_sops->install_rctx(imp, ctx);
1541 }
1542
1543 /****************************************
1544  * server side security                 *
1545  ****************************************/
1546
1547 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
1548 {
1549         struct ptlrpc_sec_policy *policy;
1550         struct lustre_msg *msg = req->rq_reqbuf;
1551         int rc;
1552         ENTRY;
1553
1554         LASSERT(msg);
1555         LASSERT(req->rq_reqmsg == NULL);
1556         LASSERT(req->rq_repmsg == NULL);
1557
1558         /* 
1559          * in any case we avoid to call unpack_msg() for request of null flavor
1560          * which will later be done by ptlrpc_server_handle_request().
1561          */
1562         if (req->rq_reqdata_len < sizeof(struct lustre_msg)) {
1563                 CERROR("request size %d too small\n", req->rq_reqdata_len);
1564                 RETURN(SECSVC_DROP);
1565         }
1566
1567         if (msg->lm_magic == LUSTRE_MSG_MAGIC_V1 ||
1568             msg->lm_magic == LUSTRE_MSG_MAGIC_V1_SWABBED) {
1569                 req->rq_sec_flavor = SPTLRPC_FLVR_NULL;
1570         } else {
1571                 req->rq_sec_flavor = msg->lm_secflvr;
1572
1573                 if (msg->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED)
1574                         __swab32s(&req->rq_sec_flavor);
1575
1576                 if ((SEC_FLAVOR_POLICY(req->rq_sec_flavor) !=
1577                      SPTLRPC_POLICY_NULL) &&
1578                     lustre_unpack_msg(msg, req->rq_reqdata_len))
1579                         RETURN(SECSVC_DROP);
1580         }
1581
1582         policy = sptlrpc_flavor2policy(req->rq_sec_flavor);
1583         if (!policy) {
1584                 CERROR("unsupported security flavor %x\n", req->rq_sec_flavor);
1585                 RETURN(SECSVC_DROP);
1586         }
1587
1588         LASSERT(policy->sp_sops->accept);
1589         rc = policy->sp_sops->accept(req);
1590
1591         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
1592         sptlrpc_policy_put(policy);
1593
1594         /* FIXME move to proper place */
1595         if (rc == SECSVC_OK) {
1596                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1597
1598                 if (opc == OST_WRITE)
1599                         req->rq_bulk_write = 1;
1600                 else if (opc == OST_READ)
1601                         req->rq_bulk_read = 1;
1602         }
1603
1604         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
1605         RETURN(rc);
1606 }
1607
1608 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req,
1609                          int msglen)
1610 {
1611         struct ptlrpc_sec_policy *policy;
1612         struct ptlrpc_reply_state *rs;
1613         int rc;
1614         ENTRY;
1615
1616         LASSERT(req->rq_svc_ctx);
1617         LASSERT(req->rq_svc_ctx->sc_policy);
1618
1619         policy = req->rq_svc_ctx->sc_policy;
1620         LASSERT(policy->sp_sops->alloc_rs);
1621
1622         rc = policy->sp_sops->alloc_rs(req, msglen);
1623         if (unlikely(rc == -ENOMEM)) {
1624                 /* failed alloc, try emergency pool */
1625                 rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_service);
1626                 if (rs == NULL)
1627                         RETURN(-ENOMEM);
1628
1629                 req->rq_reply_state = rs;
1630                 rc = policy->sp_sops->alloc_rs(req, msglen);
1631                 if (rc) {
1632                         lustre_put_emerg_rs(rs);
1633                         req->rq_reply_state = NULL;
1634                 }
1635         }
1636
1637         LASSERT(rc != 0 ||
1638                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
1639
1640         RETURN(rc);
1641 }
1642
1643 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
1644 {
1645         struct ptlrpc_sec_policy *policy;
1646         int rc;
1647         ENTRY;
1648
1649         LASSERT(req->rq_svc_ctx);
1650         LASSERT(req->rq_svc_ctx->sc_policy);
1651
1652         policy = req->rq_svc_ctx->sc_policy;
1653         LASSERT(policy->sp_sops->authorize);
1654
1655         rc = policy->sp_sops->authorize(req);
1656         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
1657
1658         RETURN(rc);
1659 }
1660
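/*
 * Release a reply state through the policy's free_rs hook; reply states
 * that came from the emergency pool are returned to it afterwards.
 */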
1661 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
1662 {
1663         struct ptlrpc_sec_policy *policy;
1664         unsigned int prealloc;
1665         ENTRY;
1666
1667         LASSERT(rs->rs_svc_ctx);
1668         LASSERT(rs->rs_svc_ctx->sc_policy);
1669
1670         policy = rs->rs_svc_ctx->sc_policy;
1671         LASSERT(policy->sp_sops->free_rs);
1672
1673         prealloc = rs->rs_prealloc;
1674         policy->sp_sops->free_rs(rs);
1675
1676         if (prealloc)
1677                 lustre_put_emerg_rs(rs);
1678         EXIT;
1679 }
1680
1681 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
1682 {
1683         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
1684
1685         if (ctx == NULL)
1686                 return;
1687
1688         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
1689         atomic_inc(&ctx->sc_refcount);
1690 }
1691
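/*
 * Drop the request's reference on its service context and clear
 * rq_svc_ctx; the context itself is freed through the policy's free_ctx
 * hook once the last reference is gone.
 */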
1692 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
1693 {
1694         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
1695
1696         if (ctx == NULL)
1697                 return;
1698
1699         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
1700         if (atomic_dec_and_test(&ctx->sc_refcount)) {
1701                 if (ctx->sc_policy->sp_sops->free_ctx)
1702                         ctx->sc_policy->sp_sops->free_ctx(ctx);
1703         }
1704         req->rq_svc_ctx = NULL;
1705 }
1706
1707 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
1708 {
1709         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
1710
1711         if (ctx == NULL)
1712                 return;
1713
1714         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
1715         if (ctx->sc_policy->sp_sops->invalidate_ctx)
1716                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
1717 }
1718 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
1719
1720 /****************************************
1721  * bulk security                        *
1722  ****************************************/
1723
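/*
 * The bulk hooks below come in wrap/unwrap pairs on the client and the
 * server side: a policy may checksum and/or encrypt the bulk pages when
 * wrapping and verify or decrypt them when unwrapping, depending on the
 * negotiated bulk flavor.
 */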
1724 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1725                           struct ptlrpc_bulk_desc *desc)
1726 {
1727         struct ptlrpc_cli_ctx *ctx;
1728
1729         if (!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor))
1730                 return 0;
1731
1732         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
1733
1734         ctx = req->rq_cli_ctx;
1735         if (ctx->cc_ops->wrap_bulk)
1736                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
1737         return 0;
1738 }
1739 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
1740
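/*
 * Build a throwaway bulk descriptor over a brw_page array so that the
 * generic unwrap hook can verify bulk read data; only the iov entries
 * and the iov count are filled in.
 */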
1741 static
1742 void pga_to_bulk_desc(int nob, obd_count pg_count, struct brw_page **pga,
1743                       struct ptlrpc_bulk_desc *desc)
1744 {
1745         int i;
1746
1747         LASSERT(pga);
1748         LASSERT(*pga);
1749
1750         for (i = 0; i < pg_count && nob > 0; i++) {
1751 #ifdef __KERNEL__
1752                 desc->bd_iov[i].kiov_page = pga[i]->pg;
1753                 desc->bd_iov[i].kiov_len = pga[i]->count > nob ?
1754                                            nob : pga[i]->count;
1755                 desc->bd_iov[i].kiov_offset = pga[i]->off & ~CFS_PAGE_MASK;
1756 #else
1757 #warning FIXME for liblustre!
1758                 desc->bd_iov[i].iov_base = pga[i]->pg->addr;
1759                 desc->bd_iov[i].iov_len = pga[i]->count > nob ?
1760                                            nob : pga[i]->count;
1761 #endif
1762
1763                 desc->bd_iov_count++;
1764                 nob -= pga[i]->count;
1765         }
1766 }
1767
1768 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
1769                                  int nob, obd_count pg_count,
1770                                  struct brw_page **pga)
1771 {
1772         struct ptlrpc_bulk_desc *desc;
1773         struct ptlrpc_cli_ctx *ctx;
1774         int rc = 0;
1775
1776         if (!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor))
1777                 return 0;
1778
1779         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
1780
1781         OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[pg_count]));
1782         if (desc == NULL) {
1783                 CERROR("out of memory, can't verify bulk read data\n");
1784                 return -ENOMEM;
1785         }
1786
1787         pga_to_bulk_desc(nob, pg_count, pga, desc);
1788
1789         ctx = req->rq_cli_ctx;
1790         if (ctx->cc_ops->unwrap_bulk)
1791                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
1792
1793         OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[pg_count]));
1794
1795         return rc;
1796 }
1797 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
1798
1799 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
1800                                   struct ptlrpc_bulk_desc *desc)
1801 {
1802         struct ptlrpc_cli_ctx *ctx;
1803
1804         if (!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor))
1805                 return 0;
1806
1807         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
1808
1809         ctx = req->rq_cli_ctx;
1810         if (ctx->cc_ops->unwrap_bulk)
1811                 return ctx->cc_ops->unwrap_bulk(ctx, req, desc);
1812
1813         return 0;
1814 }
1815 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
1816
1817 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
1818                           struct ptlrpc_bulk_desc *desc)
1819 {
1820         struct ptlrpc_svc_ctx *ctx;
1821
1822         if (!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor))
1823                 return 0;
1824
1825         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
1826
1827         ctx = req->rq_svc_ctx;
1828         if (ctx->sc_policy->sp_sops->wrap_bulk)
1829                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
1830
1831         return 0;
1832 }
1833 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
1834
1835 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
1836                             struct ptlrpc_bulk_desc *desc)
1837 {
1838         struct ptlrpc_svc_ctx *ctx;
1839
1840         if (!SEC_FLAVOR_HAS_BULK(req->rq_sec_flavor))
1841                 return 0;
1842
1843         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
1844
1845         ctx = req->rq_svc_ctx;
1846         if (ctx->sc_policy->sp_sops->unwrap_bulk)
1847                 return ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
1848
1849         return 0;
1850 }
1851 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
1852
1853
1854 /****************************************
1855  * user descriptor helpers              *
1856  ****************************************/
1857
1858 int sptlrpc_current_user_desc_size(void)
1859 {
1860         int ngroups;
1861
1862 #ifdef __KERNEL__
1863         ngroups = current_ngroups;
1864
1865         if (ngroups > LUSTRE_MAX_GROUPS)
1866                 ngroups = LUSTRE_MAX_GROUPS;
1867 #else
1868         ngroups = 0;
1869 #endif
1870         return sptlrpc_user_desc_size(ngroups);
1871 }
1872 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
1873
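/*
 * Pack the calling task's identity (uid/gid, fsuid/fsgid, effective
 * capabilities and supplementary groups) into the ptlrpc_user_desc at
 * the given message offset; the group count is capped by the buffer
 * size that was computed at allocation time.
 */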
1874 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
1875 {
1876         struct ptlrpc_user_desc *pud;
1877
1878         pud = lustre_msg_buf(msg, offset, 0);
1879
1880         pud->pud_uid = cfs_current()->uid;
1881         pud->pud_gid = cfs_current()->gid;
1882         pud->pud_fsuid = cfs_current()->fsuid;
1883         pud->pud_fsgid = cfs_current()->fsgid;
1884         pud->pud_cap = cfs_current()->cap_effective;
1885         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
1886
1887 #ifdef __KERNEL__
1888         task_lock(current);
1889         if (pud->pud_ngroups > current_ngroups)
1890                 pud->pud_ngroups = current_ngroups;
1891         memcpy(pud->pud_groups, cfs_current()->group_info->blocks[0],
1892                pud->pud_ngroups * sizeof(__u32));
1893         task_unlock(current);
1894 #endif
1895
1896         return 0;
1897 }
1898 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
1899
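/*
 * Validate and byte-swap a user descriptor received from the wire: swab
 * the fixed header first, check the group count against both
 * LUSTRE_MAX_GROUPS and the actual buffer size, and only then swab the
 * group array itself.
 */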
1900 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset)
1901 {
1902         struct ptlrpc_user_desc *pud;
1903         int                      i;
1904
1905         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
1906         if (!pud)
1907                 return -EINVAL;
1908
1909         if (lustre_msg_swabbed(msg)) {
1910                 __swab32s(&pud->pud_uid);
1911                 __swab32s(&pud->pud_gid);
1912                 __swab32s(&pud->pud_fsuid);
1913                 __swab32s(&pud->pud_fsgid);
1914                 __swab32s(&pud->pud_cap);
1915                 __swab32s(&pud->pud_ngroups);
1916         }
1917
1918         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
1919                 CERROR("too many groups: %u\n", pud->pud_ngroups);
1920                 return -EINVAL;
1921         }
1922
1923         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
1924             msg->lm_buflens[offset]) {
1925                 CERROR("%u groups claimed but bufsize is only %u\n",
1926                        pud->pud_ngroups, msg->lm_buflens[offset]);
1927                 return -EINVAL;
1928         }
1929
1930         if (lustre_msg_swabbed(msg)) {
1931                 for (i = 0; i < pud->pud_ngroups; i++)
1932                         __swab32s(&pud->pud_groups[i]);
1933         }
1934
1935         return 0;
1936 }
1937 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
1938
1939 /****************************************
1940  * user supplied flavor string parsing  *
1941  ****************************************/
1942
1943 static
1944 int get_default_flavor(enum lustre_part to_part, struct sec_flavor_config *conf)
1945 {
1946         conf->sfc_bulk_priv = BULK_PRIV_ALG_NULL;
1947         conf->sfc_bulk_csum = BULK_CSUM_ALG_NULL;
1948         conf->sfc_flags = 0;
1949
1950         switch (to_part) {
1951         case LUSTRE_MDT:
1952                 conf->sfc_rpc_flavor = SPTLRPC_FLVR_PLAIN;
1953                 return 0;
1954         case LUSTRE_OST:
1955                 conf->sfc_rpc_flavor = SPTLRPC_FLVR_NULL;
1956                 return 0;
1957         default:
1958                 CERROR("unknown target lustre part %d, applying defaults\n", to_part);
1959                 conf->sfc_rpc_flavor = SPTLRPC_FLVR_NULL;
1960                 return -EINVAL;
1961         }
1962 }
1963
1964 static
1965 void get_flavor_by_rpc(__u32 rpc_flavor, struct sec_flavor_config *conf)
1966 {
1967         conf->sfc_rpc_flavor = rpc_flavor;
1968         conf->sfc_bulk_priv = BULK_PRIV_ALG_NULL;
1969         conf->sfc_bulk_csum = BULK_CSUM_ALG_NULL;
1970         conf->sfc_flags = 0;
1971
1972         switch (rpc_flavor) {
1973         case SPTLRPC_FLVR_NULL:
1974         case SPTLRPC_FLVR_PLAIN:
1975                 break;
1976         case SPTLRPC_FLVR_KRB5P:
1977                 conf->sfc_bulk_priv = BULK_PRIV_ALG_ARC4;
1978                 /* fall through */
1979         case SPTLRPC_FLVR_KRB5I:
1980                 conf->sfc_bulk_csum = BULK_CSUM_ALG_SHA1;
1981                 break;
1982         default:
1983                 LBUG();
1984         }
1985 }
1986
1987 static
1988 void get_flavor_by_rpc_bulk(__u32 rpc_flavor, int bulk_priv,
1989                             struct sec_flavor_config *conf)
1990 {
1991         if (bulk_priv)
1992                 conf->sfc_bulk_priv = BULK_PRIV_ALG_ARC4;
1993         else
1994                 conf->sfc_bulk_priv = BULK_PRIV_ALG_NULL;
1995
1996         switch (rpc_flavor) {
1997         case SPTLRPC_FLVR_PLAIN:
1998                 conf->sfc_bulk_csum = BULK_CSUM_ALG_MD5;
1999                 break;
2000         case SPTLRPC_FLVR_KRB5I:
2001         case SPTLRPC_FLVR_KRB5P:
2002                 conf->sfc_bulk_csum = BULK_CSUM_ALG_SHA1;
2003                 break;
2004         default:
2005                 LBUG();
2006         }
2007 }
2008
2009 static __u32 __flavors[] = {
2010         SPTLRPC_FLVR_NULL,
2011         SPTLRPC_FLVR_PLAIN,
2012         SPTLRPC_FLVR_KRB5I,
2013         SPTLRPC_FLVR_KRB5P,
2014 };
2015
2016 #define __nflavors      (sizeof(__flavors)/sizeof(__flavors[0]))
2017
2018 /*
2019  * flavor string format: rpc[-bulk{n|i|p}[:cksum/enc]]
2020  * for example:
2021  *  null
2022  *  plain-bulki
2023  *  krb5p-bulkn
2024  *  krb5i-bulkp
2025  *  krb5i-bulkp:sha512/arc4
2026  */
2027 int sptlrpc_parse_flavor(enum lustre_part from_part, enum lustre_part to_part,
2028                          char *str, struct sec_flavor_config *conf)
2029 {
2030         char   *f, *bulk, *alg, *enc;
2031         char    buf[64];
2032         int     i, bulk_priv;
2033         ENTRY;
2034
2035         if (str == NULL) {
2036                 if (get_default_flavor(to_part, conf))
2037                         return -EINVAL;
2038                 goto set_flags;
2039         }
2040
2041         for (i = 0; i < __nflavors; i++) {
2042                 f = sptlrpc_flavor2name(__flavors[i]);
2043                 if (strncmp(str, f, strlen(f)) == 0)
2044                         break;
2045         }
2046
2047         if (i >= __nflavors)
2048                 GOTO(invalid, -EINVAL);
2049
2050         /* copy into a local buffer so we can modify it while parsing */
2051         strncpy(buf, str, sizeof(buf));
2052         buf[sizeof(buf) - 1] = '\0';
2053
2054         /* find bulk string */
2055         bulk = strchr(buf, '-');
2056         if (bulk)
2057                 *bulk++ = '\0';
2058
2059         /* now the first part must equal the rpc flavor name */
2060         if (strcmp(buf, f) != 0)
2061                 GOTO(invalid, -EINVAL);
2062
2063         get_flavor_by_rpc(__flavors[i], conf);
2064
2065         if (bulk == NULL)
2066                 goto set_flags;
2067
2068         /* null flavor should not have any suffix */
2069         if (__flavors[i] == SPTLRPC_FLVR_NULL)
2070                 GOTO(invalid, -EINVAL);
2071
2072         /* find bulk algorithm string */
2073         alg = strchr(bulk, ':');
2074         if (alg)
2075                 *alg++ = '\0';
2076
2077         /* verify bulk section */
2078         if (strcmp(bulk, "bulkn") == 0) {
2079                 conf->sfc_bulk_csum = BULK_CSUM_ALG_NULL;
2080                 conf->sfc_bulk_priv = BULK_PRIV_ALG_NULL;
2081                 goto set_flags;
2082         }
2083
2084         if (strcmp(bulk, "bulki") == 0)
2085                 bulk_priv = 0;
2086         else if (strcmp(bulk, "bulkp") == 0)
2087                 bulk_priv = 1;
2088         else
2089                 GOTO(invalid, -EINVAL);
2090
2091         /* the plain policy doesn't support bulk encryption */
2092         if (bulk_priv && __flavors[i] == SPTLRPC_FLVR_PLAIN)
2093                 GOTO(invalid, -EINVAL);
2094
2095         get_flavor_by_rpc_bulk(__flavors[i], bulk_priv, conf);
2096
2097         if (alg == NULL)
2098                 goto set_flags;
2099
2100         /* find encryption algorithm string */
2101         enc = strchr(alg, '/');
2102         if (enc)
2103                 *enc++ = '\0';
2104
2105         /* bulk combination sanity check */
2106         if ((bulk_priv && enc == NULL) || (bulk_priv == 0 && enc))
2107                 GOTO(invalid, -EINVAL);
2108
2109         /* checksum algorithm */
2110         for (i = 0; i < BULK_CSUM_ALG_MAX; i++) {
2111                 if (strcmp(alg, sptlrpc_bulk_csum_alg2name(i)) == 0) {
2112                         conf->sfc_bulk_csum = i;
2113                         break;
2114                 }
2115         }
2116         if (i >= BULK_CSUM_ALG_MAX)
2117                 GOTO(invalid, -EINVAL);
2118
2119         /* privacy algorithm */
2120         if (enc) {
2121                 if (strcmp(enc, "arc4") != 0)
2122                         GOTO(invalid, -EINVAL);
2123                 conf->sfc_bulk_priv = BULK_PRIV_ALG_ARC4;
2124         }
2125
2126 set_flags:
2127         /* set the ROOTONLY flag:
2128          *   - to OST
2129          *   - from MDT to MDT
2130          * set the BULK flag:
2131          *   - from CLI to OST
2132          */
2133         if (to_part == LUSTRE_OST ||
2134             (from_part == LUSTRE_MDT && to_part == LUSTRE_MDT))
2135                 conf->sfc_flags |= PTLRPC_SEC_FL_ROOTONLY;
2136         if (from_part == LUSTRE_CLI && to_part == LUSTRE_OST)
2137                 conf->sfc_flags |= PTLRPC_SEC_FL_BULK;
2138
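        /* keep the config fields in little-endian byte order regardless
         * of host endianness */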
2139 #ifdef __BIG_ENDIAN
2140         __swab32s(&conf->sfc_rpc_flavor);
2141         __swab32s(&conf->sfc_bulk_csum);
2142         __swab32s(&conf->sfc_bulk_priv);
2143         __swab32s(&conf->sfc_flags);
2144 #endif
2145         return 0;
2146 invalid:
2147         CERROR("invalid flavor string: %s\n", str);
2148         return -EINVAL;
2149 }
2150 EXPORT_SYMBOL(sptlrpc_parse_flavor);
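
/*
 * Illustrative sketch (not taken from the original sources): parsing the
 * flavor string "krb5i-bulkp:sha512/arc4" for client-to-OST traffic
 * would look like
 *
 *      struct sec_flavor_config conf;
 *      int rc;
 *
 *      rc = sptlrpc_parse_flavor(LUSTRE_CLI, LUSTRE_OST,
 *                                "krb5i-bulkp:sha512/arc4", &conf);
 *
 * and on success leaves SPTLRPC_FLVR_KRB5I as the rpc flavor, the sha512
 * bulk checksum algorithm in sfc_bulk_csum, BULK_PRIV_ALG_ARC4 in
 * sfc_bulk_priv, and PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_BULK in
 * sfc_flags.
 */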
2151
2152 /****************************************
2153  * misc helpers                         *
2154  ****************************************/
2155
2156 const char * sec2target_str(struct ptlrpc_sec *sec)
2157 {
2158         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2159                 return "*";
2160         if (sec->ps_flags & PTLRPC_SEC_FL_REVERSE)
2161                 return "c";
2162         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2163 }
2164 EXPORT_SYMBOL(sec2target_str);
2165
2166 /****************************************
2167  * initialize/finalize                  *
2168  ****************************************/
2169
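/*
 * Bring the security subsystem up in dependency order: the encrypt page
 * pool first, then the built-in null and plain policies, then the
 * lprocfs entries, unwinding everything already set up if a later step
 * fails.
 */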
2170 int sptlrpc_init(void)
2171 {
2172         int rc;
2173
2174         rc = sptlrpc_enc_pool_init();
2175         if (rc)
2176                 goto out;
2177
2178         rc = sptlrpc_null_init();
2179         if (rc)
2180                 goto out_pool;
2181
2182         rc = sptlrpc_plain_init();
2183         if (rc)
2184                 goto out_null;
2185
2186         rc = sptlrpc_lproc_init();
2187         if (rc)
2188                 goto out_plain;
2189
2190         return 0;
2191
2192 out_plain:
2193         sptlrpc_plain_fini();
2194 out_null:
2195         sptlrpc_null_fini();
2196 out_pool:
2197         sptlrpc_enc_pool_fini();
2198 out:
2199         return rc;
2200 }
2201
2202 void sptlrpc_fini(void)
2203 {
2204         sptlrpc_lproc_fini();
2205         sptlrpc_plain_fini();
2206         sptlrpc_null_fini();
2207         sptlrpc_enc_pool_fini();
2208 }