1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_SEC
42
43 #include <libcfs/libcfs.h>
44 #ifndef __KERNEL__
45 #include <liblustre.h>
46 #include <libcfs/list.h>
47 #else
48 #include <linux/crypto.h>
49 #include <linux/key.h>
50 #endif
51
52 #include <obd.h>
53 #include <obd_class.h>
54 #include <obd_support.h>
55 #include <lustre_net.h>
56 #include <lustre_import.h>
57 #include <lustre_dlm.h>
58 #include <lustre_sec.h>
59
60 #include "ptlrpc_internal.h"
61
62 /***********************************************
63  * policy registers                            *
64  ***********************************************/
65
66 static rwlock_t policy_lock;
67 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
68         NULL,
69 };
70
71 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
72 {
73         __u16 number = policy->sp_policy;
74
75         LASSERT(policy->sp_name);
76         LASSERT(policy->sp_cops);
77         LASSERT(policy->sp_sops);
78
79         if (number >= SPTLRPC_POLICY_MAX)
80                 return -EINVAL;
81
82         write_lock(&policy_lock);
83         if (unlikely(policies[number])) {
84                 write_unlock(&policy_lock);
85                 return -EALREADY;
86         }
87         policies[number] = policy;
88         write_unlock(&policy_lock);
89
90         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
91         return 0;
92 }
93 EXPORT_SYMBOL(sptlrpc_register_policy);
94
95 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
96 {
97         __u16 number = policy->sp_policy;
98
99         LASSERT(number < SPTLRPC_POLICY_MAX);
100
101         write_lock(&policy_lock);
102         if (unlikely(policies[number] == NULL)) {
103                 write_unlock(&policy_lock);
104                 CERROR("%s: already unregistered\n", policy->sp_name);
105                 return -EINVAL;
106         }
107
108         LASSERT(policies[number] == policy);
109         policies[number] = NULL;
110         write_unlock(&policy_lock);
111
112         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
113         return 0;
114 }
115 EXPORT_SYMBOL(sptlrpc_unregister_policy);
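/*
 * Usage sketch (hypothetical policy module, not part of this file): a
 * security policy typically registers itself at module load time and
 * unregisters on unload.  All names below are illustrative placeholders,
 * and SPTLRPC_POLICY_GSS is used only as a placeholder policy number.
 *
 *	static struct ptlrpc_sec_policy my_policy = {
 *		.sp_owner  = THIS_MODULE,
 *		.sp_name   = "sec.mypolicy",
 *		.sp_policy = SPTLRPC_POLICY_GSS,
 *		.sp_cops   = &my_cli_ops,
 *		.sp_sops   = &my_svc_ops,
 *	};
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return sptlrpc_register_policy(&my_policy);
 *	}
 *
 *	static void __exit my_policy_exit(void)
 *	{
 *		sptlrpc_unregister_policy(&my_policy);
 *	}
 */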
116
117 static
118 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
119 {
120         static DEFINE_MUTEX(load_mutex);
121         static cfs_atomic_t       loaded = CFS_ATOMIC_INIT(0);
122         struct ptlrpc_sec_policy *policy;
123         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
124         __u16                     flag = 0;
125
126         if (number >= SPTLRPC_POLICY_MAX)
127                 return NULL;
128
129         while (1) {
130                 read_lock(&policy_lock);
131                 policy = policies[number];
132                 if (policy && !try_module_get(policy->sp_owner))
133                         policy = NULL;
134                 if (policy == NULL)
135                         flag = cfs_atomic_read(&loaded);
136                 read_unlock(&policy_lock);
137
138                 if (policy != NULL || flag != 0 ||
139                     number != SPTLRPC_POLICY_GSS)
140                         break;
141
142                 /* try to load gss module, once */
143                 mutex_lock(&load_mutex);
144                 if (cfs_atomic_read(&loaded) == 0) {
145                         if (request_module("ptlrpc_gss") == 0)
146                                 CDEBUG(D_SEC,
147                                        "module ptlrpc_gss loaded on demand\n");
148                         else
149                                 CERROR("Unable to load module ptlrpc_gss\n");
150
151                         cfs_atomic_set(&loaded, 1);
152                 }
153                 mutex_unlock(&load_mutex);
154         }
155
156         return policy;
157 }
158
159 __u32 sptlrpc_name2flavor_base(const char *name)
160 {
161         if (!strcmp(name, "null"))
162                 return SPTLRPC_FLVR_NULL;
163         if (!strcmp(name, "plain"))
164                 return SPTLRPC_FLVR_PLAIN;
165         if (!strcmp(name, "krb5n"))
166                 return SPTLRPC_FLVR_KRB5N;
167         if (!strcmp(name, "krb5a"))
168                 return SPTLRPC_FLVR_KRB5A;
169         if (!strcmp(name, "krb5i"))
170                 return SPTLRPC_FLVR_KRB5I;
171         if (!strcmp(name, "krb5p"))
172                 return SPTLRPC_FLVR_KRB5P;
173
174         return SPTLRPC_FLVR_INVALID;
175 }
176 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
177
178 const char *sptlrpc_flavor2name_base(__u32 flvr)
179 {
180         __u32   base = SPTLRPC_FLVR_BASE(flvr);
181
182         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
183                 return "null";
184         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
185                 return "plain";
186         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
187                 return "krb5n";
188         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
189                 return "krb5a";
190         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
191                 return "krb5i";
192         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
193                 return "krb5p";
194
195         CERROR("invalid wire flavor 0x%x\n", flvr);
196         return "invalid";
197 }
198 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
199
200 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
201                                char *buf, int bufsize)
202 {
203         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
204                 snprintf(buf, bufsize, "hash:%s",
205                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
206         else
207                 snprintf(buf, bufsize, "%s",
208                          sptlrpc_flavor2name_base(sf->sf_rpc));
209
210         buf[bufsize - 1] = '\0';
211         return buf;
212 }
213 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
214
215 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
216 {
217         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
218
219         /*
220          * currently we don't support customized bulk specification for
221          * flavors other than plain
222          */
223         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
224                 char bspec[16];
225
226                 bspec[0] = '-';
227                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
228                 strlcat(buf, bspec, bufsize);
229         }
230
231         buf[bufsize - 1] = '\0';
232         return buf;
233 }
234 EXPORT_SYMBOL(sptlrpc_flavor2name);
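/*
 * Usage sketch (illustrative only): converting a flavor name to its wire
 * value and formatting it back for logging.  The local variables 'flvr',
 * 'name' and the descriptor 'sf' are hypothetical.
 *
 *	__u32 flvr = sptlrpc_name2flavor_base("krb5i");
 *	char  name[32];
 *
 *	if (flvr != SPTLRPC_FLVR_INVALID)
 *		CDEBUG(D_SEC, "flavor 0x%x is %s\n", flvr,
 *		       sptlrpc_flavor2name_base(flvr));
 *
 *	and, given a full struct sptlrpc_flavor descriptor 'sf':
 *		sptlrpc_flavor2name(&sf, name, sizeof(name));
 */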
235
236 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
237 {
238         buf[0] = '\0';
239
240         if (flags & PTLRPC_SEC_FL_REVERSE)
241                 strlcat(buf, "reverse,", bufsize);
242         if (flags & PTLRPC_SEC_FL_ROOTONLY)
243                 strlcat(buf, "rootonly,", bufsize);
244         if (flags & PTLRPC_SEC_FL_UDESC)
245                 strlcat(buf, "udesc,", bufsize);
246         if (flags & PTLRPC_SEC_FL_BULK)
247                 strlcat(buf, "bulk,", bufsize);
248         if (buf[0] == '\0')
249                 strlcat(buf, "-,", bufsize);
250
251         return buf;
252 }
253 EXPORT_SYMBOL(sptlrpc_secflags2str);
254
255 /**************************************************
256  * client context APIs                            *
257  **************************************************/
258
259 static
260 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
261 {
262         struct vfs_cred vcred;
263         int create = 1, remove_dead = 1;
264
265         LASSERT(sec);
266         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
267
268         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
269                                      PTLRPC_SEC_FL_ROOTONLY)) {
270                 vcred.vc_uid = 0;
271                 vcred.vc_gid = 0;
272                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
273                         create = 0;
274                         remove_dead = 0;
275                 }
276         } else {
277                 vcred.vc_uid = current_uid();
278                 vcred.vc_gid = current_gid();
279         }
280
281         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
282                                                    remove_dead);
283 }
284
285 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
286 {
287         cfs_atomic_inc(&ctx->cc_refcount);
288         return ctx;
289 }
290 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
291
292 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
293 {
294         struct ptlrpc_sec *sec = ctx->cc_sec;
295
296         LASSERT(sec);
297         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
298
299         if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
300                 return;
301
302         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
303 }
304 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
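/*
 * Usage sketch (illustrative only): every sptlrpc_cli_ctx_get() must be
 * balanced by a sptlrpc_cli_ctx_put(); the final put releases the context
 * through the owning policy.  Pass sync != 0 only when sleeping is allowed.
 *
 *	struct ptlrpc_cli_ctx *ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
 *
 *	... use ctx ...
 *
 *	sptlrpc_cli_ctx_put(ctx, 1);
 */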
305
306 /**
307  * Expire the client context immediately.
308  *
309  * \pre Caller must hold at least 1 reference on the \a ctx.
310  */
311 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
312 {
313         LASSERT(ctx->cc_ops->die);
314         ctx->cc_ops->die(ctx, 0);
315 }
316 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
317
318 /**
319  * Wake up the threads that are waiting for this client context. Called
320  * after some status change has happened on \a ctx.
321  */
322 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
323 {
324         struct ptlrpc_request *req, *next;
325
326         spin_lock(&ctx->cc_lock);
327         cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
328                                      rq_ctx_chain) {
329                 cfs_list_del_init(&req->rq_ctx_chain);
330                 ptlrpc_client_wake_req(req);
331         }
332         spin_unlock(&ctx->cc_lock);
333 }
334 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
335
336 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
337 {
338         LASSERT(ctx->cc_ops);
339
340         if (ctx->cc_ops->display == NULL)
341                 return 0;
342
343         return ctx->cc_ops->display(ctx, buf, bufsize);
344 }
345
346 static int import_sec_check_expire(struct obd_import *imp)
347 {
348         int     adapt = 0;
349
350         spin_lock(&imp->imp_lock);
351         if (imp->imp_sec_expire &&
352             imp->imp_sec_expire < cfs_time_current_sec()) {
353                 adapt = 1;
354                 imp->imp_sec_expire = 0;
355         }
356         spin_unlock(&imp->imp_lock);
357
358         if (!adapt)
359                 return 0;
360
361         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
362         return sptlrpc_import_sec_adapt(imp, NULL, 0);
363 }
364
365 static int import_sec_validate_get(struct obd_import *imp,
366                                    struct ptlrpc_sec **sec)
367 {
368         int     rc;
369
370         if (unlikely(imp->imp_sec_expire)) {
371                 rc = import_sec_check_expire(imp);
372                 if (rc)
373                         return rc;
374         }
375
376         *sec = sptlrpc_import_sec_ref(imp);
377         if (*sec == NULL) {
378                 CERROR("import %p (%s) with no sec\n",
379                        imp, ptlrpc_import_state_name(imp->imp_state));
380                 return -EACCES;
381         }
382
383         if (unlikely((*sec)->ps_dying)) {
384                 CERROR("attempt to use dying sec %p\n", *sec);
385                 sptlrpc_sec_put(*sec);
386                 return -EACCES;
387         }
388
389         return 0;
390 }
391
392 /**
393  * Given a \a req, find or allocate an appropriate context for it.
394  * \pre req->rq_cli_ctx == NULL.
395  *
396  * \retval 0 succeed, and req->rq_cli_ctx is set.
397  * \retval -ev error number, and req->rq_cli_ctx == NULL.
398  */
399 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
400 {
401         struct obd_import *imp = req->rq_import;
402         struct ptlrpc_sec *sec;
403         int                rc;
404         ENTRY;
405
406         LASSERT(!req->rq_cli_ctx);
407         LASSERT(imp);
408
409         rc = import_sec_validate_get(imp, &sec);
410         if (rc)
411                 RETURN(rc);
412
413         req->rq_cli_ctx = get_my_ctx(sec);
414
415         sptlrpc_sec_put(sec);
416
417         if (!req->rq_cli_ctx) {
418                 CERROR("req %p: fail to get context\n", req);
419                 RETURN(-ENOMEM);
420         }
421
422         RETURN(0);
423 }
424
425 /**
426  * Drop the context for \a req.
427  * \pre req->rq_cli_ctx != NULL.
428  * \post req->rq_cli_ctx == NULL.
429  *
430  * If \a sync == 0, this function should return quickly without sleeping;
431  * otherwise it might trigger and wait for the whole process of sending
432  * a context-destroying RPC to the server.
433  */
434 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
435 {
436         ENTRY;
437
438         LASSERT(req);
439         LASSERT(req->rq_cli_ctx);
440
441         /* the request might be asked to release its context early
442          * while it is still on the context waiting list.
443          */
444         if (!cfs_list_empty(&req->rq_ctx_chain)) {
445                 spin_lock(&req->rq_cli_ctx->cc_lock);
446                 cfs_list_del_init(&req->rq_ctx_chain);
447                 spin_unlock(&req->rq_cli_ctx->cc_lock);
448         }
449
450         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
451         req->rq_cli_ctx = NULL;
452         EXIT;
453 }
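/*
 * Usage sketch (illustrative only): how a caller might pair the two
 * functions above when managing a request's context by hand.
 *
 *	rc = sptlrpc_req_get_ctx(req);
 *	if (rc)
 *		RETURN(rc);
 *
 *	... pack, send and handle the request ...
 *
 *	sptlrpc_req_put_ctx(req, 1);
 */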
454
455 static
456 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
457                            struct ptlrpc_cli_ctx *oldctx,
458                            struct ptlrpc_cli_ctx *newctx)
459 {
460         struct sptlrpc_flavor   old_flvr;
461         char                   *reqmsg = NULL; /* to workaround old gcc */
462         int                     reqmsg_size;
463         int                     rc = 0;
464
465         LASSERT(req->rq_reqmsg);
466         LASSERT(req->rq_reqlen);
467         LASSERT(req->rq_replen);
468
469         CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
470                "switch sec %p(%s) -> %p(%s)\n", req,
471                oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
472                newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
473                oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
474                newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
475
476         /* save flavor */
477         old_flvr = req->rq_flvr;
478
479         /* save request message */
480         reqmsg_size = req->rq_reqlen;
481         if (reqmsg_size != 0) {
482                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
483                 if (reqmsg == NULL)
484                         return -ENOMEM;
485                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
486         }
487
488         /* release old req/rep buf */
489         req->rq_cli_ctx = oldctx;
490         sptlrpc_cli_free_reqbuf(req);
491         sptlrpc_cli_free_repbuf(req);
492         req->rq_cli_ctx = newctx;
493
494         /* recalculate the flavor */
495         sptlrpc_req_set_flavor(req, 0);
496
497         /* alloc new request buffer;
498          * we don't need to alloc the reply buffer here, leave it to the
499          * rest of the ptlrpc procedure */
500         if (reqmsg_size != 0) {
501                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
502                 if (!rc) {
503                         LASSERT(req->rq_reqmsg);
504                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
505                 } else {
506                         CWARN("failed to alloc reqbuf: %d\n", rc);
507                         req->rq_flvr = old_flvr;
508                 }
509
510                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
511         }
512         return rc;
513 }
514
515 /**
516  * If the current context of \a req is dead somehow, e.g. we just switched
517  * flavor and thus marked the original contexts dead, find a new context for
518  * it. If no switch is needed, \a req will end up with the same context.
519  *
520  * \note a request must have a context to keep other parts of the code happy.
521  * On any failure during the switch, we must restore the old one.
522  */
523 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
524 {
525         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
526         struct ptlrpc_cli_ctx *newctx;
527         int                    rc;
528         ENTRY;
529
530         LASSERT(oldctx);
531
532         sptlrpc_cli_ctx_get(oldctx);
533         sptlrpc_req_put_ctx(req, 0);
534
535         rc = sptlrpc_req_get_ctx(req);
536         if (unlikely(rc)) {
537                 LASSERT(!req->rq_cli_ctx);
538
539                 /* restore old ctx */
540                 req->rq_cli_ctx = oldctx;
541                 RETURN(rc);
542         }
543
544         newctx = req->rq_cli_ctx;
545         LASSERT(newctx);
546
547         if (unlikely(newctx == oldctx && 
548                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
549                 /*
550                  * still got the old dead ctx, which usually means the system is too busy
551                  */
552                 CDEBUG(D_SEC,
553                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
554                        newctx, newctx->cc_flags);
555
556                 schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
557                                                    HZ);
558         } else {
559                 /*
560                  * it's possible newctx == oldctx if we're switching
561                  * subflavor with the same sec.
562                  */
563                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
564                 if (rc) {
565                         /* restore old ctx */
566                         sptlrpc_req_put_ctx(req, 0);
567                         req->rq_cli_ctx = oldctx;
568                         RETURN(rc);
569                 }
570
571                 LASSERT(req->rq_cli_ctx == newctx);
572         }
573
574         sptlrpc_cli_ctx_put(oldctx, 1);
575         RETURN(0);
576 }
577 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
578
579 static
580 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
581 {
582         if (cli_ctx_is_refreshed(ctx))
583                 return 1;
584         return 0;
585 }
586
587 static
588 int ctx_refresh_timeout(void *data)
589 {
590         struct ptlrpc_request *req = data;
591         int rc;
592
593         /* conn_cnt is needed in expire_one_request */
594         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
595
596         rc = ptlrpc_expire_one_request(req, 1);
597         /* if we started recovery, we should mark this ctx dead; otherwise,
598          * if lgssd died, nobody would retire this ctx and the following
599          * connect attempts would still find the same ctx, thus causing a deadlock.
600          * there's an assumption that the expire time of the request should be
601          * later than the context refresh expire time.
602          */
603         if (rc == 0)
604                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
605         return rc;
606 }
607
608 static
609 void ctx_refresh_interrupt(void *data)
610 {
611         struct ptlrpc_request *req = data;
612
613         spin_lock(&req->rq_lock);
614         req->rq_intr = 1;
615         spin_unlock(&req->rq_lock);
616 }
617
618 static
619 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
620 {
621         spin_lock(&ctx->cc_lock);
622         if (!cfs_list_empty(&req->rq_ctx_chain))
623                 cfs_list_del_init(&req->rq_ctx_chain);
624         spin_unlock(&ctx->cc_lock);
625 }
626
627 /**
628  * To refresh the context of \a req, if it's not up-to-date.
629  * \param timeout
630  * - < 0: don't wait
631  * - = 0: wait until success or a fatal error occurs
632  * - > 0: timeout value (in seconds)
633  *
634  * The status of the context may be changed by other threads at any time.
635  * We allow this race, but once we return with 0, the caller will assume
636  * it's up to date and keep using it until the owning RPC is done.
637  *
638  * \retval 0 only if the context is up to date.
639  * \retval -ev error number.
640  */
641 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
642 {
643         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
644         struct ptlrpc_sec      *sec;
645         struct l_wait_info      lwi;
646         int                     rc;
647         ENTRY;
648
649         LASSERT(ctx);
650
651         if (req->rq_ctx_init || req->rq_ctx_fini)
652                 RETURN(0);
653
654         /*
655          * during the process a request's context might even change type
656          * (e.g. from a gss ctx to a null ctx), so each loop we need to
657          * re-check everything
658          */
659 again:
660         rc = import_sec_validate_get(req->rq_import, &sec);
661         if (rc)
662                 RETURN(rc);
663
664         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
665                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
666                       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
667                 req_off_ctx_list(req, ctx);
668                 sptlrpc_req_replace_dead_ctx(req);
669                 ctx = req->rq_cli_ctx;
670         }
671         sptlrpc_sec_put(sec);
672
673         if (cli_ctx_is_eternal(ctx))
674                 RETURN(0);
675
676         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
677                 LASSERT(ctx->cc_ops->refresh);
678                 ctx->cc_ops->refresh(ctx);
679         }
680         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
681
682         LASSERT(ctx->cc_ops->validate);
683         if (ctx->cc_ops->validate(ctx) == 0) {
684                 req_off_ctx_list(req, ctx);
685                 RETURN(0);
686         }
687
688         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
689                 spin_lock(&req->rq_lock);
690                 req->rq_err = 1;
691                 spin_unlock(&req->rq_lock);
692                 req_off_ctx_list(req, ctx);
693                 RETURN(-EPERM);
694         }
695
696         /*
697          * There's a subtle issue when resending RPCs. Consider the
698          * following situation:
699          *  1. the request was sent to the server.
700          *  2. recovery was kicked off; after it finished, the request was
701          *     marked as resent.
702          *  3. the request is resent.
703          *  4. the old reply from the server is received; we accept and
704          *     verify it. This has to succeed, otherwise the error becomes
705          *     visible to the application.
706          *  5. the new reply from the server is received and dropped by LNet.
707          *
708          * Note the xid of the old & new request is the same. We can't simply
709          * change the xid of the resent request because the server relies on
710          * it for reply reconstruction.
711          *
712          * Commonly the original context should be up to date because we
713          * have a comfortable expiry time; the server will keep its context
714          * because we hold at least one ref on the old context, which prevents
715          * the context-destroying RPC from being sent. So the server can still
716          * accept the request and finish the RPC. But if that's not the case:
717          *  1. If the server-side context has been trimmed, NO_CONTEXT will
718          *     be returned and gss_cli_ctx_verify/unseal will switch to the
719          *     new context by force.
720          *  2. If the current context was never refreshed, we are fine: we
721          *     never really sent a request with the old context before.
722          */
723         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
724             unlikely(req->rq_reqmsg) &&
725             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
726                 req_off_ctx_list(req, ctx);
727                 RETURN(0);
728         }
729
730         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
731                 req_off_ctx_list(req, ctx);
732                 /*
733                  * don't switch ctx if import was deactivated
734                  */
735                 if (req->rq_import->imp_deactive) {
736                         spin_lock(&req->rq_lock);
737                         req->rq_err = 1;
738                         spin_unlock(&req->rq_lock);
739                         RETURN(-EINTR);
740                 }
741
742                 rc = sptlrpc_req_replace_dead_ctx(req);
743                 if (rc) {
744                         LASSERT(ctx == req->rq_cli_ctx);
745                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
746                                req, ctx, rc);
747                         spin_lock(&req->rq_lock);
748                         req->rq_err = 1;
749                         spin_unlock(&req->rq_lock);
750                         RETURN(rc);
751                 }
752
753                 ctx = req->rq_cli_ctx;
754                 goto again;
755         }
756
757         /*
758          * Now we're sure this context is in the middle of an upcall;
759          * add ourselves to the waiting list
760          */
761         spin_lock(&ctx->cc_lock);
762         if (cfs_list_empty(&req->rq_ctx_chain))
763                 cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
764         spin_unlock(&ctx->cc_lock);
765
766         if (timeout < 0)
767                 RETURN(-EWOULDBLOCK);
768
769         /* Clear any flags that may be present from previous sends */
770         LASSERT(req->rq_receiving_reply == 0);
771         spin_lock(&req->rq_lock);
772         req->rq_err = 0;
773         req->rq_timedout = 0;
774         req->rq_resend = 0;
775         req->rq_restart = 0;
776         spin_unlock(&req->rq_lock);
777
778         lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
779                                ctx_refresh_interrupt, req);
780         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
781
782         /*
783          * the following cases could lead us here:
784          * - successfully refreshed;
785          * - interrupted;
786          * - timed out, and we don't want to recover from the failure;
787          * - timed out, and woken up when recovery finished;
788          * - someone else marked this ctx dead by force;
789          * - someone invalidated the req and called ptlrpc_client_wake_req(),
790          *   e.g. ptlrpc_abort_inflight();
791          */
792         if (!cli_ctx_is_refreshed(ctx)) {
793                 /* timed out or interrupted */
794                 req_off_ctx_list(req, ctx);
795
796                 LASSERT(rc != 0);
797                 RETURN(rc);
798         }
799
800         goto again;
801 }
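/*
 * Usage sketch (illustrative only): the timeout argument selects the waiting
 * behaviour described above.
 *
 *	poll without waiting:
 *		rc = sptlrpc_req_refresh_ctx(req, -1);
 *	wait until refreshed or a fatal error occurs:
 *		rc = sptlrpc_req_refresh_ctx(req, 0);
 *	wait for at most 30 seconds:
 *		rc = sptlrpc_req_refresh_ctx(req, 30);
 */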
802
803 /**
804  * Initialize flavor settings for \a req, according to \a opcode.
805  *
806  * \note this could be called in two situations:
807  * - a new request from ptlrpc_pre_req(), with a proper \a opcode
808  * - an old request which changed ctx in the middle, with \a opcode == 0
809  */
810 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
811 {
812         struct ptlrpc_sec *sec;
813
814         LASSERT(req->rq_import);
815         LASSERT(req->rq_cli_ctx);
816         LASSERT(req->rq_cli_ctx->cc_sec);
817         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
818
819         /* special security flags according to opcode */
820         switch (opcode) {
821         case OST_READ:
822         case MDS_READPAGE:
823         case MGS_CONFIG_READ:
824         case OBD_IDX_READ:
825                 req->rq_bulk_read = 1;
826                 break;
827         case OST_WRITE:
828         case MDS_WRITEPAGE:
829                 req->rq_bulk_write = 1;
830                 break;
831         case SEC_CTX_INIT:
832                 req->rq_ctx_init = 1;
833                 break;
834         case SEC_CTX_FINI:
835                 req->rq_ctx_fini = 1;
836                 break;
837         case 0:
838                 /* an init/fini rpc won't be resent, so it can't get here */
839                 LASSERT(req->rq_ctx_init == 0);
840                 LASSERT(req->rq_ctx_fini == 0);
841
842                 /* cleanup flags, which should be recalculated */
843                 req->rq_pack_udesc = 0;
844                 req->rq_pack_bulk = 0;
845                 break;
846         }
847
848         sec = req->rq_cli_ctx->cc_sec;
849
850         spin_lock(&sec->ps_lock);
851         req->rq_flvr = sec->ps_flvr;
852         spin_unlock(&sec->ps_lock);
853
854         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
855          * destruction rpc */
856         if (unlikely(req->rq_ctx_init))
857                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
858         else if (unlikely(req->rq_ctx_fini))
859                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
860
861         /* user descriptor flag, null security can't do it anyway */
862         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
863             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
864                 req->rq_pack_udesc = 1;
865
866         /* bulk security flag */
867         if ((req->rq_bulk_read || req->rq_bulk_write) &&
868             sptlrpc_flavor_has_bulk(&req->rq_flvr))
869                 req->rq_pack_bulk = 1;
870 }
871
872 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
873 {
874         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
875                 return;
876
877         LASSERT(req->rq_clrbuf);
878         if (req->rq_pool || !req->rq_reqbuf)
879                 return;
880
881         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
882         req->rq_reqbuf = NULL;
883         req->rq_reqbuf_len = 0;
884 }
885
886 /**
887  * Given an import \a imp, check whether the current user has a valid context
888  * or not. We may create a new context and try to refresh it, retrying
889  * repeatedly in case of non-fatal errors. Return 0 means success.
890  */
891 int sptlrpc_import_check_ctx(struct obd_import *imp)
892 {
893         struct ptlrpc_sec     *sec;
894         struct ptlrpc_cli_ctx *ctx;
895         struct ptlrpc_request *req = NULL;
896         int rc;
897         ENTRY;
898
899         cfs_might_sleep();
900
901         sec = sptlrpc_import_sec_ref(imp);
902         ctx = get_my_ctx(sec);
903         sptlrpc_sec_put(sec);
904
905         if (!ctx)
906                 RETURN(-ENOMEM);
907
908         if (cli_ctx_is_eternal(ctx) ||
909             ctx->cc_ops->validate(ctx) == 0) {
910                 sptlrpc_cli_ctx_put(ctx, 1);
911                 RETURN(0);
912         }
913
914         if (cli_ctx_is_error(ctx)) {
915                 sptlrpc_cli_ctx_put(ctx, 1);
916                 RETURN(-EACCES);
917         }
918
919         req = ptlrpc_request_cache_alloc(__GFP_IO);
920         if (!req)
921                 RETURN(-ENOMEM);
922
923         spin_lock_init(&req->rq_lock);
924         cfs_atomic_set(&req->rq_refcount, 10000);
925         CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
926         init_waitqueue_head(&req->rq_reply_waitq);
927         init_waitqueue_head(&req->rq_set_waitq);
928         req->rq_import = imp;
929         req->rq_flvr = sec->ps_flvr;
930         req->rq_cli_ctx = ctx;
931
932         rc = sptlrpc_req_refresh_ctx(req, 0);
933         LASSERT(cfs_list_empty(&req->rq_ctx_chain));
934         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
935         ptlrpc_request_cache_free(req);
936
937         RETURN(rc);
938 }
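/*
 * Usage sketch (illustrative only): a caller can verify that a usable
 * context exists for the current user before issuing real RPCs on the
 * import; 0 means success, otherwise a negative error such as -EACCES
 * or -ENOMEM is returned.
 *
 *	rc = sptlrpc_import_check_ctx(imp);
 *	if (rc)
 *		return rc;
 */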
939
940 /**
941  * Used by ptlrpc client, to perform the pre-defined security transformation
942  * upon the request message of \a req. After this function is called,
943  * req->rq_reqmsg is still accessible as clear text.
944  */
945 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
946 {
947         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
948         int rc = 0;
949         ENTRY;
950
951         LASSERT(ctx);
952         LASSERT(ctx->cc_sec);
953         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
954
955         /* we wrap the bulk request here because now we can be sure
956          * the context is up to date.
957          */
958         if (req->rq_bulk) {
959                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
960                 if (rc)
961                         RETURN(rc);
962         }
963
964         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
965         case SPTLRPC_SVC_NULL:
966         case SPTLRPC_SVC_AUTH:
967         case SPTLRPC_SVC_INTG:
968                 LASSERT(ctx->cc_ops->sign);
969                 rc = ctx->cc_ops->sign(ctx, req);
970                 break;
971         case SPTLRPC_SVC_PRIV:
972                 LASSERT(ctx->cc_ops->seal);
973                 rc = ctx->cc_ops->seal(ctx, req);
974                 break;
975         default:
976                 LBUG();
977         }
978
979         if (rc == 0) {
980                 LASSERT(req->rq_reqdata_len);
981                 LASSERT(req->rq_reqdata_len % 8 == 0);
982                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
983         }
984
985         RETURN(rc);
986 }
987
988 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
989 {
990         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
991         int                    rc;
992         ENTRY;
993
994         LASSERT(ctx);
995         LASSERT(ctx->cc_sec);
996         LASSERT(req->rq_repbuf);
997         LASSERT(req->rq_repdata);
998         LASSERT(req->rq_repmsg == NULL);
999
1000         req->rq_rep_swab_mask = 0;
1001
1002         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1003         switch (rc) {
1004         case 1:
1005                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1006         case 0:
1007                 break;
1008         default:
1009                 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
1010                 RETURN(-EPROTO);
1011         }
1012
1013         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1014                 CERROR("replied data length %d too small\n",
1015                        req->rq_repdata_len);
1016                 RETURN(-EPROTO);
1017         }
1018
1019         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1020             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1021                 CERROR("reply policy %u doesn't match request policy %u\n",
1022                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1023                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1024                 RETURN(-EPROTO);
1025         }
1026
1027         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1028         case SPTLRPC_SVC_NULL:
1029         case SPTLRPC_SVC_AUTH:
1030         case SPTLRPC_SVC_INTG:
1031                 LASSERT(ctx->cc_ops->verify);
1032                 rc = ctx->cc_ops->verify(ctx, req);
1033                 break;
1034         case SPTLRPC_SVC_PRIV:
1035                 LASSERT(ctx->cc_ops->unseal);
1036                 rc = ctx->cc_ops->unseal(ctx, req);
1037                 break;
1038         default:
1039                 LBUG();
1040         }
1041         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1042
1043         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1044             !req->rq_ctx_init)
1045                 req->rq_rep_swab_mask = 0;
1046         RETURN(rc);
1047 }
1048
1049 /**
1050  * Used by ptlrpc client, to perform security transformation upon the reply
1051  * message of \a req. After return successfully, req->rq_repmsg points to
1052  * the reply message in clear text.
1053  *
1054  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1055  * going to change.
1056  */
1057 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1058 {
1059         LASSERT(req->rq_repbuf);
1060         LASSERT(req->rq_repdata == NULL);
1061         LASSERT(req->rq_repmsg == NULL);
1062         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1063
1064         if (req->rq_reply_off == 0 &&
1065             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1066                 CERROR("real reply with offset 0\n");
1067                 return -EPROTO;
1068         }
1069
1070         if (req->rq_reply_off % 8 != 0) {
1071                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1072                 return -EPROTO;
1073         }
1074
1075         req->rq_repdata = (struct lustre_msg *)
1076                                 (req->rq_repbuf + req->rq_reply_off);
1077         req->rq_repdata_len = req->rq_nob_received;
1078
1079         return do_cli_unwrap_reply(req);
1080 }
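/*
 * Usage sketch (illustrative only): on the client side the request is
 * wrapped before it is sent, and the reply is unwrapped after the reply
 * buffer has been unlinked from LNet.
 *
 *	rc = sptlrpc_cli_wrap_request(req);
 *	if (rc)
 *		RETURN(rc);
 *
 *	... send the request and wait for the reply ...
 *
 *	rc = sptlrpc_cli_unwrap_reply(req);
 *	if (rc == 0)
 *		... req->rq_repmsg now points at the clear-text reply ...
 */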
1081
1082 /**
1083  * Used by ptlrpc client, to perform security transformation upon the early
1084  * reply message of \a req. We expect rq_reply_off to be 0, and
1085  * rq_nob_received to be the early reply size.
1086  *
1087  * Because the receive buffer might still be posted, the reply data might be
1088  * changed at any time, whether we're holding rq_lock or not. For this reason
1089  * we allocate a separate ptlrpc_request and reply buffer for early reply
1090  * processing.
1091  * 
1092  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1093  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1094  * \a *req_ret to release it.
1095  * \retval -ev error number, and \a req_ret will not be set.
1096  */
1097 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1098                                    struct ptlrpc_request **req_ret)
1099 {
1100         struct ptlrpc_request  *early_req;
1101         char                   *early_buf;
1102         int                     early_bufsz, early_size;
1103         int                     rc;
1104         ENTRY;
1105
1106         early_req = ptlrpc_request_cache_alloc(__GFP_IO);
1107         if (early_req == NULL)
1108                 RETURN(-ENOMEM);
1109
1110         early_size = req->rq_nob_received;
1111         early_bufsz = size_roundup_power2(early_size);
1112         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1113         if (early_buf == NULL)
1114                 GOTO(err_req, rc = -ENOMEM);
1115
1116         /* sanity checks and copy the data out; do it inside the spinlock */
1117         spin_lock(&req->rq_lock);
1118
1119         if (req->rq_replied) {
1120                 spin_unlock(&req->rq_lock);
1121                 GOTO(err_buf, rc = -EALREADY);
1122         }
1123
1124         LASSERT(req->rq_repbuf);
1125         LASSERT(req->rq_repdata == NULL);
1126         LASSERT(req->rq_repmsg == NULL);
1127
1128         if (req->rq_reply_off != 0) {
1129                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1130                 spin_unlock(&req->rq_lock);
1131                 GOTO(err_buf, rc = -EPROTO);
1132         }
1133
1134         if (req->rq_nob_received != early_size) {
1135                 /* even if another early reply arrived, the size should be the same */
1136                 CERROR("data size has changed from %u to %u\n",
1137                        early_size, req->rq_nob_received);
1138                 spin_unlock(&req->rq_lock);
1139                 GOTO(err_buf, rc = -EINVAL);
1140         }
1141
1142         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1143                 CERROR("early reply length %d too small\n",
1144                        req->rq_nob_received);
1145                 spin_unlock(&req->rq_lock);
1146                 GOTO(err_buf, rc = -EALREADY);
1147         }
1148
1149         memcpy(early_buf, req->rq_repbuf, early_size);
1150         spin_unlock(&req->rq_lock);
1151
1152         spin_lock_init(&early_req->rq_lock);
1153         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1154         early_req->rq_flvr = req->rq_flvr;
1155         early_req->rq_repbuf = early_buf;
1156         early_req->rq_repbuf_len = early_bufsz;
1157         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1158         early_req->rq_repdata_len = early_size;
1159         early_req->rq_early = 1;
1160         early_req->rq_reqmsg = req->rq_reqmsg;
1161
1162         rc = do_cli_unwrap_reply(early_req);
1163         if (rc) {
1164                 DEBUG_REQ(D_ADAPTTO, early_req,
1165                           "error %d unwrap early reply", rc);
1166                 GOTO(err_ctx, rc);
1167         }
1168
1169         LASSERT(early_req->rq_repmsg);
1170         *req_ret = early_req;
1171         RETURN(0);
1172
1173 err_ctx:
1174         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1175 err_buf:
1176         OBD_FREE_LARGE(early_buf, early_bufsz);
1177 err_req:
1178         ptlrpc_request_cache_free(early_req);
1179         RETURN(rc);
1180 }
1181
1182 /**
1183  * Used by ptlrpc client, to release a processed early reply \a early_req.
1184  *
1185  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1186  */
1187 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1188 {
1189         LASSERT(early_req->rq_repbuf);
1190         LASSERT(early_req->rq_repdata);
1191         LASSERT(early_req->rq_repmsg);
1192
1193         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1194         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1195         ptlrpc_request_cache_free(early_req);
1196 }
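/*
 * Usage sketch (illustrative only): processing an early reply.  The
 * duplicated request returned by sptlrpc_cli_unwrap_early_reply() must
 * always be released with sptlrpc_cli_finish_early_reply().
 *
 *	struct ptlrpc_request *early_req;
 *
 *	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *	if (rc == 0) {
 *		... read fields from early_req->rq_repmsg ...
 *		sptlrpc_cli_finish_early_reply(early_req);
 *	}
 */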
1197
1198 /**************************************************
1199  * sec ID                                         *
1200  **************************************************/
1201
1202 /*
1203  * "fixed" sec (e.g. null) use sec_id < 0
1204  */
1205 static cfs_atomic_t sptlrpc_sec_id = CFS_ATOMIC_INIT(1);
1206
1207 int sptlrpc_get_next_secid(void)
1208 {
1209         return cfs_atomic_inc_return(&sptlrpc_sec_id);
1210 }
1211 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1212
1213 /**************************************************
1214  * client side high-level security APIs           *
1215  **************************************************/
1216
1217 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1218                                    int grace, int force)
1219 {
1220         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1221
1222         LASSERT(policy->sp_cops);
1223         LASSERT(policy->sp_cops->flush_ctx_cache);
1224
1225         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1226 }
1227
1228 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1229 {
1230         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1231
1232         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1233         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1234         LASSERT(policy->sp_cops->destroy_sec);
1235
1236         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1237
1238         policy->sp_cops->destroy_sec(sec);
1239         sptlrpc_policy_put(policy);
1240 }
1241
1242 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1243 {
1244         sec_cop_destroy_sec(sec);
1245 }
1246 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1247
1248 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1249 {
1250         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1251
1252         if (sec->ps_policy->sp_cops->kill_sec) {
1253                 sec->ps_policy->sp_cops->kill_sec(sec);
1254
1255                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1256         }
1257 }
1258
1259 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1260 {
1261         if (sec)
1262                 cfs_atomic_inc(&sec->ps_refcount);
1263
1264         return sec;
1265 }
1266 EXPORT_SYMBOL(sptlrpc_sec_get);
1267
1268 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1269 {
1270         if (sec) {
1271                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1272
1273                 if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
1274                         sptlrpc_gc_del_sec(sec);
1275                         sec_cop_destroy_sec(sec);
1276                 }
1277         }
1278 }
1279 EXPORT_SYMBOL(sptlrpc_sec_put);
1280
1281 /*
1282  * the policy module is responsible for taking a reference on the import
1283  */
1284 static
1285 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1286                                        struct ptlrpc_svc_ctx *svc_ctx,
1287                                        struct sptlrpc_flavor *sf,
1288                                        enum lustre_sec_part sp)
1289 {
1290         struct ptlrpc_sec_policy *policy;
1291         struct ptlrpc_sec        *sec;
1292         char                      str[32];
1293         ENTRY;
1294
1295         if (svc_ctx) {
1296                 LASSERT(imp->imp_dlm_fake == 1);
1297
1298                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1299                        imp->imp_obd->obd_type->typ_name,
1300                        imp->imp_obd->obd_name,
1301                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1302
1303                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1304                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1305         } else {
1306                 LASSERT(imp->imp_dlm_fake == 0);
1307
1308                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1309                        imp->imp_obd->obd_type->typ_name,
1310                        imp->imp_obd->obd_name,
1311                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1312
1313                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1314                 if (!policy) {
1315                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1316                         RETURN(NULL);
1317                 }
1318         }
1319
1320         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1321         if (sec) {
1322                 cfs_atomic_inc(&sec->ps_refcount);
1323
1324                 sec->ps_part = sp;
1325
1326                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1327                         sptlrpc_gc_add_sec(sec);
1328         } else {
1329                 sptlrpc_policy_put(policy);
1330         }
1331
1332         RETURN(sec);
1333 }
1334
1335 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1336 {
1337         struct ptlrpc_sec *sec;
1338
1339         spin_lock(&imp->imp_lock);
1340         sec = sptlrpc_sec_get(imp->imp_sec);
1341         spin_unlock(&imp->imp_lock);
1342
1343         return sec;
1344 }
1345 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
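/*
 * Usage sketch (illustrative only): sptlrpc_import_sec_ref() returns the
 * import's sec with a reference held, which must be dropped again with
 * sptlrpc_sec_put().
 *
 *	struct ptlrpc_sec *sec = sptlrpc_import_sec_ref(imp);
 *
 *	if (sec) {
 *		... inspect sec->ps_flvr ...
 *		sptlrpc_sec_put(sec);
 *	}
 */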
1346
1347 static void sptlrpc_import_sec_install(struct obd_import *imp,
1348                                        struct ptlrpc_sec *sec)
1349 {
1350         struct ptlrpc_sec *old_sec;
1351
1352         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1353
1354         spin_lock(&imp->imp_lock);
1355         old_sec = imp->imp_sec;
1356         imp->imp_sec = sec;
1357         spin_unlock(&imp->imp_lock);
1358
1359         if (old_sec) {
1360                 sptlrpc_sec_kill(old_sec);
1361
1362                 /* balance the ref taken by this import */
1363                 sptlrpc_sec_put(old_sec);
1364         }
1365 }
1366
1367 static inline
1368 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1369 {
1370         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1371 }
1372
1373 static inline
1374 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1375 {
1376         *dst = *src;
1377 }
1378
1379 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1380                                              struct ptlrpc_sec *sec,
1381                                              struct sptlrpc_flavor *sf)
1382 {
1383         char    str1[32], str2[32];
1384
1385         if (sec->ps_flvr.sf_flags != sf->sf_flags)
1386                 CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
1387                        sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1388                                             str1, sizeof(str1)),
1389                        sptlrpc_secflags2str(sf->sf_flags,
1390                                             str2, sizeof(str2)));
1391
1392         spin_lock(&sec->ps_lock);
1393         flavor_copy(&sec->ps_flvr, sf);
1394         spin_unlock(&sec->ps_lock);
1395 }
1396
1397 /**
1398  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1399  * configuration. When called, imp->imp_sec may or may not be NULL.
1400  *
1401  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1402  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1403  */
1404 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1405                              struct ptlrpc_svc_ctx *svc_ctx,
1406                              struct sptlrpc_flavor *flvr)
1407 {
1408         struct ptlrpc_connection   *conn;
1409         struct sptlrpc_flavor       sf;
1410         struct ptlrpc_sec          *sec, *newsec;
1411         enum lustre_sec_part        sp;
1412         char                        str[24];
1413         int                         rc = 0;
1414         ENTRY;
1415
1416         cfs_might_sleep();
1417
1418         if (imp == NULL)
1419                 RETURN(0);
1420
1421         conn = imp->imp_connection;
1422
1423         if (svc_ctx == NULL) {
1424                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1425                 /*
1426                  * normal import: determine the flavor from the rule set,
1427                  * except for the MGC, whose flavor is predetermined.
1428                  */
1429                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1430                         sf = cliobd->cl_flvr_mgc;
1431                 else 
1432                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1433                                                    cliobd->cl_sp_to,
1434                                                    &cliobd->cl_target_uuid,
1435                                                    conn->c_self, &sf);
1436
1437                 sp = imp->imp_obd->u.cli.cl_sp_me;
1438         } else {
1439                 /* reverse import, determine flavor from the incoming request */
1440                 sf = *flvr;
1441
1442                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1443                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1444                                       PTLRPC_SEC_FL_ROOTONLY;
1445
1446                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1447         }
1448
1449         sec = sptlrpc_import_sec_ref(imp);
1450         if (sec) {
1451                 char    str2[24];
1452
1453                 if (flavor_equal(&sf, &sec->ps_flvr))
1454                         GOTO(out, rc);
1455
1456                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1457                        imp->imp_obd->obd_name,
1458                        obd_uuid2str(&conn->c_remote_uuid),
1459                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1460                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1461
1462                 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1463                     SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1464                     SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1465                     SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1466                         sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1467                         GOTO(out, rc);
1468                 }
1469         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1470                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1471                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1472                        imp->imp_obd->obd_name,
1473                        obd_uuid2str(&conn->c_remote_uuid),
1474                        LNET_NIDNET(conn->c_self),
1475                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1476         }
1477
1478         mutex_lock(&imp->imp_sec_mutex);
1479
1480         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1481         if (newsec) {
1482                 sptlrpc_import_sec_install(imp, newsec);
1483         } else {
1484                 CERROR("import %s->%s: failed to create new sec\n",
1485                        imp->imp_obd->obd_name,
1486                        obd_uuid2str(&conn->c_remote_uuid));
1487                 rc = -EPERM;
1488         }
1489
1490         mutex_unlock(&imp->imp_sec_mutex);
1491 out:
1492         sptlrpc_sec_put(sec);
1493         RETURN(rc);
1494 }
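/*
 * Usage sketch (illustrative only): for a regular (non-reverse) import the
 * flavor is chosen from the rule set, so a caller passes NULL for both
 * svc_ctx and flvr, as import_sec_check_expire() does earlier in this file.
 *
 *	rc = sptlrpc_import_sec_adapt(imp, NULL, NULL);
 */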
1495
1496 void sptlrpc_import_sec_put(struct obd_import *imp)
1497 {
1498         if (imp->imp_sec) {
1499                 sptlrpc_sec_kill(imp->imp_sec);
1500
1501                 sptlrpc_sec_put(imp->imp_sec);
1502                 imp->imp_sec = NULL;
1503         }
1504 }
1505
1506 static void import_flush_ctx_common(struct obd_import *imp,
1507                                     uid_t uid, int grace, int force)
1508 {
1509         struct ptlrpc_sec *sec;
1510
1511         if (imp == NULL)
1512                 return;
1513
1514         sec = sptlrpc_import_sec_ref(imp);
1515         if (sec == NULL)
1516                 return;
1517
1518         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1519         sptlrpc_sec_put(sec);
1520 }
1521
1522 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1523 {
1524         /* it's important to use grace mode, see the explanation in
1525          * sptlrpc_req_refresh_ctx() */
1526         import_flush_ctx_common(imp, 0, 1, 1);
1527 }
1528
1529 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1530 {
1531         import_flush_ctx_common(imp, current_uid(), 1, 1);
1532 }
1533 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1534
1535 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1536 {
1537         import_flush_ctx_common(imp, -1, 1, 1);
1538 }
1539 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
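/*
 * Usage sketch (illustrative only): the three flush helpers above differ
 * only in the uid passed to import_flush_ctx_common().
 *
 *	flush root's contexts only:
 *		sptlrpc_import_flush_root_ctx(imp);
 *	flush the calling user's contexts:
 *		sptlrpc_import_flush_my_ctx(imp);
 *	flush every context on the import:
 *		sptlrpc_import_flush_all_ctx(imp);
 */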
1540
1541 /**
1542  * Used by ptlrpc client to allocate the request buffer of \a req. Upon
1543  * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
1544  */
1545 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1546 {
1547         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1548         struct ptlrpc_sec_policy *policy;
1549         int rc;
1550
1551         LASSERT(ctx);
1552         LASSERT(ctx->cc_sec);
1553         LASSERT(ctx->cc_sec->ps_policy);
1554         LASSERT(req->rq_reqmsg == NULL);
1555         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1556
1557         policy = ctx->cc_sec->ps_policy;
1558         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1559         if (!rc) {
1560                 LASSERT(req->rq_reqmsg);
1561                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1562
1563                 /* zeroing preallocated buffer */
1564                 if (req->rq_pool)
1565                         memset(req->rq_reqmsg, 0, msgsize);
1566         }
1567
1568         return rc;
1569 }
1570
1571 /**
1572  * Used by ptlrpc client to free request buffer of \a req. After this
1573  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1574  */
1575 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1576 {
1577         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1578         struct ptlrpc_sec_policy *policy;
1579
1580         LASSERT(ctx);
1581         LASSERT(ctx->cc_sec);
1582         LASSERT(ctx->cc_sec->ps_policy);
1583         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1584
1585         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1586                 return;
1587
1588         policy = ctx->cc_sec->ps_policy;
1589         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1590         req->rq_reqmsg = NULL;
1591 }
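
/*
 * Lifecycle sketch (illustrative only): the request buffer is obtained
 * through the security policy before the request is packed and released
 * again when the request is finalized. "msgsize" stands in for whatever
 * size the caller computed for the packed message.
 *
 *      rc = sptlrpc_cli_alloc_reqbuf(req, msgsize);
 *      if (rc)
 *              return rc;
 *      ... fill req->rq_reqmsg, send the request, handle the reply ...
 *      sptlrpc_cli_free_reqbuf(req);
 */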
1592
1593 /*
1594  * NOTE the caller must guarantee the buffer is large enough for the enlargement
1595  */
1596 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1597                                   int segment, int newsize)
1598 {
1599         void   *src, *dst;
1600         int     oldsize, oldmsg_size, movesize;
1601
1602         LASSERT(segment < msg->lm_bufcount);
1603         LASSERT(msg->lm_buflens[segment] <= newsize);
1604
1605         if (msg->lm_buflens[segment] == newsize)
1606                 return;
1607
1608         /* nothing to move if we are enlarging the last segment */
1609         if (segment == msg->lm_bufcount - 1) {
1610                 msg->lm_buflens[segment] = newsize;
1611                 return;
1612         }
1613
1614         oldsize = msg->lm_buflens[segment];
1615
1616         src = lustre_msg_buf(msg, segment + 1, 0);
1617         msg->lm_buflens[segment] = newsize;
1618         dst = lustre_msg_buf(msg, segment + 1, 0);
1619         msg->lm_buflens[segment] = oldsize;
1620
1621         /* move from segment + 1 to end segment */
1622         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1623         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1624         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1625         LASSERT(movesize >= 0);
1626
1627         if (movesize)
1628                 memmove(dst, src, movesize);
1629
1630         /* note we don't clear the area where the old data lived, it's not secret */
1631
1632         /* finally set new segment size */
1633         msg->lm_buflens[segment] = newsize;
1634 }
1635 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
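
/*
 * Worked example (illustrative only, sizes are arbitrary): enlarging
 * segment 1 of a three-segment LUSTRE_MSG_MAGIC_V2 message from 8 to 16
 * bytes. Everything from segment 2 onward is memmove()d up to the new
 * offset of segment 2, then lm_buflens[1] is updated:
 *
 *      before:  | header | seg0 (32) | seg1 (8)  | seg2 (24) |
 *      after:   | header | seg0 (32) | seg1 (16) | seg2 (24) |
 *
 * The caller must have guaranteed in advance that the underlying buffer
 * can hold the enlarged message.
 */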
1636
1637 /**
1638  * Used by ptlrpc client to enlarge the \a segment of the request message
1639  * pointed to by req->rq_reqmsg to size \a newsize. All previously filled-in
1640  * data is preserved after the enlargement. This must be called after the
1641  * original request buffer has been allocated.
1642  *
1643  * \note After this is called, rq_reqmsg and rq_reqlen might have changed,
1644  * so the caller should refresh its local pointers if needed.
1645  */
1646 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1647                                int segment, int newsize)
1648 {
1649         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1650         struct ptlrpc_sec_cops   *cops;
1651         struct lustre_msg        *msg = req->rq_reqmsg;
1652
1653         LASSERT(ctx);
1654         LASSERT(msg);
1655         LASSERT(msg->lm_bufcount > segment);
1656         LASSERT(msg->lm_buflens[segment] <= newsize);
1657
1658         if (msg->lm_buflens[segment] == newsize)
1659                 return 0;
1660
1661         cops = ctx->cc_sec->ps_policy->sp_cops;
1662         LASSERT(cops->enlarge_reqbuf);
1663         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1664 }
1665 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
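
/*
 * Usage sketch (illustrative, not built): enlarge one request segment and
 * refresh the cached pointer afterwards, since rq_reqmsg may have been
 * reallocated by the policy. "segment" and "newsize" are placeholders
 * chosen by the caller.
 *
 *      rc = sptlrpc_cli_enlarge_reqbuf(req, segment, newsize);
 *      if (rc == 0)
 *              buf = lustre_msg_buf(req->rq_reqmsg, segment, newsize);
 */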
1666
1667 /**
1668  * Used by ptlrpc client to allocate reply buffer of \a req.
1669  *
1670  * \note After this, req->rq_repmsg is still not accessible.
1671  */
1672 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1673 {
1674         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1675         struct ptlrpc_sec_policy *policy;
1676         ENTRY;
1677
1678         LASSERT(ctx);
1679         LASSERT(ctx->cc_sec);
1680         LASSERT(ctx->cc_sec->ps_policy);
1681
1682         if (req->rq_repbuf)
1683                 RETURN(0);
1684
1685         policy = ctx->cc_sec->ps_policy;
1686         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1687 }
1688
1689 /**
1690  * Used by ptlrpc client to free reply buffer of \a req. After this
1691  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1692  */
1693 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1694 {
1695         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1696         struct ptlrpc_sec_policy *policy;
1697         ENTRY;
1698
1699         LASSERT(ctx);
1700         LASSERT(ctx->cc_sec);
1701         LASSERT(ctx->cc_sec->ps_policy);
1702         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1703
1704         if (req->rq_repbuf == NULL)
1705                 return;
1706         LASSERT(req->rq_repbuf_len);
1707
1708         policy = ctx->cc_sec->ps_policy;
1709         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1710         req->rq_repmsg = NULL;
1711         EXIT;
1712 }
1713
1714 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1715                                 struct ptlrpc_cli_ctx *ctx)
1716 {
1717         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1718
1719         if (!policy->sp_cops->install_rctx)
1720                 return 0;
1721         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1722 }
1723
1724 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1725                                 struct ptlrpc_svc_ctx *ctx)
1726 {
1727         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1728
1729         if (!policy->sp_sops->install_rctx)
1730                 return 0;
1731         return policy->sp_sops->install_rctx(imp, ctx);
1732 }
1733
1734 /****************************************
1735  * server side security                 *
1736  ****************************************/
1737
1738 static int flavor_allowed(struct sptlrpc_flavor *exp,
1739                           struct ptlrpc_request *req)
1740 {
1741         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1742
1743         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1744                 return 1;
1745
1746         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1747             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1748             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1749             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1750                 return 1;
1751
1752         return 0;
1753 }
1754
1755 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1756
1757 /**
1758  * Given an export \a exp, check whether the flavor of the incoming \a req
1759  * is allowed by that export. The main logic deals with configurations that
1760  * change over time. Returns 0 on success.
1761  */
1762 int sptlrpc_target_export_check(struct obd_export *exp,
1763                                 struct ptlrpc_request *req)
1764 {
1765         struct sptlrpc_flavor   flavor;
1766
1767         if (exp == NULL)
1768                 return 0;
1769
1770         /* client side export has no imp_reverse, skip
1771          * FIXME maybe we should check the flavor here as well??? */
1772         if (exp->exp_imp_reverse == NULL)
1773                 return 0;
1774
1775         /* don't care about ctx fini rpc */
1776         if (req->rq_ctx_fini)
1777                 return 0;
1778
1779         spin_lock(&exp->exp_lock);
1780
1781         /* if the flavor just changed (exp->exp_flvr_changed != 0), we wait
1782          * for the first req with the new flavor, then treat it as the current
1783          * flavor and adapt the reverse sec according to it.
1784          * note the first rpc with the new flavor might not carry a root ctx,
1785          * in which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1786         if (unlikely(exp->exp_flvr_changed) &&
1787             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1788                 /* make the new flavor "current", and the old ones
1789                  * about-to-expire */
1790                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1791                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1792                 flavor = exp->exp_flvr_old[1];
1793                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1794                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1795                 exp->exp_flvr_old[0] = exp->exp_flvr;
1796                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1797                                           EXP_FLVR_UPDATE_EXPIRE;
1798                 exp->exp_flvr = flavor;
1799
1800                 /* flavor change finished */
1801                 exp->exp_flvr_changed = 0;
1802                 LASSERT(exp->exp_flvr_adapt == 1);
1803
1804                 /* if it's gss, we are only interested in root ctx init */
1805                 if (req->rq_auth_gss &&
1806                     !(req->rq_ctx_init &&
1807                       (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1808                        req->rq_auth_usr_ost))) {
1809                         spin_unlock(&exp->exp_lock);
1810                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1811                                req->rq_auth_gss, req->rq_ctx_init,
1812                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1813                                req->rq_auth_usr_ost);
1814                         return 0;
1815                 }
1816
1817                 exp->exp_flvr_adapt = 0;
1818                 spin_unlock(&exp->exp_lock);
1819
1820                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1821                                                 req->rq_svc_ctx, &flavor);
1822         }
1823
1824         /* if it equals the current flavor, we accept it, but still need
1825          * to deal with the reverse sec/ctx */
1826         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1827                 /* most cases return here; we are only interested in
1828                  * gss root ctx init */
1829                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1830                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1831                      !req->rq_auth_usr_ost)) {
1832                         spin_unlock(&exp->exp_lock);
1833                         return 0;
1834                 }
1835
1836                 /* if the flavor just changed, we should not proceed; just
1837                  * leave it, the current flavor will be discovered and
1838                  * replaced shortly, and let _this_ rpc pass through */
1839                 if (exp->exp_flvr_changed) {
1840                         LASSERT(exp->exp_flvr_adapt);
1841                         spin_unlock(&exp->exp_lock);
1842                         return 0;
1843                 }
1844
1845                 if (exp->exp_flvr_adapt) {
1846                         exp->exp_flvr_adapt = 0;
1847                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1848                                exp, exp->exp_flvr.sf_rpc,
1849                                exp->exp_flvr_old[0].sf_rpc,
1850                                exp->exp_flvr_old[1].sf_rpc);
1851                         flavor = exp->exp_flvr;
1852                         spin_unlock(&exp->exp_lock);
1853
1854                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1855                                                         req->rq_svc_ctx,
1856                                                         &flavor);
1857                 } else {
1858                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1859                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1860                                exp->exp_flvr_old[0].sf_rpc,
1861                                exp->exp_flvr_old[1].sf_rpc);
1862                         spin_unlock(&exp->exp_lock);
1863
1864                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1865                                                            req->rq_svc_ctx);
1866                 }
1867         }
1868
1869         if (exp->exp_flvr_expire[0]) {
1870                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1871                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1872                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1873                                        "middle one ("CFS_DURATION_T")\n", exp,
1874                                        exp->exp_flvr.sf_rpc,
1875                                        exp->exp_flvr_old[0].sf_rpc,
1876                                        exp->exp_flvr_old[1].sf_rpc,
1877                                        exp->exp_flvr_expire[0] -
1878                                                 cfs_time_current_sec());
1879                                 spin_unlock(&exp->exp_lock);
1880                                 return 0;
1881                         }
1882                 } else {
1883                         CDEBUG(D_SEC, "mark middle expired\n");
1884                         exp->exp_flvr_expire[0] = 0;
1885                 }
1886                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1887                        exp->exp_flvr.sf_rpc,
1888                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1889                        req->rq_flvr.sf_rpc);
1890         }
1891
1892         /* it doesn't match the current flavor, so the only chance to
1893          * accept it is matching an old flavor which has not expired yet. */
1894         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1895                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1896                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1897                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1898                                        "oldest one ("CFS_DURATION_T")\n", exp,
1899                                        exp->exp_flvr.sf_rpc,
1900                                        exp->exp_flvr_old[0].sf_rpc,
1901                                        exp->exp_flvr_old[1].sf_rpc,
1902                                        exp->exp_flvr_expire[1] -
1903                                                 cfs_time_current_sec());
1904                                 spin_unlock(&exp->exp_lock);
1905                                 return 0;
1906                         }
1907                 } else {
1908                         CDEBUG(D_SEC, "mark oldest expired\n");
1909                         exp->exp_flvr_expire[1] = 0;
1910                 }
1911                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1912                        exp, exp->exp_flvr.sf_rpc,
1913                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1914                        req->rq_flvr.sf_rpc);
1915         } else {
1916                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1917                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1918                        exp->exp_flvr_old[1].sf_rpc);
1919         }
1920
1921         spin_unlock(&exp->exp_lock);
1922
1923         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
1924               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1925               exp, exp->exp_obd->obd_name,
1926               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1927               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1928               req->rq_flvr.sf_rpc,
1929               exp->exp_flvr.sf_rpc,
1930               exp->exp_flvr_old[0].sf_rpc,
1931               exp->exp_flvr_expire[0] ?
1932               (unsigned long) (exp->exp_flvr_expire[0] -
1933                                cfs_time_current_sec()) : 0,
1934               exp->exp_flvr_old[1].sf_rpc,
1935               exp->exp_flvr_expire[1] ?
1936               (unsigned long) (exp->exp_flvr_expire[1] -
1937                                cfs_time_current_sec()) : 0);
1938         return -EACCES;
1939 }
1940 EXPORT_SYMBOL(sptlrpc_target_export_check);
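
/*
 * Illustrative timeline of the flavor bookkeeping above (example values,
 * not code): when a configuration change switches an export from flavor A
 * to flavor B, the first request carrying B makes it "current" while A is
 * kept around and still accepted for a grace window:
 *
 *      exp_flvr        = B
 *      exp_flvr_old[0] = A,  exp_flvr_expire[0] = now + EXP_FLVR_UPDATE_EXPIRE
 *      exp_flvr_old[1]/exp_flvr_expire[1] inherit the previous old[0]/expire[0]
 *
 * Requests still using A are accepted until exp_flvr_expire[0] passes;
 * anything else falls through to the CWARN above and returns -EACCES.
 */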
1941
1942 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1943                                       struct sptlrpc_rule_set *rset)
1944 {
1945         struct obd_export       *exp;
1946         struct sptlrpc_flavor    new_flvr;
1947
1948         LASSERT(obd);
1949
1950         spin_lock(&obd->obd_dev_lock);
1951
1952         cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1953                 if (exp->exp_connection == NULL)
1954                         continue;
1955
1956                 /* note if this export's flavor had just been updated
1957                  * (exp_flvr_changed == 1), this will override the
1958                  * previous update. */
1959                 spin_lock(&exp->exp_lock);
1960                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1961                                              exp->exp_connection->c_peer.nid,
1962                                              &new_flvr);
1963                 if (exp->exp_flvr_changed ||
1964                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1965                         exp->exp_flvr_old[1] = new_flvr;
1966                         exp->exp_flvr_expire[1] = 0;
1967                         exp->exp_flvr_changed = 1;
1968                         exp->exp_flvr_adapt = 1;
1969
1970                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1971                                exp, sptlrpc_part2name(exp->exp_sp_peer),
1972                                exp->exp_flvr.sf_rpc,
1973                                exp->exp_flvr_old[1].sf_rpc);
1974                 }
1975                 spin_unlock(&exp->exp_lock);
1976         }
1977
1978         spin_unlock(&obd->obd_dev_lock);
1979 }
1980 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1981
1982 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1983 {
1984         /* peer's claim is unreliable unless gss is being used */
1985         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
1986                 return svc_rc;
1987
1988         switch (req->rq_sp_from) {
1989         case LUSTRE_SP_CLI:
1990                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
1991                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
1992                         svc_rc = SECSVC_DROP;
1993                 }
1994                 break;
1995         case LUSTRE_SP_MDT:
1996                 if (!req->rq_auth_usr_mdt) {
1997                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
1998                         svc_rc = SECSVC_DROP;
1999                 }
2000                 break;
2001         case LUSTRE_SP_OST:
2002                 if (!req->rq_auth_usr_ost) {
2003                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2004                         svc_rc = SECSVC_DROP;
2005                 }
2006                 break;
2007         case LUSTRE_SP_MGS:
2008         case LUSTRE_SP_MGC:
2009                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2010                     !req->rq_auth_usr_ost) {
2011                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2012                         svc_rc = SECSVC_DROP;
2013                 }
2014                 break;
2015         case LUSTRE_SP_ANY:
2016         default:
2017                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2018                 svc_rc = SECSVC_DROP;
2019         }
2020
2021         return svc_rc;
2022 }
2023
2024 /**
2025  * Used by ptlrpc server to perform transformation upon the request message
2026  * of incoming \a req. This must be the first thing done with an incoming
2027  * request in the ptlrpc layer.
2028  *
2029  * \retval SECSVC_OK success; req->rq_reqmsg points to the request message in
2030  * clear text, its size is req->rq_reqlen, and req->rq_svc_ctx is set.
2031  * \retval SECSVC_COMPLETE success; the request has been fully processed and
2032  * the reply message has been prepared.
2033  * \retval SECSVC_DROP failure; this request should be dropped.
2034  */
2035 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2036 {
2037         struct ptlrpc_sec_policy *policy;
2038         struct lustre_msg        *msg = req->rq_reqbuf;
2039         int                       rc;
2040         ENTRY;
2041
2042         LASSERT(msg);
2043         LASSERT(req->rq_reqmsg == NULL);
2044         LASSERT(req->rq_repmsg == NULL);
2045         LASSERT(req->rq_svc_ctx == NULL);
2046
2047         req->rq_req_swab_mask = 0;
2048
2049         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2050         switch (rc) {
2051         case 1:
2052                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
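                /* fall through */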
2053         case 0:
2054                 break;
2055         default:
2056                 CERROR("error unpacking request from %s x"LPU64"\n",
2057                        libcfs_id2str(req->rq_peer), req->rq_xid);
2058                 RETURN(SECSVC_DROP);
2059         }
2060
2061         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2062         req->rq_sp_from = LUSTRE_SP_ANY;
2063         req->rq_auth_uid = INVALID_UID;
2064         req->rq_auth_mapped_uid = INVALID_UID;
2065
2066         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2067         if (!policy) {
2068                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2069                 RETURN(SECSVC_DROP);
2070         }
2071
2072         LASSERT(policy->sp_sops->accept);
2073         rc = policy->sp_sops->accept(req);
2074         sptlrpc_policy_put(policy);
2075         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2076         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2077
2078         /*
2079          * if it's not the null flavor (which means the inner msg is embedded),
2080          * reset the swab mask for the coming inner msg unpacking.
2081          */
2082         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2083                 req->rq_req_swab_mask = 0;
2084
2085         /* sanity check for the request source */
2086         rc = sptlrpc_svc_check_from(req, rc);
2087         RETURN(rc);
2088 }
2089
2090 /**
2091  * Used by ptlrpc server to allocate the reply buffer for \a req. On success,
2092  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2093  * a buffer of \a msglen size.
2094  */
2095 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2096 {
2097         struct ptlrpc_sec_policy *policy;
2098         struct ptlrpc_reply_state *rs;
2099         int rc;
2100         ENTRY;
2101
2102         LASSERT(req->rq_svc_ctx);
2103         LASSERT(req->rq_svc_ctx->sc_policy);
2104
2105         policy = req->rq_svc_ctx->sc_policy;
2106         LASSERT(policy->sp_sops->alloc_rs);
2107
2108         rc = policy->sp_sops->alloc_rs(req, msglen);
2109         if (unlikely(rc == -ENOMEM)) {
2110                 /* failed alloc, try emergency pool */
2111                 rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_svcpt);
2112                 if (rs == NULL)
2113                         RETURN(-ENOMEM);
2114
2115                 req->rq_reply_state = rs;
2116                 rc = policy->sp_sops->alloc_rs(req, msglen);
2117                 if (rc) {
2118                         lustre_put_emerg_rs(rs);
2119                         req->rq_reply_state = NULL;
2120                 }
2121         }
2122
2123         LASSERT(rc != 0 ||
2124                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2125
2126         RETURN(rc);
2127 }
2128
2129 /**
2130  * Used by ptlrpc server to perform transformation upon the reply message.
2131  *
2132  * \post req->rq_reply_off is set to an appropriate server-controlled offset.
2133  * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2134  */
2135 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2136 {
2137         struct ptlrpc_sec_policy *policy;
2138         int rc;
2139         ENTRY;
2140
2141         LASSERT(req->rq_svc_ctx);
2142         LASSERT(req->rq_svc_ctx->sc_policy);
2143
2144         policy = req->rq_svc_ctx->sc_policy;
2145         LASSERT(policy->sp_sops->authorize);
2146
2147         rc = policy->sp_sops->authorize(req);
2148         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2149
2150         RETURN(rc);
2151 }
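
/*
 * Minimal sketch of the expected server-side ordering (illustrative; the
 * real request handling lives in the ptlrpc service code, and "msglen" is
 * a placeholder for the reply size the handler computes):
 *
 *      rc = sptlrpc_svc_unwrap_request(req);
 *      if (rc == SECSVC_DROP)
 *              drop the request;
 *      else if (rc == SECSVC_COMPLETE)
 *              send the already-prepared reply;
 *      else {                                  (SECSVC_OK)
 *              handle req->rq_reqmsg, then
 *              sptlrpc_svc_alloc_rs(req, msglen);
 *              pack the reply and call sptlrpc_svc_wrap_reply(req);
 *      }
 */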
2152
2153 /**
2154  * Used by ptlrpc server, to free reply_state.
2155  */
2156 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2157 {
2158         struct ptlrpc_sec_policy *policy;
2159         unsigned int prealloc;
2160         ENTRY;
2161
2162         LASSERT(rs->rs_svc_ctx);
2163         LASSERT(rs->rs_svc_ctx->sc_policy);
2164
2165         policy = rs->rs_svc_ctx->sc_policy;
2166         LASSERT(policy->sp_sops->free_rs);
2167
2168         prealloc = rs->rs_prealloc;
2169         policy->sp_sops->free_rs(rs);
2170
2171         if (prealloc)
2172                 lustre_put_emerg_rs(rs);
2173         EXIT;
2174 }
2175
2176 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2177 {
2178         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2179
2180         if (ctx != NULL)
2181                 cfs_atomic_inc(&ctx->sc_refcount);
2182 }
2183
2184 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2185 {
2186         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2187
2188         if (ctx == NULL)
2189                 return;
2190
2191         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2192         if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
2193                 if (ctx->sc_policy->sp_sops->free_ctx)
2194                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2195         }
2196         req->rq_svc_ctx = NULL;
2197 }
2198
2199 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2200 {
2201         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2202
2203         if (ctx == NULL)
2204                 return;
2205
2206         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2207         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2208                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2209 }
2210 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2211
2212 /****************************************
2213  * bulk security                        *
2214  ****************************************/
2215
2216 /**
2217  * Perform transformation upon the bulk data pointed to by \a desc. This is
2218  * called before transforming the request message.
2219  */
2220 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2221                           struct ptlrpc_bulk_desc *desc)
2222 {
2223         struct ptlrpc_cli_ctx *ctx;
2224
2225         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2226
2227         if (!req->rq_pack_bulk)
2228                 return 0;
2229
2230         ctx = req->rq_cli_ctx;
2231         if (ctx->cc_ops->wrap_bulk)
2232                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2233         return 0;
2234 }
2235 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2236
2237 /**
2238  * This is called after the reply message has been unwrapped.
2239  * Returns the number of bytes of plain text actually received, or an error code.
2240  */
2241 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2242                                  struct ptlrpc_bulk_desc *desc,
2243                                  int nob)
2244 {
2245         struct ptlrpc_cli_ctx  *ctx;
2246         int                     rc;
2247
2248         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2249
2250         if (!req->rq_pack_bulk)
2251                 return desc->bd_nob_transferred;
2252
2253         ctx = req->rq_cli_ctx;
2254         if (ctx->cc_ops->unwrap_bulk) {
2255                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2256                 if (rc < 0)
2257                         return rc;
2258         }
2259         return desc->bd_nob_transferred;
2260 }
2261 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2262
2263 /**
2264  * This is called after the reply message has been unwrapped.
2265  * Returns 0 on success or an error code.
2266  */
2267 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2268                                   struct ptlrpc_bulk_desc *desc)
2269 {
2270         struct ptlrpc_cli_ctx  *ctx;
2271         int                     rc;
2272
2273         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2274
2275         if (!req->rq_pack_bulk)
2276                 return 0;
2277
2278         ctx = req->rq_cli_ctx;
2279         if (ctx->cc_ops->unwrap_bulk) {
2280                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2281                 if (rc < 0)
2282                         return rc;
2283         }
2284
2285         /*
2286          * if everything went right, nob should equal nob_transferred.
2287          * in case of privacy mode, nob_transferred needs to be adjusted.
2288          */
2289         if (desc->bd_nob != desc->bd_nob_transferred) {
2290                 CERROR("nob %d doesn't match transferred nob %d",
2291                        desc->bd_nob, desc->bd_nob_transferred);
2292                 return -EPROTO;
2293         }
2294
2295         return 0;
2296 }
2297 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
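
/*
 * Client-side bulk sketch (illustrative only): for a bulk write the data
 * is wrapped before the request goes out and checked again once the reply
 * has been unwrapped; "desc" is the ptlrpc_bulk_desc attached to the req.
 *
 *      rc = sptlrpc_cli_wrap_bulk(req, desc);
 *      ... send the request, receive and unwrap the reply ...
 *      rc = sptlrpc_cli_unwrap_bulk_write(req, desc);
 *
 * For bulk reads, sptlrpc_cli_unwrap_bulk_read() is used instead and
 * returns the plain-text size actually received.
 */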
2298
2299 #ifdef HAVE_SERVER_SUPPORT
2300 /**
2301  * Perform transformation upon outgoing bulk read.
2302  */
2303 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2304                           struct ptlrpc_bulk_desc *desc)
2305 {
2306         struct ptlrpc_svc_ctx *ctx;
2307
2308         LASSERT(req->rq_bulk_read);
2309
2310         if (!req->rq_pack_bulk)
2311                 return 0;
2312
2313         ctx = req->rq_svc_ctx;
2314         if (ctx->sc_policy->sp_sops->wrap_bulk)
2315                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2316
2317         return 0;
2318 }
2319 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2320
2321 /**
2322  * Perform transformation upon incoming bulk write.
2323  */
2324 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2325                             struct ptlrpc_bulk_desc *desc)
2326 {
2327         struct ptlrpc_svc_ctx *ctx;
2328         int                    rc;
2329
2330         LASSERT(req->rq_bulk_write);
2331
2332         /*
2333          * in privacy mode, transferred should be >= expected; otherwise
2334          * transferred should be == expected.
2335          */
2336         if (desc->bd_nob_transferred < desc->bd_nob ||
2337             (desc->bd_nob_transferred > desc->bd_nob &&
2338              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2339              SPTLRPC_BULK_SVC_PRIV)) {
2340                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2341                           desc->bd_nob_transferred, desc->bd_nob);
2342                 return -ETIMEDOUT;
2343         }
2344
2345         if (!req->rq_pack_bulk)
2346                 return 0;
2347
2348         ctx = req->rq_svc_ctx;
2349         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2350                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2351                 if (rc)
2352                         CERROR("error unwrap bulk: %d\n", rc);
2353         }
2354
2355         /* return 0 to allow the reply to be sent */
2356         return 0;
2357 }
2358 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2359
2360 /**
2361  * Prepare buffers for incoming bulk write.
2362  */
2363 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2364                           struct ptlrpc_bulk_desc *desc)
2365 {
2366         struct ptlrpc_svc_ctx *ctx;
2367
2368         LASSERT(req->rq_bulk_write);
2369
2370         if (!req->rq_pack_bulk)
2371                 return 0;
2372
2373         ctx = req->rq_svc_ctx;
2374         if (ctx->sc_policy->sp_sops->prep_bulk)
2375                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2376
2377         return 0;
2378 }
2379 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2380
2381 #endif /* HAVE_SERVER_SUPPORT */
2382
2383 /****************************************
2384  * user descriptor helpers              *
2385  ****************************************/
2386
2387 int sptlrpc_current_user_desc_size(void)
2388 {
2389         int ngroups;
2390
2391 #ifdef __KERNEL__
2392         ngroups = current_ngroups;
2393
2394         if (ngroups > LUSTRE_MAX_GROUPS)
2395                 ngroups = LUSTRE_MAX_GROUPS;
2396 #else
2397         ngroups = 0;
2398 #endif
2399         return sptlrpc_user_desc_size(ngroups);
2400 }
2401 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2402
2403 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2404 {
2405         struct ptlrpc_user_desc *pud;
2406
2407         pud = lustre_msg_buf(msg, offset, 0);
2408
2409         pud->pud_uid = current_uid();
2410         pud->pud_gid = current_gid();
2411         pud->pud_fsuid = current_fsuid();
2412         pud->pud_fsgid = current_fsgid();
2413         pud->pud_cap = cfs_curproc_cap_pack();
2414         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2415
2416 #ifdef __KERNEL__
2417         task_lock(current);
2418         if (pud->pud_ngroups > current_ngroups)
2419                 pud->pud_ngroups = current_ngroups;
2420         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2421                pud->pud_ngroups * sizeof(__u32));
2422         task_unlock(current);
2423 #endif
2424
2425         return 0;
2426 }
2427 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2428
2429 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2430 {
2431         struct ptlrpc_user_desc *pud;
2432         int                      i;
2433
2434         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2435         if (!pud)
2436                 return -EINVAL;
2437
2438         if (swabbed) {
2439                 __swab32s(&pud->pud_uid);
2440                 __swab32s(&pud->pud_gid);
2441                 __swab32s(&pud->pud_fsuid);
2442                 __swab32s(&pud->pud_fsgid);
2443                 __swab32s(&pud->pud_cap);
2444                 __swab32s(&pud->pud_ngroups);
2445         }
2446
2447         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2448                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2449                 return -EINVAL;
2450         }
2451
2452         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2453             msg->lm_buflens[offset]) {
2454                 CERROR("%u groups are claimed but bufsize only %u\n",
2455                        pud->pud_ngroups, msg->lm_buflens[offset]);
2456                 return -EINVAL;
2457         }
2458
2459         if (swabbed) {
2460                 for (i = 0; i < pud->pud_ngroups; i++)
2461                         __swab32s(&pud->pud_groups[i]);
2462         }
2463
2464         return 0;
2465 }
2466 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
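
/*
 * Illustrative pairing of the user-descriptor helpers above (not built):
 * the client sizes and packs the descriptor into one message segment, and
 * the server validates/swabs it on receipt. "offset" is whatever segment
 * index the caller reserved for the descriptor when laying out the msg.
 *
 *      client:  buflens[offset] = sptlrpc_current_user_desc_size();
 *               ... allocate the request buffer ...
 *               sptlrpc_pack_user_desc(msg, offset);
 *
 *      server:  rc = sptlrpc_unpack_user_desc(msg, offset, swabbed);
 */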
2467
2468 /****************************************
2469  * misc helpers                         *
2470  ****************************************/
2471
2472 const char * sec2target_str(struct ptlrpc_sec *sec)
2473 {
2474         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2475                 return "*";
2476         if (sec_is_reverse(sec))
2477                 return "c";
2478         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2479 }
2480 EXPORT_SYMBOL(sec2target_str);
2481
2482 /*
2483  * return true if the bulk data is protected
2484  */
2485 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2486 {
2487         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2488         case SPTLRPC_BULK_SVC_INTG:
2489         case SPTLRPC_BULK_SVC_PRIV:
2490                 return 1;
2491         default:
2492                 return 0;
2493         }
2494 }
2495 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2496
2497 /****************************************
2498  * crypto API helper/alloc blkcipher    *
2499  ****************************************/
2500
2501 /****************************************
2502  * initialize/finalize                  *
2503  ****************************************/
2504
2505 int sptlrpc_init(void)
2506 {
2507         int rc;
2508
2509         rwlock_init(&policy_lock);
2510
2511         rc = sptlrpc_gc_init();
2512         if (rc)
2513                 goto out;
2514
2515         rc = sptlrpc_conf_init();
2516         if (rc)
2517                 goto out_gc;
2518
2519         rc = sptlrpc_enc_pool_init();
2520         if (rc)
2521                 goto out_conf;
2522
2523         rc = sptlrpc_null_init();
2524         if (rc)
2525                 goto out_pool;
2526
2527         rc = sptlrpc_plain_init();
2528         if (rc)
2529                 goto out_null;
2530
2531         rc = sptlrpc_lproc_init();
2532         if (rc)
2533                 goto out_plain;
2534
2535         return 0;
2536
2537 out_plain:
2538         sptlrpc_plain_fini();
2539 out_null:
2540         sptlrpc_null_fini();
2541 out_pool:
2542         sptlrpc_enc_pool_fini();
2543 out_conf:
2544         sptlrpc_conf_fini();
2545 out_gc:
2546         sptlrpc_gc_fini();
2547 out:
2548         return rc;
2549 }
2550
2551 void sptlrpc_fini(void)
2552 {
2553         sptlrpc_lproc_fini();
2554         sptlrpc_plain_fini();
2555         sptlrpc_null_fini();
2556         sptlrpc_enc_pool_fini();
2557         sptlrpc_conf_fini();
2558         sptlrpc_gc_fini();
2559 }