LU-5443 lustre: replace direct HZ access with kernel APIs
[fs/lustre-release.git] / lustre / ptlrpc / sec.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_SEC
42
43 #include <libcfs/libcfs.h>
44 #include <linux/crypto.h>
45 #include <linux/key.h>
46
47 #include <obd.h>
48 #include <obd_class.h>
49 #include <obd_support.h>
50 #include <lustre_net.h>
51 #include <lustre_import.h>
52 #include <lustre_dlm.h>
53 #include <lustre_sec.h>
54
55 #include "ptlrpc_internal.h"
56
57 /***********************************************
58  * policy registers                            *
59  ***********************************************/
60
61 static rwlock_t policy_lock;
62 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
63         NULL,
64 };
65
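/*
 * Register \a policy in the global policy table, indexed by
 * policy->sp_policy.  Returns 0 on success, -EINVAL if the policy number
 * is out of range, or -EALREADY if that slot is already occupied.
 */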
66 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
67 {
68         __u16 number = policy->sp_policy;
69
70         LASSERT(policy->sp_name);
71         LASSERT(policy->sp_cops);
72         LASSERT(policy->sp_sops);
73
74         if (number >= SPTLRPC_POLICY_MAX)
75                 return -EINVAL;
76
77         write_lock(&policy_lock);
78         if (unlikely(policies[number])) {
79                 write_unlock(&policy_lock);
80                 return -EALREADY;
81         }
82         policies[number] = policy;
83         write_unlock(&policy_lock);
84
85         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
86         return 0;
87 }
88 EXPORT_SYMBOL(sptlrpc_register_policy);
89
90 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
91 {
92         __u16 number = policy->sp_policy;
93
94         LASSERT(number < SPTLRPC_POLICY_MAX);
95
96         write_lock(&policy_lock);
97         if (unlikely(policies[number] == NULL)) {
98                 write_unlock(&policy_lock);
99                 CERROR("%s: already unregistered\n", policy->sp_name);
100                 return -EINVAL;
101         }
102
103         LASSERT(policies[number] == policy);
104         policies[number] = NULL;
105         write_unlock(&policy_lock);
106
107         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
108         return 0;
109 }
110 EXPORT_SYMBOL(sptlrpc_unregister_policy);
111
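/*
 * Map a wire flavor to its registered policy, taking a module reference
 * on success.  If the flavor belongs to the GSS policy and that policy is
 * not yet registered, try to load the ptlrpc_gss module once and retry
 * the lookup.  The caller is expected to drop the reference with
 * sptlrpc_policy_put() when done (see sptlrpc_sec_create() for an example).
 */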
112 static
113 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
114 {
115         static DEFINE_MUTEX(load_mutex);
116         static atomic_t           loaded = ATOMIC_INIT(0);
117         struct ptlrpc_sec_policy *policy;
118         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
119         __u16                     flag = 0;
120
121         if (number >= SPTLRPC_POLICY_MAX)
122                 return NULL;
123
124         while (1) {
125                 read_lock(&policy_lock);
126                 policy = policies[number];
127                 if (policy && !try_module_get(policy->sp_owner))
128                         policy = NULL;
129                 if (policy == NULL)
130                         flag = atomic_read(&loaded);
131                 read_unlock(&policy_lock);
132
133                 if (policy != NULL || flag != 0 ||
134                     number != SPTLRPC_POLICY_GSS)
135                         break;
136
137                 /* try to load gss module, once */
138                 mutex_lock(&load_mutex);
139                 if (atomic_read(&loaded) == 0) {
140                         if (request_module("ptlrpc_gss") == 0)
141                                 CDEBUG(D_SEC,
142                                        "module ptlrpc_gss loaded on demand\n");
143                         else
144                                 CERROR("Unable to load module ptlrpc_gss\n");
145
146                         atomic_set(&loaded, 1);
147                 }
148                 mutex_unlock(&load_mutex);
149         }
150
151         return policy;
152 }
153
154 __u32 sptlrpc_name2flavor_base(const char *name)
155 {
156         if (!strcmp(name, "null"))
157                 return SPTLRPC_FLVR_NULL;
158         if (!strcmp(name, "plain"))
159                 return SPTLRPC_FLVR_PLAIN;
160         if (!strcmp(name, "gssnull"))
161                 return SPTLRPC_FLVR_GSSNULL;
162         if (!strcmp(name, "krb5n"))
163                 return SPTLRPC_FLVR_KRB5N;
164         if (!strcmp(name, "krb5a"))
165                 return SPTLRPC_FLVR_KRB5A;
166         if (!strcmp(name, "krb5i"))
167                 return SPTLRPC_FLVR_KRB5I;
168         if (!strcmp(name, "krb5p"))
169                 return SPTLRPC_FLVR_KRB5P;
170         if (!strcmp(name, "ski"))
171                 return SPTLRPC_FLVR_SKI;
172         if (!strcmp(name, "skpi"))
173                 return SPTLRPC_FLVR_SKPI;
174
175         return SPTLRPC_FLVR_INVALID;
176 }
177 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
178
179 const char *sptlrpc_flavor2name_base(__u32 flvr)
180 {
181         __u32   base = SPTLRPC_FLVR_BASE(flvr);
182
183         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
184                 return "null";
185         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
186                 return "plain";
187         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
188                 return "gssnull";
189         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
190                 return "krb5n";
191         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
192                 return "krb5a";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
194                 return "krb5i";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
196                 return "krb5p";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
198                 return "ski";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
200                 return "skpi";
201
202         CERROR("invalid wire flavor 0x%x\n", flvr);
203         return "invalid";
204 }
205 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
206
207 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
208                                char *buf, int bufsize)
209 {
210         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
211                 snprintf(buf, bufsize, "hash:%s",
212                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
213         else
214                 snprintf(buf, bufsize, "%s",
215                          sptlrpc_flavor2name_base(sf->sf_rpc));
216
217         buf[bufsize - 1] = '\0';
218         return buf;
219 }
220 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
221
222 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
223 {
224         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
225
226         /*
227          * currently we don't support customized bulk specification for
228          * flavors other than plain
229          */
230         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
231                 char bspec[16];
232
233                 bspec[0] = '-';
234                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
235                 strncat(buf, bspec, bufsize);
236         }
237
238         buf[bufsize - 1] = '\0';
239         return buf;
240 }
241 EXPORT_SYMBOL(sptlrpc_flavor2name);
242
243 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
244 {
245         buf[0] = '\0';
246
247         if (flags & PTLRPC_SEC_FL_REVERSE)
248                 strlcat(buf, "reverse,", bufsize);
249         if (flags & PTLRPC_SEC_FL_ROOTONLY)
250                 strlcat(buf, "rootonly,", bufsize);
251         if (flags & PTLRPC_SEC_FL_UDESC)
252                 strlcat(buf, "udesc,", bufsize);
253         if (flags & PTLRPC_SEC_FL_BULK)
254                 strlcat(buf, "bulk,", bufsize);
255         if (buf[0] == '\0')
256                 strlcat(buf, "-,", bufsize);
257
258         return buf;
259 }
260 EXPORT_SYMBOL(sptlrpc_secflags2str);
261
262 /**************************************************
263  * client context APIs                            *
264  **************************************************/
265
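/*
 * Find or create the client context matching the current credentials for
 * \a sec.  Reverse and root-only flavors always use root credentials;
 * for reverse flavors the lookup additionally neither creates new contexts
 * nor removes dead ones.
 */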
266 static
267 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
268 {
269         struct vfs_cred vcred;
270         int create = 1, remove_dead = 1;
271
272         LASSERT(sec);
273         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
274
275         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
276                                      PTLRPC_SEC_FL_ROOTONLY)) {
277                 vcred.vc_uid = 0;
278                 vcred.vc_gid = 0;
279                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
280                         create = 0;
281                         remove_dead = 0;
282                 }
283         } else {
284                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
285                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
286         }
287
288         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
289                                                    remove_dead);
290 }
291
292 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
293 {
294         atomic_inc(&ctx->cc_refcount);
295         return ctx;
296 }
297 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
298
299 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
300 {
301         struct ptlrpc_sec *sec = ctx->cc_sec;
302
303         LASSERT(sec);
304         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
305
306         if (!atomic_dec_and_test(&ctx->cc_refcount))
307                 return;
308
309         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
310 }
311 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
312
313 /**
314  * Expire the client context immediately.
315  *
316  * \pre Caller must hold at least 1 reference on the \a ctx.
317  */
318 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
319 {
320         LASSERT(ctx->cc_ops->die);
321         ctx->cc_ops->die(ctx, 0);
322 }
323 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
324
325 /**
326  * Wake up the threads that are waiting for this client context. Called
327  * after some status change happens on \a ctx.
328  */
329 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
330 {
331         struct ptlrpc_request *req, *next;
332
333         spin_lock(&ctx->cc_lock);
334         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
335                                      rq_ctx_chain) {
336                 list_del_init(&req->rq_ctx_chain);
337                 ptlrpc_client_wake_req(req);
338         }
339         spin_unlock(&ctx->cc_lock);
340 }
341 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
342
343 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
344 {
345         LASSERT(ctx->cc_ops);
346
347         if (ctx->cc_ops->display == NULL)
348                 return 0;
349
350         return ctx->cc_ops->display(ctx, buf, bufsize);
351 }
352
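/*
 * If a delayed sec adapt was scheduled on the import (imp_sec_expire) and
 * its deadline has passed, clear it and perform the adapt immediately.
 */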
353 static int import_sec_check_expire(struct obd_import *imp)
354 {
355         int     adapt = 0;
356
357         spin_lock(&imp->imp_lock);
358         if (imp->imp_sec_expire &&
359             imp->imp_sec_expire < cfs_time_current_sec()) {
360                 adapt = 1;
361                 imp->imp_sec_expire = 0;
362         }
363         spin_unlock(&imp->imp_lock);
364
365         if (!adapt)
366                 return 0;
367
368         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
369         return sptlrpc_import_sec_adapt(imp, NULL, 0);
370 }
371
372 static int import_sec_validate_get(struct obd_import *imp,
373                                    struct ptlrpc_sec **sec)
374 {
375         int     rc;
376
377         if (unlikely(imp->imp_sec_expire)) {
378                 rc = import_sec_check_expire(imp);
379                 if (rc)
380                         return rc;
381         }
382
383         *sec = sptlrpc_import_sec_ref(imp);
384         if (*sec == NULL) {
385                 CERROR("import %p (%s) with no sec\n",
386                        imp, ptlrpc_import_state_name(imp->imp_state));
387                 return -EACCES;
388         }
389
390         if (unlikely((*sec)->ps_dying)) {
391                         CERROR("attempt to use dying sec %p\n", *sec);
392                 sptlrpc_sec_put(*sec);
393                 return -EACCES;
394         }
395
396         return 0;
397 }
398
399 /**
400  * Given a \a req, find or allocate an appropriate context for it.
401  * \pre req->rq_cli_ctx == NULL.
402  *
403  * \retval 0 succeed, and req->rq_cli_ctx is set.
404  * \retval -ev error number, and req->rq_cli_ctx == NULL.
405  */
406 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
407 {
408         struct obd_import *imp = req->rq_import;
409         struct ptlrpc_sec *sec;
410         int                rc;
411         ENTRY;
412
413         LASSERT(!req->rq_cli_ctx);
414         LASSERT(imp);
415
416         rc = import_sec_validate_get(imp, &sec);
417         if (rc)
418                 RETURN(rc);
419
420         req->rq_cli_ctx = get_my_ctx(sec);
421
422         sptlrpc_sec_put(sec);
423
424         if (!req->rq_cli_ctx) {
425                 CERROR("req %p: fail to get context\n", req);
426                 RETURN(-ENOMEM);
427         }
428
429         RETURN(0);
430 }
431
432 /**
433  * Drop the context for \a req.
434  * \pre req->rq_cli_ctx != NULL.
435  * \post req->rq_cli_ctx == NULL.
436  *
437  * If \a sync == 0, this function should return quickly without sleeping;
438  * otherwise it might trigger and wait for the whole process of sending
439  * a context-destroying rpc to the server.
440  */
441 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
442 {
443         ENTRY;
444
445         LASSERT(req);
446         LASSERT(req->rq_cli_ctx);
447
448         /* the request might be asked to release its context earlier while
449          * it is still on the context waiting list.
450          */
451         if (!list_empty(&req->rq_ctx_chain)) {
452                 spin_lock(&req->rq_cli_ctx->cc_lock);
453                 list_del_init(&req->rq_ctx_chain);
454                 spin_unlock(&req->rq_cli_ctx->cc_lock);
455         }
456
457         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
458         req->rq_cli_ctx = NULL;
459         EXIT;
460 }
461
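/*
 * Switch \a req from \a oldctx to \a newctx: save the request message,
 * free the security buffers that belong to the old context, recompute the
 * flavor for the new context, then re-allocate the request buffer and copy
 * the saved message back in.  On allocation failure the old flavor is
 * restored; the caller is responsible for restoring the old context.
 */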
462 static
463 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
464                            struct ptlrpc_cli_ctx *oldctx,
465                            struct ptlrpc_cli_ctx *newctx)
466 {
467         struct sptlrpc_flavor   old_flvr;
468         char                   *reqmsg = NULL; /* to workaround old gcc */
469         int                     reqmsg_size;
470         int                     rc = 0;
471
472         LASSERT(req->rq_reqmsg);
473         LASSERT(req->rq_reqlen);
474         LASSERT(req->rq_replen);
475
476         CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
477                "switch sec %p(%s) -> %p(%s)\n", req,
478                oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
479                newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
480                oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
481                newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
482
483         /* save flavor */
484         old_flvr = req->rq_flvr;
485
486         /* save request message */
487         reqmsg_size = req->rq_reqlen;
488         if (reqmsg_size != 0) {
489                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
490                 if (reqmsg == NULL)
491                         return -ENOMEM;
492                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
493         }
494
495         /* release old req/rep buf */
496         req->rq_cli_ctx = oldctx;
497         sptlrpc_cli_free_reqbuf(req);
498         sptlrpc_cli_free_repbuf(req);
499         req->rq_cli_ctx = newctx;
500
501         /* recalculate the flavor */
502         sptlrpc_req_set_flavor(req, 0);
503
504         /* alloc new request buffer
505          * we don't need to alloc the reply buffer here; leave it to the
506          * rest of the ptlrpc processing */
507         if (reqmsg_size != 0) {
508                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
509                 if (!rc) {
510                         LASSERT(req->rq_reqmsg);
511                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
512                 } else {
513                         CWARN("failed to alloc reqbuf: %d\n", rc);
514                         req->rq_flvr = old_flvr;
515                 }
516
517                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
518         }
519         return rc;
520 }
521
522 /**
523  * If the current context of \a req is dead somehow, e.g. we just switched
524  * flavor and thus marked the original contexts dead, find a new context for
525  * it. If no switch is needed, \a req will end up with the same context.
526  *
527  * \note a request must always have a context, to keep other parts of the
528  * code happy. On any failure during the switch, we must restore the old one.
529  */
530 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
531 {
532         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
533         struct ptlrpc_cli_ctx *newctx;
534         int                    rc;
535         ENTRY;
536
537         LASSERT(oldctx);
538
539         sptlrpc_cli_ctx_get(oldctx);
540         sptlrpc_req_put_ctx(req, 0);
541
542         rc = sptlrpc_req_get_ctx(req);
543         if (unlikely(rc)) {
544                 LASSERT(!req->rq_cli_ctx);
545
546                 /* restore old ctx */
547                 req->rq_cli_ctx = oldctx;
548                 RETURN(rc);
549         }
550
551         newctx = req->rq_cli_ctx;
552         LASSERT(newctx);
553
554         if (unlikely(newctx == oldctx && 
555                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
556                 /*
557                  * still got the old dead ctx; usually this means the system is too busy
558                  */
559                 CDEBUG(D_SEC,
560                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
561                        newctx, newctx->cc_flags);
562
563                 schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
564                         msecs_to_jiffies(MSEC_PER_SEC));
565         } else {
566                 /*
567                  * it's possible newctx == oldctx if we're switching
568                  * subflavor with the same sec.
569                  */
570                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
571                 if (rc) {
572                         /* restore old ctx */
573                         sptlrpc_req_put_ctx(req, 0);
574                         req->rq_cli_ctx = oldctx;
575                         RETURN(rc);
576                 }
577
578                 LASSERT(req->rq_cli_ctx == newctx);
579         }
580
581         sptlrpc_cli_ctx_put(oldctx, 1);
582         RETURN(0);
583 }
584 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
585
586 static
587 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
588 {
589         if (cli_ctx_is_refreshed(ctx))
590                 return 1;
591         return 0;
592 }
593
594 static
595 int ctx_refresh_timeout(void *data)
596 {
597         struct ptlrpc_request *req = data;
598         int rc;
599
600         /* conn_cnt is needed in expire_one_request */
601         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
602
603         rc = ptlrpc_expire_one_request(req, 1);
604         /* if we started recovery, we should mark this ctx dead; otherwise,
605          * if lgssd died nobody would ever retire this ctx and subsequent
606          * connect attempts would keep finding the same ctx, causing a
607          * deadlock.  There's an assumption that the expiry time of the
608          * request is later than the context refresh expiry time.
609          */
610         if (rc == 0)
611                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
612         return rc;
613 }
614
615 static
616 void ctx_refresh_interrupt(void *data)
617 {
618         struct ptlrpc_request *req = data;
619
620         spin_lock(&req->rq_lock);
621         req->rq_intr = 1;
622         spin_unlock(&req->rq_lock);
623 }
624
625 static
626 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
627 {
628         spin_lock(&ctx->cc_lock);
629         if (!list_empty(&req->rq_ctx_chain))
630                 list_del_init(&req->rq_ctx_chain);
631         spin_unlock(&ctx->cc_lock);
632 }
633
634 /**
635  * Refresh the context of \a req, if it's not up-to-date.
636  * \param timeout
637  * - < 0: don't wait
638  * - = 0: wait until success or a fatal error occurs
639  * - > 0: timeout value (in seconds)
640  *
641  * The status of the context could be changed by other threads at any time.
642  * We allow this race, but once we return 0 the caller will assume the
643  * context is up to date and keep using it until the owning rpc is done.
644  *
645  * \retval 0 only if the context is up to date.
646  * \retval -ev error number.
647  */
648 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
649 {
650         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
651         struct ptlrpc_sec      *sec;
652         struct l_wait_info      lwi;
653         int                     rc;
654         ENTRY;
655
656         LASSERT(ctx);
657
658         if (req->rq_ctx_init || req->rq_ctx_fini)
659                 RETURN(0);
660
661         /*
662          * during the process a request's context might even change type
663          * (e.g. from a gss ctx to a null ctx), so on each loop we need to
664          * re-check everything
665          */
666 again:
667         rc = import_sec_validate_get(req->rq_import, &sec);
668         if (rc)
669                 RETURN(rc);
670
671         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
672                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
673                       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
674                 req_off_ctx_list(req, ctx);
675                 sptlrpc_req_replace_dead_ctx(req);
676                 ctx = req->rq_cli_ctx;
677         }
678         sptlrpc_sec_put(sec);
679
680         if (cli_ctx_is_eternal(ctx))
681                 RETURN(0);
682
683         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
684                 LASSERT(ctx->cc_ops->refresh);
685                 ctx->cc_ops->refresh(ctx);
686         }
687         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
688
689         LASSERT(ctx->cc_ops->validate);
690         if (ctx->cc_ops->validate(ctx) == 0) {
691                 req_off_ctx_list(req, ctx);
692                 RETURN(0);
693         }
694
695         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
696                 spin_lock(&req->rq_lock);
697                 req->rq_err = 1;
698                 spin_unlock(&req->rq_lock);
699                 req_off_ctx_list(req, ctx);
700                 RETURN(-EPERM);
701         }
702
703         /*
704          * There's a subtle issue for resending RPCs, suppose following
705          * situation:
706          *  1. the request was sent to server.
707          *  2. recovery was kicked start, after finished the request was
708          *     marked as resent.
709          *  3. resend the request.
710          *  4. old reply from server received, we accept and verify the reply.
711          *     this has to succeed, otherwise the error would become visible
712          *     to the application.
713          *  5. new reply from server received, dropped by LNet.
714          *
715          * Note the xid of old & new request is the same. We can't simply
716          * change the xid of the resent request because the server relies on
717          * it for reply reconstruction.
718          *
719          * Commonly the original context should be uptodate because we have
720          * a nice expiry time; the server will keep its context because we
721          * still hold a ref on the old context, which prevents the context-
722          * destroying RPC from being sent.  So the server can still accept
723          * the request and finish the RPC.  But if that's not the case:
724          *  1. If the server-side context has been trimmed, a NO_CONTEXT will
725          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
726          *     new context by force.
727          *  2. If the current context has never been refreshed, we are fine:
728          *     we never really sent a request with the old context.
729          */
730         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
731             unlikely(req->rq_reqmsg) &&
732             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
733                 req_off_ctx_list(req, ctx);
734                 RETURN(0);
735         }
736
737         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
738                 req_off_ctx_list(req, ctx);
739                 /*
740                  * don't switch ctx if import was deactivated
741                  */
742                 if (req->rq_import->imp_deactive) {
743                         spin_lock(&req->rq_lock);
744                         req->rq_err = 1;
745                         spin_unlock(&req->rq_lock);
746                         RETURN(-EINTR);
747                 }
748
749                 rc = sptlrpc_req_replace_dead_ctx(req);
750                 if (rc) {
751                         LASSERT(ctx == req->rq_cli_ctx);
752                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
753                                req, ctx, rc);
754                         spin_lock(&req->rq_lock);
755                         req->rq_err = 1;
756                         spin_unlock(&req->rq_lock);
757                         RETURN(rc);
758                 }
759
760                 ctx = req->rq_cli_ctx;
761                 goto again;
762         }
763
764          * Now we're sure this context is in the middle of an upcall; add
765          * ourselves to its waiting list
766          * waiting list
767          */
768         spin_lock(&ctx->cc_lock);
769         if (list_empty(&req->rq_ctx_chain))
770                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
771         spin_unlock(&ctx->cc_lock);
772
773         if (timeout < 0)
774                 RETURN(-EWOULDBLOCK);
775
776         /* Clear any flags that may be present from previous sends */
777         LASSERT(req->rq_receiving_reply == 0);
778         spin_lock(&req->rq_lock);
779         req->rq_err = 0;
780         req->rq_timedout = 0;
781         req->rq_resend = 0;
782         req->rq_restart = 0;
783         spin_unlock(&req->rq_lock);
784
785         lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
786                                ctx_refresh_timeout,
787                                ctx_refresh_interrupt, req);
788         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
789
790         /*
791          * The following cases could lead us here:
792          * - successfully refreshed;
793          * - interrupted;
794          * - timed out, and we don't want to recover from the failure;
795          * - timed out, and woken up when recovery finished;
796          * - someone else marked this ctx dead by force;
797          * - someone invalidated the req and called ptlrpc_client_wake_req(),
798          *   e.g. ptlrpc_abort_inflight();
799          */
800         if (!cli_ctx_is_refreshed(ctx)) {
801                 /* timed out or interrupted */
802                 req_off_ctx_list(req, ctx);
803
804                 LASSERT(rc != 0);
805                 RETURN(rc);
806         }
807
808         goto again;
809 }
810
811 /**
812  * Initialize flavor settings for \a req, according to \a opcode.
813  *
814  * \note this could be called in two situations:
815  * - new request from ptlrpc_pre_req(), with proper @opcode
816  * - old request which changed ctx in the middle, with @opcode == 0
817  */
818 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
819 {
820         struct ptlrpc_sec *sec;
821
822         LASSERT(req->rq_import);
823         LASSERT(req->rq_cli_ctx);
824         LASSERT(req->rq_cli_ctx->cc_sec);
825         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
826
827         /* special security flags according to opcode */
828         switch (opcode) {
829         case OST_READ:
830         case MDS_READPAGE:
831         case MGS_CONFIG_READ:
832         case OBD_IDX_READ:
833                 req->rq_bulk_read = 1;
834                 break;
835         case OST_WRITE:
836         case MDS_WRITEPAGE:
837                 req->rq_bulk_write = 1;
838                 break;
839         case SEC_CTX_INIT:
840                 req->rq_ctx_init = 1;
841                 break;
842         case SEC_CTX_FINI:
843                 req->rq_ctx_fini = 1;
844                 break;
845         case 0:
846                 /* init/fini rpc won't be resent, so it can't get here */
847                 LASSERT(req->rq_ctx_init == 0);
848                 LASSERT(req->rq_ctx_fini == 0);
849
850                 /* cleanup flags, which should be recalculated */
851                 req->rq_pack_udesc = 0;
852                 req->rq_pack_bulk = 0;
853                 break;
854         }
855
856         sec = req->rq_cli_ctx->cc_sec;
857
858         spin_lock(&sec->ps_lock);
859         req->rq_flvr = sec->ps_flvr;
860         spin_unlock(&sec->ps_lock);
861
862         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
863          * destruction rpc */
864         if (unlikely(req->rq_ctx_init))
865                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
866         else if (unlikely(req->rq_ctx_fini))
867                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
868
869         /* user descriptor flag, null security can't do it anyway */
870         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
871             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
872                 req->rq_pack_udesc = 1;
873
874         /* bulk security flag */
875         if ((req->rq_bulk_read || req->rq_bulk_write) &&
876             sptlrpc_flavor_has_bulk(&req->rq_flvr))
877                 req->rq_pack_bulk = 1;
878 }
879
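/*
 * Called once the request has gone out on the wire.  For privacy (SVC_PRIV)
 * flavors the clear-text message lives in rq_clrbuf while rq_reqbuf only
 * holds the ciphered copy, so the wire buffer can be released early here,
 * unless it came from the preallocated pool.
 */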
880 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
881 {
882         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
883                 return;
884
885         LASSERT(req->rq_clrbuf);
886         if (req->rq_pool || !req->rq_reqbuf)
887                 return;
888
889         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
890         req->rq_reqbuf = NULL;
891         req->rq_reqbuf_len = 0;
892 }
893
894 /**
895  * Given an import \a imp, check whether the current user has a valid context
896  * or not. We may create a new context, try to refresh it, and retry
897  * repeatedly in case of non-fatal errors. Returns 0 on success.
898  */
899 int sptlrpc_import_check_ctx(struct obd_import *imp)
900 {
901         struct ptlrpc_sec     *sec;
902         struct ptlrpc_cli_ctx *ctx;
903         struct ptlrpc_request *req = NULL;
904         int rc;
905         ENTRY;
906
907         might_sleep();
908
909         sec = sptlrpc_import_sec_ref(imp);
910         ctx = get_my_ctx(sec);
911         sptlrpc_sec_put(sec);
912
913         if (!ctx)
914                 RETURN(-ENOMEM);
915
916         if (cli_ctx_is_eternal(ctx) ||
917             ctx->cc_ops->validate(ctx) == 0) {
918                 sptlrpc_cli_ctx_put(ctx, 1);
919                 RETURN(0);
920         }
921
922         if (cli_ctx_is_error(ctx)) {
923                 sptlrpc_cli_ctx_put(ctx, 1);
924                 RETURN(-EACCES);
925         }
926
927         req = ptlrpc_request_cache_alloc(GFP_NOFS);
928         if (!req)
929                 RETURN(-ENOMEM);
930
931         spin_lock_init(&req->rq_lock);
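        /* this is a fake, locally allocated request that never goes on the
         * wire; inflate the refcount so a stray reference drop can never
         * free it, and release it explicitly below */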
932         atomic_set(&req->rq_refcount, 10000);
933         INIT_LIST_HEAD(&req->rq_ctx_chain);
934         init_waitqueue_head(&req->rq_reply_waitq);
935         init_waitqueue_head(&req->rq_set_waitq);
936         req->rq_import = imp;
937         req->rq_flvr = sec->ps_flvr;
938         req->rq_cli_ctx = ctx;
939
940         rc = sptlrpc_req_refresh_ctx(req, 0);
941         LASSERT(list_empty(&req->rq_ctx_chain));
942         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
943         ptlrpc_request_cache_free(req);
944
945         RETURN(rc);
946 }
947
948 /**
949  * Used by ptlrpc client, to perform the pre-defined security transformation
950  * upon the request message of \a req. After this function is called,
951  * req->rq_reqmsg is still accessible as clear text.
952  */
953 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
954 {
955         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
956         int rc = 0;
957         ENTRY;
958
959         LASSERT(ctx);
960         LASSERT(ctx->cc_sec);
961         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
962
963         /* we wrap bulk request here because now we can be sure
964          * the context is uptodate.
965          */
966         if (req->rq_bulk) {
967                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
968                 if (rc)
969                         RETURN(rc);
970         }
971
972         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
973         case SPTLRPC_SVC_NULL:
974         case SPTLRPC_SVC_AUTH:
975         case SPTLRPC_SVC_INTG:
976                 LASSERT(ctx->cc_ops->sign);
977                 rc = ctx->cc_ops->sign(ctx, req);
978                 break;
979         case SPTLRPC_SVC_PRIV:
980                 LASSERT(ctx->cc_ops->seal);
981                 rc = ctx->cc_ops->seal(ctx, req);
982                 break;
983         default:
984                 LBUG();
985         }
986
987         if (rc == 0) {
988                 LASSERT(req->rq_reqdata_len);
989                 LASSERT(req->rq_reqdata_len % 8 == 0);
990                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
991         }
992
993         RETURN(rc);
994 }
995
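/*
 * Common reply unwrapping for both regular and early replies: unpack and
 * possibly swab the header of rq_repdata, sanity-check its length, make sure
 * the reply policy matches the request policy, then verify (for NULL/AUTH/INTG
 * services) or unseal (for PRIV) the message via the context ops.
 */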
996 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
997 {
998         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
999         int                    rc;
1000         ENTRY;
1001
1002         LASSERT(ctx);
1003         LASSERT(ctx->cc_sec);
1004         LASSERT(req->rq_repbuf);
1005         LASSERT(req->rq_repdata);
1006         LASSERT(req->rq_repmsg == NULL);
1007
1008         req->rq_rep_swab_mask = 0;
1009
1010         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1011         switch (rc) {
1012         case 1:
1013                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
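                /* fall through */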
1014         case 0:
1015                 break;
1016         default:
1017                 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
1018                 RETURN(-EPROTO);
1019         }
1020
1021         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1022                 CERROR("replied data length %d too small\n",
1023                        req->rq_repdata_len);
1024                 RETURN(-EPROTO);
1025         }
1026
1027         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1028             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1029                 CERROR("reply policy %u doesn't match request policy %u\n",
1030                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1031                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1032                 RETURN(-EPROTO);
1033         }
1034
1035         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1036         case SPTLRPC_SVC_NULL:
1037         case SPTLRPC_SVC_AUTH:
1038         case SPTLRPC_SVC_INTG:
1039                 LASSERT(ctx->cc_ops->verify);
1040                 rc = ctx->cc_ops->verify(ctx, req);
1041                 break;
1042         case SPTLRPC_SVC_PRIV:
1043                 LASSERT(ctx->cc_ops->unseal);
1044                 rc = ctx->cc_ops->unseal(ctx, req);
1045                 break;
1046         default:
1047                 LBUG();
1048         }
1049         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1050
1051         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1052             !req->rq_ctx_init)
1053                 req->rq_rep_swab_mask = 0;
1054         RETURN(rc);
1055 }
1056
1057 /**
1058  * Used by ptlrpc client, to perform security transformation upon the reply
1059  * message of \a req. After returning successfully, req->rq_repmsg points to
1060  * the reply message in clear text.
1061  *
1062  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1063  * going to change.
1064  */
1065 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1066 {
1067         LASSERT(req->rq_repbuf);
1068         LASSERT(req->rq_repdata == NULL);
1069         LASSERT(req->rq_repmsg == NULL);
1070         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1071
1072         if (req->rq_reply_off == 0 &&
1073             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1074                 CERROR("real reply with offset 0\n");
1075                 return -EPROTO;
1076         }
1077
1078         if (req->rq_reply_off % 8 != 0) {
1079                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1080                 return -EPROTO;
1081         }
1082
1083         req->rq_repdata = (struct lustre_msg *)
1084                                 (req->rq_repbuf + req->rq_reply_off);
1085         req->rq_repdata_len = req->rq_nob_received;
1086
1087         return do_cli_unwrap_reply(req);
1088 }
1089
1090 /**
1091  * Used by ptlrpc client, to perform security transformation upon the early
1092  * reply message of \a req. We expect the rq_reply_off is 0, and
1093  * rq_nob_received is the early reply size.
1094  * 
1095  * Because the receive buffer might still be posted, the reply data might be
1096  * changed at any time, whether we hold rq_lock or not. For this reason
1097  * we allocate a separate ptlrpc_request and reply buffer for early reply
1098  * processing.
1099  *
1100  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1101  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1102  * \a *req_ret to release it.
1103  * \retval -ev error number, and \a req_ret will not be set.
1104  */
1105 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1106                                    struct ptlrpc_request **req_ret)
1107 {
1108         struct ptlrpc_request  *early_req;
1109         char                   *early_buf;
1110         int                     early_bufsz, early_size;
1111         int                     rc;
1112         ENTRY;
1113
1114         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1115         if (early_req == NULL)
1116                 RETURN(-ENOMEM);
1117
1118         early_size = req->rq_nob_received;
1119         early_bufsz = size_roundup_power2(early_size);
1120         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1121         if (early_buf == NULL)
1122                 GOTO(err_req, rc = -ENOMEM);
1123
1124         /* sanity checks and data copy-out are done inside the spinlock */
1125         spin_lock(&req->rq_lock);
1126
1127         if (req->rq_replied) {
1128                 spin_unlock(&req->rq_lock);
1129                 GOTO(err_buf, rc = -EALREADY);
1130         }
1131
1132         LASSERT(req->rq_repbuf);
1133         LASSERT(req->rq_repdata == NULL);
1134         LASSERT(req->rq_repmsg == NULL);
1135
1136         if (req->rq_reply_off != 0) {
1137                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1138                 spin_unlock(&req->rq_lock);
1139                 GOTO(err_buf, rc = -EPROTO);
1140         }
1141
1142         if (req->rq_nob_received != early_size) {
1143                 /* even if another early reply arrived, the size should be the same */
1144                 CERROR("data size has changed from %u to %u\n",
1145                        early_size, req->rq_nob_received);
1146                 spin_unlock(&req->rq_lock);
1147                 GOTO(err_buf, rc = -EINVAL);
1148         }
1149
1150         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1151                 CERROR("early reply length %d too small\n",
1152                        req->rq_nob_received);
1153                 spin_unlock(&req->rq_lock);
1154                 GOTO(err_buf, rc = -EALREADY);
1155         }
1156
1157         memcpy(early_buf, req->rq_repbuf, early_size);
1158         spin_unlock(&req->rq_lock);
1159
1160         spin_lock_init(&early_req->rq_lock);
1161         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1162         early_req->rq_flvr = req->rq_flvr;
1163         early_req->rq_repbuf = early_buf;
1164         early_req->rq_repbuf_len = early_bufsz;
1165         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1166         early_req->rq_repdata_len = early_size;
1167         early_req->rq_early = 1;
1168         early_req->rq_reqmsg = req->rq_reqmsg;
1169
1170         rc = do_cli_unwrap_reply(early_req);
1171         if (rc) {
1172                 DEBUG_REQ(D_ADAPTTO, early_req,
1173                           "error %d unwrap early reply", rc);
1174                 GOTO(err_ctx, rc);
1175         }
1176
1177         LASSERT(early_req->rq_repmsg);
1178         *req_ret = early_req;
1179         RETURN(0);
1180
1181 err_ctx:
1182         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1183 err_buf:
1184         OBD_FREE_LARGE(early_buf, early_bufsz);
1185 err_req:
1186         ptlrpc_request_cache_free(early_req);
1187         RETURN(rc);
1188 }
1189
1190 /**
1191  * Used by ptlrpc client, to release a processed early reply \a early_req.
1192  *
1193  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1194  */
1195 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1196 {
1197         LASSERT(early_req->rq_repbuf);
1198         LASSERT(early_req->rq_repdata);
1199         LASSERT(early_req->rq_repmsg);
1200
1201         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1202         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1203         ptlrpc_request_cache_free(early_req);
1204 }
1205
1206 /**************************************************
1207  * sec ID                                         *
1208  **************************************************/
1209
1210 /*
1211  * "fixed" sec (e.g. null) use sec_id < 0
1212  */
1213 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1214
1215 int sptlrpc_get_next_secid(void)
1216 {
1217         return atomic_inc_return(&sptlrpc_sec_id);
1218 }
1219 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1220
1221 /**************************************************
1222  * client side high-level security APIs           *
1223  **************************************************/
1224
1225 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1226                                    int grace, int force)
1227 {
1228         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1229
1230         LASSERT(policy->sp_cops);
1231         LASSERT(policy->sp_cops->flush_ctx_cache);
1232
1233         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1234 }
1235
1236 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1237 {
1238         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1239
1240         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1241         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1242         LASSERT(policy->sp_cops->destroy_sec);
1243
1244         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1245
1246         policy->sp_cops->destroy_sec(sec);
1247         sptlrpc_policy_put(policy);
1248 }
1249
1250 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1251 {
1252         sec_cop_destroy_sec(sec);
1253 }
1254 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1255
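/*
 * If the policy implements kill_sec, notify it that this sec is being torn
 * down and flush every cached context it holds (uid -1 means all users),
 * in grace mode and by force.
 */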
1256 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1257 {
1258         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1259
1260         if (sec->ps_policy->sp_cops->kill_sec) {
1261                 sec->ps_policy->sp_cops->kill_sec(sec);
1262
1263                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1264         }
1265 }
1266
1267 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1268 {
1269         if (sec)
1270                 atomic_inc(&sec->ps_refcount);
1271
1272         return sec;
1273 }
1274 EXPORT_SYMBOL(sptlrpc_sec_get);
1275
1276 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1277 {
1278         if (sec) {
1279                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1280
1281                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1282                         sptlrpc_gc_del_sec(sec);
1283                         sec_cop_destroy_sec(sec);
1284                 }
1285         }
1286 }
1287 EXPORT_SYMBOL(sptlrpc_sec_put);
1288
1289 /*
1290  * the policy module is responsible for taking a reference on the import
1291  */
1292 static
1293 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1294                                        struct ptlrpc_svc_ctx *svc_ctx,
1295                                        struct sptlrpc_flavor *sf,
1296                                        enum lustre_sec_part sp)
1297 {
1298         struct ptlrpc_sec_policy *policy;
1299         struct ptlrpc_sec        *sec;
1300         char                      str[32];
1301         ENTRY;
1302
1303         if (svc_ctx) {
1304                 LASSERT(imp->imp_dlm_fake == 1);
1305
1306                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1307                        imp->imp_obd->obd_type->typ_name,
1308                        imp->imp_obd->obd_name,
1309                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1310
1311                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1312                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1313         } else {
1314                 LASSERT(imp->imp_dlm_fake == 0);
1315
1316                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1317                        imp->imp_obd->obd_type->typ_name,
1318                        imp->imp_obd->obd_name,
1319                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1320
1321                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1322                 if (!policy) {
1323                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1324                         RETURN(NULL);
1325                 }
1326         }
1327
1328         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1329         if (sec) {
1330                 atomic_inc(&sec->ps_refcount);
1331
1332                 sec->ps_part = sp;
1333
1334                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1335                         sptlrpc_gc_add_sec(sec);
1336         } else {
1337                 sptlrpc_policy_put(policy);
1338         }
1339
1340         RETURN(sec);
1341 }
1342
1343 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1344 {
1345         struct ptlrpc_sec *sec;
1346
1347         spin_lock(&imp->imp_lock);
1348         sec = sptlrpc_sec_get(imp->imp_sec);
1349         spin_unlock(&imp->imp_lock);
1350
1351         return sec;
1352 }
1353 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1354
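/*
 * Install \a sec as the import's current sec under imp_lock.  Any previous
 * sec is killed (its context cache flushed) and the reference the import
 * held on it is dropped.
 */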
1355 static void sptlrpc_import_sec_install(struct obd_import *imp,
1356                                        struct ptlrpc_sec *sec)
1357 {
1358         struct ptlrpc_sec *old_sec;
1359
1360         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1361
1362         spin_lock(&imp->imp_lock);
1363         old_sec = imp->imp_sec;
1364         imp->imp_sec = sec;
1365         spin_unlock(&imp->imp_lock);
1366
1367         if (old_sec) {
1368                 sptlrpc_sec_kill(old_sec);
1369
1370                 /* balance the ref taken by this import */
1371                 sptlrpc_sec_put(old_sec);
1372         }
1373 }
1374
1375 static inline
1376 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1377 {
1378         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1379 }
1380
1381 static inline
1382 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1383 {
1384         *dst = *src;
1385 }
1386
1387 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1388                                              struct ptlrpc_sec *sec,
1389                                              struct sptlrpc_flavor *sf)
1390 {
1391         char    str1[32], str2[32];
1392
1393         if (sec->ps_flvr.sf_flags != sf->sf_flags)
1394                 CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
1395                        sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1396                                             str1, sizeof(str1)),
1397                        sptlrpc_secflags2str(sf->sf_flags,
1398                                             str2, sizeof(str2)));
1399
1400         spin_lock(&sec->ps_lock);
1401         flavor_copy(&sec->ps_flvr, sf);
1402         spin_unlock(&sec->ps_lock);
1403 }
1404
1405 /**
1406  * Get an appropriate ptlrpc_sec for the \a imp, according to the current
1407  * configuration. When called, imp->imp_sec may or may not be NULL.
1408  *
1409  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1410  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1411  */
1412 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1413                              struct ptlrpc_svc_ctx *svc_ctx,
1414                              struct sptlrpc_flavor *flvr)
1415 {
1416         struct ptlrpc_connection   *conn;
1417         struct sptlrpc_flavor       sf;
1418         struct ptlrpc_sec          *sec, *newsec;
1419         enum lustre_sec_part        sp;
1420         char                        str[24];
1421         int                         rc = 0;
1422         ENTRY;
1423
1424         might_sleep();
1425
1426         if (imp == NULL)
1427                 RETURN(0);
1428
1429         conn = imp->imp_connection;
1430
1431         if (svc_ctx == NULL) {
1432                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1433                 /*
1434                  * normal import: determine the flavor from the rule set,
1435                  * except for the mgc, whose flavor is predetermined.
1436                  */
1437                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1438                         sf = cliobd->cl_flvr_mgc;
1439                 else 
1440                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1441                                                    cliobd->cl_sp_to,
1442                                                    &cliobd->cl_target_uuid,
1443                                                    conn->c_self, &sf);
1444
1445                 sp = imp->imp_obd->u.cli.cl_sp_me;
1446         } else {
1447                 /* reverse import: determine flavor from the incoming request */
1448                 sf = *flvr;
1449
1450                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1451                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1452                                       PTLRPC_SEC_FL_ROOTONLY;
1453
1454                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1455         }
1456
1457         sec = sptlrpc_import_sec_ref(imp);
1458         if (sec) {
1459                 char    str2[24];
1460
1461                 if (flavor_equal(&sf, &sec->ps_flvr))
1462                         GOTO(out, rc);
1463
1464                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1465                        imp->imp_obd->obd_name,
1466                        obd_uuid2str(&conn->c_remote_uuid),
1467                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1468                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1469
1470                 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1471                     SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1472                     SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1473                     SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1474                         sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1475                         GOTO(out, rc);
1476                 }
1477         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1478                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1479                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1480                        imp->imp_obd->obd_name,
1481                        obd_uuid2str(&conn->c_remote_uuid),
1482                        LNET_NIDNET(conn->c_self),
1483                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1484         }
1485
1486         mutex_lock(&imp->imp_sec_mutex);
1487
1488         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1489         if (newsec) {
1490                 sptlrpc_import_sec_install(imp, newsec);
1491         } else {
1492                 CERROR("import %s->%s: failed to create new sec\n",
1493                        imp->imp_obd->obd_name,
1494                        obd_uuid2str(&conn->c_remote_uuid));
1495                 rc = -EPERM;
1496         }
1497
1498         mutex_unlock(&imp->imp_sec_mutex);
1499 out:
1500         sptlrpc_sec_put(sec);
1501         RETURN(rc);
1502 }
1503
1504 void sptlrpc_import_sec_put(struct obd_import *imp)
1505 {
1506         if (imp->imp_sec) {
1507                 sptlrpc_sec_kill(imp->imp_sec);
1508
1509                 sptlrpc_sec_put(imp->imp_sec);
1510                 imp->imp_sec = NULL;
1511         }
1512 }
1513
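/*
 * Flush cached client contexts of \a imp for the given \a uid (-1 means
 * every user), passing the grace/force options through to the policy.
 */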
1514 static void import_flush_ctx_common(struct obd_import *imp,
1515                                     uid_t uid, int grace, int force)
1516 {
1517         struct ptlrpc_sec *sec;
1518
1519         if (imp == NULL)
1520                 return;
1521
1522         sec = sptlrpc_import_sec_ref(imp);
1523         if (sec == NULL)
1524                 return;
1525
1526         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1527         sptlrpc_sec_put(sec);
1528 }
1529
1530 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1531 {
1532         /* it's important to use grace mode, see the explanation in
1533          * sptlrpc_req_refresh_ctx() */
1534         import_flush_ctx_common(imp, 0, 1, 1);
1535 }
1536
1537 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1538 {
1539         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1540                                 1, 1);
1541 }
1542 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1543
1544 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1545 {
1546         import_flush_ctx_common(imp, -1, 1, 1);
1547 }
1548 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1549
1550 /**
1551  * Used by the ptlrpc client to allocate the request buffer of \a req. Upon
1552  * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
1553  */
1554 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1555 {
1556         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1557         struct ptlrpc_sec_policy *policy;
1558         int rc;
1559
1560         LASSERT(ctx);
1561         LASSERT(ctx->cc_sec);
1562         LASSERT(ctx->cc_sec->ps_policy);
1563         LASSERT(req->rq_reqmsg == NULL);
1564         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1565
1566         policy = ctx->cc_sec->ps_policy;
1567         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1568         if (!rc) {
1569                 LASSERT(req->rq_reqmsg);
1570                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1571
1572                 /* zeroing preallocated buffer */
1573                 if (req->rq_pool)
1574                         memset(req->rq_reqmsg, 0, msgsize);
1575         }
1576
1577         return rc;
1578 }
1579
1580 /**
1581  * Used by ptlrpc client to free request buffer of \a req. After this
1582  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1583  */
1584 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1585 {
1586         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1587         struct ptlrpc_sec_policy *policy;
1588
1589         LASSERT(ctx);
1590         LASSERT(ctx->cc_sec);
1591         LASSERT(ctx->cc_sec->ps_policy);
1592         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1593
1594         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1595                 return;
1596
1597         policy = ctx->cc_sec->ps_policy;
1598         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1599         req->rq_reqmsg = NULL;
1600 }
1601
1602 /*
1603  * NOTE caller must guarantee the buffer size is enough for the enlargement
1604  */
1605 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1606                                   int segment, int newsize)
1607 {
1608         void   *src, *dst;
1609         int     oldsize, oldmsg_size, movesize;
1610
1611         LASSERT(segment < msg->lm_bufcount);
1612         LASSERT(msg->lm_buflens[segment] <= newsize);
1613
1614         if (msg->lm_buflens[segment] == newsize)
1615                 return;
1616
1617         /* nothing to do if we are enlarging the last segment */
1618         if (segment == msg->lm_bufcount - 1) {
1619                 msg->lm_buflens[segment] = newsize;
1620                 return;
1621         }
1622
1623         oldsize = msg->lm_buflens[segment];
1624
1625         src = lustre_msg_buf(msg, segment + 1, 0);
1626         msg->lm_buflens[segment] = newsize;
1627         dst = lustre_msg_buf(msg, segment + 1, 0);
1628         msg->lm_buflens[segment] = oldsize;
1629
1630         /* move from segment + 1 to end segment */
1631         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1632         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1633         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1634         LASSERT(movesize >= 0);
1635
1636         if (movesize)
1637                 memmove(dst, src, movesize);
1638
1639         /* note we don't clear the area where old data lived; not secret */
1640
1641         /* finally set new segment size */
1642         msg->lm_buflens[segment] = newsize;
1643 }
1644 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
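
/*
 * Worked illustration (hypothetical sizes, not from a real message): with
 * lm_bufcount == 3 and lm_buflens == { 128, 64, 256 }, enlarging segment 1 to
 * 96 memmove()s the tail of the message (segment 2 onwards) forward by the
 * rounded size difference and leaves lm_buflens == { 128, 96, 256 }.  Only
 * when the last segment is enlarged can the memmove() be skipped entirely.
 */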
1645
1646 /**
1647  * Used by ptlrpc client to enlarge the \a segment of request message pointed
1648  * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be
1649  * preserved after the enlargement. This must be called after the original
1650  * request buffer has been allocated.
1651  *
1652  * \note After this is called, rq_reqmsg and rq_reqlen might have been changed,
1653  * so the caller should refresh its local pointers if needed.
1654  */
1655 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1656                                int segment, int newsize)
1657 {
1658         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1659         struct ptlrpc_sec_cops   *cops;
1660         struct lustre_msg        *msg = req->rq_reqmsg;
1661
1662         LASSERT(ctx);
1663         LASSERT(msg);
1664         LASSERT(msg->lm_bufcount > segment);
1665         LASSERT(msg->lm_buflens[segment] <= newsize);
1666
1667         if (msg->lm_buflens[segment] == newsize)
1668                 return 0;
1669
1670         cops = ctx->cc_sec->ps_policy->sp_cops;
1671         LASSERT(cops->enlarge_reqbuf);
1672         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1673 }
1674 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
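
/*
 * A minimal caller sketch (hypothetical segment index and size, for
 * illustration only): because the policy may reallocate the request buffer,
 * any cached pointer into rq_reqmsg must be re-fetched after enlarging:
 *
 *	rc = sptlrpc_cli_enlarge_reqbuf(req, segment, newsize);
 *	if (rc == 0)
 *		field = lustre_msg_buf(req->rq_reqmsg, segment, newsize);
 */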
1675
1676 /**
1677  * Used by ptlrpc client to allocate reply buffer of \a req.
1678  *
1679  * \note After this, req->rq_repmsg is still not accessible.
1680  */
1681 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1682 {
1683         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1684         struct ptlrpc_sec_policy *policy;
1685         ENTRY;
1686
1687         LASSERT(ctx);
1688         LASSERT(ctx->cc_sec);
1689         LASSERT(ctx->cc_sec->ps_policy);
1690
1691         if (req->rq_repbuf)
1692                 RETURN(0);
1693
1694         policy = ctx->cc_sec->ps_policy;
1695         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1696 }
1697
1698 /**
1699  * Used by ptlrpc client to free reply buffer of \a req. After this
1700  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1701  */
1702 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1703 {
1704         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1705         struct ptlrpc_sec_policy *policy;
1706         ENTRY;
1707
1708         LASSERT(ctx);
1709         LASSERT(ctx->cc_sec);
1710         LASSERT(ctx->cc_sec->ps_policy);
1711         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1712
1713         if (req->rq_repbuf == NULL)
1714                 return;
1715         LASSERT(req->rq_repbuf_len);
1716
1717         policy = ctx->cc_sec->ps_policy;
1718         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1719         req->rq_repmsg = NULL;
1720         EXIT;
1721 }
1722
1723 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1724                                 struct ptlrpc_cli_ctx *ctx)
1725 {
1726         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1727
1728         if (!policy->sp_cops->install_rctx)
1729                 return 0;
1730         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1731 }
1732
1733 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1734                                 struct ptlrpc_svc_ctx *ctx)
1735 {
1736         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1737
1738         if (!policy->sp_sops->install_rctx)
1739                 return 0;
1740         return policy->sp_sops->install_rctx(imp, ctx);
1741 }
1742
1743 /****************************************
1744  * server side security                 *
1745  ****************************************/
1746
1747 static int flavor_allowed(struct sptlrpc_flavor *exp,
1748                           struct ptlrpc_request *req)
1749 {
1750         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1751
1752         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1753                 return 1;
1754
1755         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1756             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1757             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1758             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1759                 return 1;
1760
1761         return 0;
1762 }
1763
1764 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
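
/*
 * exp_flvr is the flavor currently enforced for an export, while
 * exp_flvr_old[0]/[1] remember the most recently replaced flavors, each
 * accepted until the matching exp_flvr_expire[] timestamp (set to
 * EXP_FLVR_UPDATE_EXPIRE seconds after the switch) has passed.  A flavor
 * update from configuration is first staged in exp_flvr_old[1] with
 * exp_flvr_changed set (see sptlrpc_target_update_exp_flavor() below), and is
 * only promoted to exp_flvr by sptlrpc_target_export_check() once the first
 * request using it arrives.
 */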
1765
1766 /**
1767  * Given an export \a exp, check whether the flavor of incoming \a req
1768  * is allowed by the export \a exp. The main logic is about handling
1769  * changing configurations. A return value of 0 means success.
1770  */
1771 int sptlrpc_target_export_check(struct obd_export *exp,
1772                                 struct ptlrpc_request *req)
1773 {
1774         struct sptlrpc_flavor   flavor;
1775
1776         if (exp == NULL)
1777                 return 0;
1778
1779         /* client side export has no imp_reverse, skip
1780          * FIXME maybe we should check the flavor in this case as well??? */
1781         if (exp->exp_imp_reverse == NULL)
1782                 return 0;
1783
1784         /* don't care about ctx fini rpc */
1785         if (req->rq_ctx_fini)
1786                 return 0;
1787
1788         spin_lock(&exp->exp_lock);
1789
1790         /* if the flavor just changed (exp->exp_flvr_changed != 0), we wait for
1791          * the first req with the new flavor, then treat it as the current
1792          * flavor and adapt the reverse sec according to it.
1793          * note the first rpc with the new flavor might not carry a root ctx,
1794          * in which case the sec_adapt is delayed by leaving exp_flvr_adapt == 1. */
1795         if (unlikely(exp->exp_flvr_changed) &&
1796             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1797                 /* make the new flavor the "current" one, and the old ones
1798                  * about-to-expire */
1799                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1800                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1801                 flavor = exp->exp_flvr_old[1];
1802                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1803                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1804                 exp->exp_flvr_old[0] = exp->exp_flvr;
1805                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1806                                           EXP_FLVR_UPDATE_EXPIRE;
1807                 exp->exp_flvr = flavor;
1808
1809                 /* flavor change finished */
1810                 exp->exp_flvr_changed = 0;
1811                 LASSERT(exp->exp_flvr_adapt == 1);
1812
1813                 /* if it's gss, we are only interested in the root ctx init */
1814                 if (req->rq_auth_gss &&
1815                     !(req->rq_ctx_init &&
1816                       (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1817                        req->rq_auth_usr_ost))) {
1818                         spin_unlock(&exp->exp_lock);
1819                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1820                                req->rq_auth_gss, req->rq_ctx_init,
1821                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1822                                req->rq_auth_usr_ost);
1823                         return 0;
1824                 }
1825
1826                 exp->exp_flvr_adapt = 0;
1827                 spin_unlock(&exp->exp_lock);
1828
1829                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1830                                                 req->rq_svc_ctx, &flavor);
1831         }
1832
1833         /* if it equals the current flavor, we accept it, but still need
1834          * to deal with the reverse sec/ctx */
1835         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1836                 /* most cases should return here; we are only interested
1837                  * in the gss root ctx init */
1838                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1839                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1840                      !req->rq_auth_usr_ost)) {
1841                         spin_unlock(&exp->exp_lock);
1842                         return 0;
1843                 }
1844
1845                 /* if the flavor just changed, we should not proceed; just
1846                  * leave it, the current flavor will be discovered and
1847                  * replaced shortly, and let _this_ rpc pass through */
1848                 if (exp->exp_flvr_changed) {
1849                         LASSERT(exp->exp_flvr_adapt);
1850                         spin_unlock(&exp->exp_lock);
1851                         return 0;
1852                 }
1853
1854                 if (exp->exp_flvr_adapt) {
1855                         exp->exp_flvr_adapt = 0;
1856                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1857                                exp, exp->exp_flvr.sf_rpc,
1858                                exp->exp_flvr_old[0].sf_rpc,
1859                                exp->exp_flvr_old[1].sf_rpc);
1860                         flavor = exp->exp_flvr;
1861                         spin_unlock(&exp->exp_lock);
1862
1863                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1864                                                         req->rq_svc_ctx,
1865                                                         &flavor);
1866                 } else {
1867                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1868                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1869                                exp->exp_flvr_old[0].sf_rpc,
1870                                exp->exp_flvr_old[1].sf_rpc);
1871                         spin_unlock(&exp->exp_lock);
1872
1873                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1874                                                            req->rq_svc_ctx);
1875                 }
1876         }
1877
1878         if (exp->exp_flvr_expire[0]) {
1879                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1880                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1881                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1882                                        "middle one ("CFS_DURATION_T")\n", exp,
1883                                        exp->exp_flvr.sf_rpc,
1884                                        exp->exp_flvr_old[0].sf_rpc,
1885                                        exp->exp_flvr_old[1].sf_rpc,
1886                                        exp->exp_flvr_expire[0] -
1887                                                 cfs_time_current_sec());
1888                                 spin_unlock(&exp->exp_lock);
1889                                 return 0;
1890                         }
1891                 } else {
1892                         CDEBUG(D_SEC, "mark middle expired\n");
1893                         exp->exp_flvr_expire[0] = 0;
1894                 }
1895                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1896                        exp->exp_flvr.sf_rpc,
1897                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1898                        req->rq_flvr.sf_rpc);
1899         }
1900
1901         /* it doesn't match the current flavor; the only remaining chance to
1902          * accept it is a match against an old flavor which has not expired. */
1903         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1904                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1905                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1906                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1907                                        "oldest one ("CFS_DURATION_T")\n", exp,
1908                                        exp->exp_flvr.sf_rpc,
1909                                        exp->exp_flvr_old[0].sf_rpc,
1910                                        exp->exp_flvr_old[1].sf_rpc,
1911                                        exp->exp_flvr_expire[1] -
1912                                                 cfs_time_current_sec());
1913                                 spin_unlock(&exp->exp_lock);
1914                                 return 0;
1915                         }
1916                 } else {
1917                         CDEBUG(D_SEC, "mark oldest expired\n");
1918                         exp->exp_flvr_expire[1] = 0;
1919                 }
1920                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1921                        exp, exp->exp_flvr.sf_rpc,
1922                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1923                        req->rq_flvr.sf_rpc);
1924         } else {
1925                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1926                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1927                        exp->exp_flvr_old[1].sf_rpc);
1928         }
1929
1930         spin_unlock(&exp->exp_lock);
1931
1932         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
1933               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1934               exp, exp->exp_obd->obd_name,
1935               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1936               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1937               req->rq_flvr.sf_rpc,
1938               exp->exp_flvr.sf_rpc,
1939               exp->exp_flvr_old[0].sf_rpc,
1940               exp->exp_flvr_expire[0] ?
1941               (unsigned long) (exp->exp_flvr_expire[0] -
1942                                cfs_time_current_sec()) : 0,
1943               exp->exp_flvr_old[1].sf_rpc,
1944               exp->exp_flvr_expire[1] ?
1945               (unsigned long) (exp->exp_flvr_expire[1] -
1946                                cfs_time_current_sec()) : 0);
1947         return -EACCES;
1948 }
1949 EXPORT_SYMBOL(sptlrpc_target_export_check);
1950
1951 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1952                                       struct sptlrpc_rule_set *rset)
1953 {
1954         struct obd_export       *exp;
1955         struct sptlrpc_flavor    new_flvr;
1956
1957         LASSERT(obd);
1958
1959         spin_lock(&obd->obd_dev_lock);
1960
1961         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1962                 if (exp->exp_connection == NULL)
1963                         continue;
1964
1965                 /* note if this export's flavor had just been updated
1966                  * (exp_flvr_changed == 1), this will override the
1967                  * previous update. */
1968                 spin_lock(&exp->exp_lock);
1969                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1970                                              exp->exp_connection->c_peer.nid,
1971                                              &new_flvr);
1972                 if (exp->exp_flvr_changed ||
1973                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1974                         exp->exp_flvr_old[1] = new_flvr;
1975                         exp->exp_flvr_expire[1] = 0;
1976                         exp->exp_flvr_changed = 1;
1977                         exp->exp_flvr_adapt = 1;
1978
1979                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1980                                exp, sptlrpc_part2name(exp->exp_sp_peer),
1981                                exp->exp_flvr.sf_rpc,
1982                                exp->exp_flvr_old[1].sf_rpc);
1983                 }
1984                 spin_unlock(&exp->exp_lock);
1985         }
1986
1987         spin_unlock(&obd->obd_dev_lock);
1988 }
1989 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1990
1991 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1992 {
1993         /* peer's claim is unreliable unless gss is being used */
1994         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
1995                 return svc_rc;
1996
1997         switch (req->rq_sp_from) {
1998         case LUSTRE_SP_CLI:
1999                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2000                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2001                         svc_rc = SECSVC_DROP;
2002                 }
2003                 break;
2004         case LUSTRE_SP_MDT:
2005                 if (!req->rq_auth_usr_mdt) {
2006                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2007                         svc_rc = SECSVC_DROP;
2008                 }
2009                 break;
2010         case LUSTRE_SP_OST:
2011                 if (!req->rq_auth_usr_ost) {
2012                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2013                         svc_rc = SECSVC_DROP;
2014                 }
2015                 break;
2016         case LUSTRE_SP_MGS:
2017         case LUSTRE_SP_MGC:
2018                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2019                     !req->rq_auth_usr_ost) {
2020                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2021                         svc_rc = SECSVC_DROP;
2022                 }
2023                 break;
2024         case LUSTRE_SP_ANY:
2025         default:
2026                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2027                 svc_rc = SECSVC_DROP;
2028         }
2029
2030         return svc_rc;
2031 }
2032
2033 /**
2034  * Used by ptlrpc server, to perform transformation upon request message of
2035  * incoming \a req. This must be the first thing to do with an incoming
2036  * request in ptlrpc layer.
2037  *
2038  * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message in
2039  * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
2040  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2041  * reply message has been prepared.
2042  * \retval SECSVC_DROP failed, this request should be dropped.
2043  */
2044 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2045 {
2046         struct ptlrpc_sec_policy *policy;
2047         struct lustre_msg        *msg = req->rq_reqbuf;
2048         int                       rc;
2049         ENTRY;
2050
2051         LASSERT(msg);
2052         LASSERT(req->rq_reqmsg == NULL);
2053         LASSERT(req->rq_repmsg == NULL);
2054         LASSERT(req->rq_svc_ctx == NULL);
2055
2056         req->rq_req_swab_mask = 0;
2057
2058         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2059         switch (rc) {
2060         case 1:
2061                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
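                /* fallthrough */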
2062         case 0:
2063                 break;
2064         default:
2065                 CERROR("error unpacking request from %s x"LPU64"\n",
2066                        libcfs_id2str(req->rq_peer), req->rq_xid);
2067                 RETURN(SECSVC_DROP);
2068         }
2069
2070         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2071         req->rq_sp_from = LUSTRE_SP_ANY;
2072         req->rq_auth_uid = -1;          /* set to INVALID_UID */
2073         req->rq_auth_mapped_uid = -1;
2074
2075         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2076         if (!policy) {
2077                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2078                 RETURN(SECSVC_DROP);
2079         }
2080
2081         LASSERT(policy->sp_sops->accept);
2082         rc = policy->sp_sops->accept(req);
2083         sptlrpc_policy_put(policy);
2084         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2085         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2086
2087         /*
2088          * if it's not the null flavor (which means the inner msg is packed
2089          * embedded), reset the swab mask for the coming inner msg unpacking.
2090          */
2091         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2092                 req->rq_req_swab_mask = 0;
2093
2094         /* sanity check for the request source */
2095         rc = sptlrpc_svc_check_from(req, rc);
2096         RETURN(rc);
2097 }
2098
2099 /**
2100  * Used by ptlrpc server, to allocate reply buffer for \a req. On success,
2101  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2102  * a buffer of \a msglen size.
2103  */
2104 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2105 {
2106         struct ptlrpc_sec_policy *policy;
2107         struct ptlrpc_reply_state *rs;
2108         int rc;
2109         ENTRY;
2110
2111         LASSERT(req->rq_svc_ctx);
2112         LASSERT(req->rq_svc_ctx->sc_policy);
2113
2114         policy = req->rq_svc_ctx->sc_policy;
2115         LASSERT(policy->sp_sops->alloc_rs);
2116
2117         rc = policy->sp_sops->alloc_rs(req, msglen);
2118         if (unlikely(rc == -ENOMEM)) {
2119                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2120                 if (svcpt->scp_service->srv_max_reply_size <
2121                    msglen + sizeof(struct ptlrpc_reply_state)) {
2122                         /* Just return failure if the size is too big */
2123                         CERROR("size of message is too big (%zd), %d allowed\n",
2124                                 msglen + sizeof(struct ptlrpc_reply_state),
2125                                 svcpt->scp_service->srv_max_reply_size);
2126                         RETURN(-ENOMEM);
2127                 }
2128
2129                 /* failed alloc, try emergency pool */
2130                 rs = lustre_get_emerg_rs(svcpt);
2131                 if (rs == NULL)
2132                         RETURN(-ENOMEM);
2133
2134                 req->rq_reply_state = rs;
2135                 rc = policy->sp_sops->alloc_rs(req, msglen);
2136                 if (rc) {
2137                         lustre_put_emerg_rs(rs);
2138                         req->rq_reply_state = NULL;
2139                 }
2140         }
2141
2142         LASSERT(rc != 0 ||
2143                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2144
2145         RETURN(rc);
2146 }
2147
2148 /**
2149  * Used by ptlrpc server, to perform transformation upon reply message.
2150  *
2151  * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
2152  * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2153  */
2154 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2155 {
2156         struct ptlrpc_sec_policy *policy;
2157         int rc;
2158         ENTRY;
2159
2160         LASSERT(req->rq_svc_ctx);
2161         LASSERT(req->rq_svc_ctx->sc_policy);
2162
2163         policy = req->rq_svc_ctx->sc_policy;
2164         LASSERT(policy->sp_sops->authorize);
2165
2166         rc = policy->sp_sops->authorize(req);
2167         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2168
2169         RETURN(rc);
2170 }
2171
2172 /**
2173  * Used by ptlrpc server, to free reply_state.
2174  */
2175 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2176 {
2177         struct ptlrpc_sec_policy *policy;
2178         unsigned int prealloc;
2179         ENTRY;
2180
2181         LASSERT(rs->rs_svc_ctx);
2182         LASSERT(rs->rs_svc_ctx->sc_policy);
2183
2184         policy = rs->rs_svc_ctx->sc_policy;
2185         LASSERT(policy->sp_sops->free_rs);
2186
2187         prealloc = rs->rs_prealloc;
2188         policy->sp_sops->free_rs(rs);
2189
2190         if (prealloc)
2191                 lustre_put_emerg_rs(rs);
2192         EXIT;
2193 }
2194
2195 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2196 {
2197         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2198
2199         if (ctx != NULL)
2200                 atomic_inc(&ctx->sc_refcount);
2201 }
2202
2203 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2204 {
2205         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2206
2207         if (ctx == NULL)
2208                 return;
2209
2210         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2211         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2212                 if (ctx->sc_policy->sp_sops->free_ctx)
2213                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2214         }
2215         req->rq_svc_ctx = NULL;
2216 }
2217
2218 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2219 {
2220         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2221
2222         if (ctx == NULL)
2223                 return;
2224
2225         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2226         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2227                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2228 }
2229 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2230
2231 /****************************************
2232  * bulk security                        *
2233  ****************************************/
2234
2235 /**
2236  * Perform transformation upon the bulk data pointed to by \a desc. This is called
2237  * before transforming the request message.
2238  */
2239 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2240                           struct ptlrpc_bulk_desc *desc)
2241 {
2242         struct ptlrpc_cli_ctx *ctx;
2243
2244         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2245
2246         if (!req->rq_pack_bulk)
2247                 return 0;
2248
2249         ctx = req->rq_cli_ctx;
2250         if (ctx->cc_ops->wrap_bulk)
2251                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2252         return 0;
2253 }
2254 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2255
2256 /**
2257  * This is called after the reply message has been unwrapped.
2258  * Return the number of bytes of plain text actually received, or an error code.
2259  */
2260 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2261                                  struct ptlrpc_bulk_desc *desc,
2262                                  int nob)
2263 {
2264         struct ptlrpc_cli_ctx  *ctx;
2265         int                     rc;
2266
2267         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2268
2269         if (!req->rq_pack_bulk)
2270                 return desc->bd_nob_transferred;
2271
2272         ctx = req->rq_cli_ctx;
2273         if (ctx->cc_ops->unwrap_bulk) {
2274                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2275                 if (rc < 0)
2276                         return rc;
2277         }
2278         return desc->bd_nob_transferred;
2279 }
2280 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2281
2282 /**
2283  * This is called after the reply message has been unwrapped.
2284  * Return 0 on success, or an error code.
2285  */
2286 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2287                                   struct ptlrpc_bulk_desc *desc)
2288 {
2289         struct ptlrpc_cli_ctx  *ctx;
2290         int                     rc;
2291
2292         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2293
2294         if (!req->rq_pack_bulk)
2295                 return 0;
2296
2297         ctx = req->rq_cli_ctx;
2298         if (ctx->cc_ops->unwrap_bulk) {
2299                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2300                 if (rc < 0)
2301                         return rc;
2302         }
2303
2304         /*
2305          * if everything is going right, nob should equal nob_transferred.
2306          * in case of privacy mode, nob_transferred needs to be adjusted.
2307          */
2308         if (desc->bd_nob != desc->bd_nob_transferred) {
2309                 CERROR("nob %d doesn't match transferred nob %d\n",
2310                        desc->bd_nob, desc->bd_nob_transferred);
2311                 return -EPROTO;
2312         }
2313
2314         return 0;
2315 }
2316 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2317
2318 #ifdef HAVE_SERVER_SUPPORT
2319 /**
2320  * Perform transformation upon outgoing bulk read.
2321  */
2322 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2323                           struct ptlrpc_bulk_desc *desc)
2324 {
2325         struct ptlrpc_svc_ctx *ctx;
2326
2327         LASSERT(req->rq_bulk_read);
2328
2329         if (!req->rq_pack_bulk)
2330                 return 0;
2331
2332         ctx = req->rq_svc_ctx;
2333         if (ctx->sc_policy->sp_sops->wrap_bulk)
2334                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2335
2336         return 0;
2337 }
2338 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2339
2340 /**
2341  * Perform transformation upon incoming bulk write.
2342  */
2343 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2344                             struct ptlrpc_bulk_desc *desc)
2345 {
2346         struct ptlrpc_svc_ctx *ctx;
2347         int                    rc;
2348
2349         LASSERT(req->rq_bulk_write);
2350
2351         /*
2352          * if it's in privacy mode, transferred should be >= expected;
2353          * otherwise transferred should be == expected.
2354          */
2355         if (desc->bd_nob_transferred < desc->bd_nob ||
2356             (desc->bd_nob_transferred > desc->bd_nob &&
2357              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2358              SPTLRPC_BULK_SVC_PRIV)) {
2359                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2360                           desc->bd_nob_transferred, desc->bd_nob);
2361                 return -ETIMEDOUT;
2362         }
2363
2364         if (!req->rq_pack_bulk)
2365                 return 0;
2366
2367         ctx = req->rq_svc_ctx;
2368         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2369                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2370                 if (rc)
2371                         CERROR("error unwrap bulk: %d\n", rc);
2372         }
2373
2374         /* return 0 to allow reply be sent */
2375         return 0;
2376 }
2377 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2378
2379 /**
2380  * Prepare buffers for incoming bulk write.
2381  */
2382 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2383                           struct ptlrpc_bulk_desc *desc)
2384 {
2385         struct ptlrpc_svc_ctx *ctx;
2386
2387         LASSERT(req->rq_bulk_write);
2388
2389         if (!req->rq_pack_bulk)
2390                 return 0;
2391
2392         ctx = req->rq_svc_ctx;
2393         if (ctx->sc_policy->sp_sops->prep_bulk)
2394                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2395
2396         return 0;
2397 }
2398 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2399
2400 #endif /* HAVE_SERVER_SUPPORT */
2401
2402 /****************************************
2403  * user descriptor helpers              *
2404  ****************************************/
2405
2406 int sptlrpc_current_user_desc_size(void)
2407 {
2408         int ngroups;
2409
2410         ngroups = current_ngroups;
2411
2412         if (ngroups > LUSTRE_MAX_GROUPS)
2413                 ngroups = LUSTRE_MAX_GROUPS;
2414         return sptlrpc_user_desc_size(ngroups);
2415 }
2416 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2417
2418 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2419 {
2420         struct ptlrpc_user_desc *pud;
2421
2422         pud = lustre_msg_buf(msg, offset, 0);
2423
2424         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2425         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2426         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2427         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2428         pud->pud_cap = cfs_curproc_cap_pack();
2429         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2430
2431         task_lock(current);
2432         if (pud->pud_ngroups > current_ngroups)
2433                 pud->pud_ngroups = current_ngroups;
2434         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2435                pud->pud_ngroups * sizeof(__u32));
2436         task_unlock(current);
2437
2438         return 0;
2439 }
2440 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
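
/*
 * A minimal usage sketch (hypothetical variable names, for illustration
 * only): a client reserves a segment sized by sptlrpc_current_user_desc_size()
 * when laying out the request message, then fills it in with the caller's
 * credentials:
 *
 *	buflens[offset] = sptlrpc_current_user_desc_size();
 *	... pack the lustre_msg using these buffer lengths ...
 *	rc = sptlrpc_pack_user_desc(msg, offset);
 */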
2441
2442 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2443 {
2444         struct ptlrpc_user_desc *pud;
2445         int                      i;
2446
2447         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2448         if (!pud)
2449                 return -EINVAL;
2450
2451         if (swabbed) {
2452                 __swab32s(&pud->pud_uid);
2453                 __swab32s(&pud->pud_gid);
2454                 __swab32s(&pud->pud_fsuid);
2455                 __swab32s(&pud->pud_fsgid);
2456                 __swab32s(&pud->pud_cap);
2457                 __swab32s(&pud->pud_ngroups);
2458         }
2459
2460         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2461                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2462                 return -EINVAL;
2463         }
2464
2465         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2466             msg->lm_buflens[offset]) {
2467                 CERROR("%u groups are claimed but bufsize only %u\n",
2468                        pud->pud_ngroups, msg->lm_buflens[offset]);
2469                 return -EINVAL;
2470         }
2471
2472         if (swabbed) {
2473                 for (i = 0; i < pud->pud_ngroups; i++)
2474                         __swab32s(&pud->pud_groups[i]);
2475         }
2476
2477         return 0;
2478 }
2479 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2480
2481 /****************************************
2482  * misc helpers                         *
2483  ****************************************/
2484
2485 const char * sec2target_str(struct ptlrpc_sec *sec)
2486 {
2487         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2488                 return "*";
2489         if (sec_is_reverse(sec))
2490                 return "c";
2491         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2492 }
2493 EXPORT_SYMBOL(sec2target_str);
2494
2495 /*
2496  * return true if the bulk data is protected
2497  */
2498 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2499 {
2500         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2501         case SPTLRPC_BULK_SVC_INTG:
2502         case SPTLRPC_BULK_SVC_PRIV:
2503                 return 1;
2504         default:
2505                 return 0;
2506         }
2507 }
2508 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2509
2510 /****************************************
2511  * crypto API helper/alloc blkcipher    *
2512  ****************************************/
2513
2514 /****************************************
2515  * initialize/finalize                  *
2516  ****************************************/
2517
2518 int sptlrpc_init(void)
2519 {
2520         int rc;
2521
2522         rwlock_init(&policy_lock);
2523
2524         rc = sptlrpc_gc_init();
2525         if (rc)
2526                 goto out;
2527
2528         rc = sptlrpc_conf_init();
2529         if (rc)
2530                 goto out_gc;
2531
2532         rc = sptlrpc_enc_pool_init();
2533         if (rc)
2534                 goto out_conf;
2535
2536         rc = sptlrpc_null_init();
2537         if (rc)
2538                 goto out_pool;
2539
2540         rc = sptlrpc_plain_init();
2541         if (rc)
2542                 goto out_null;
2543
2544         rc = sptlrpc_lproc_init();
2545         if (rc)
2546                 goto out_plain;
2547
2548         return 0;
2549
2550 out_plain:
2551         sptlrpc_plain_fini();
2552 out_null:
2553         sptlrpc_null_fini();
2554 out_pool:
2555         sptlrpc_enc_pool_fini();
2556 out_conf:
2557         sptlrpc_conf_fini();
2558 out_gc:
2559         sptlrpc_gc_fini();
2560 out:
2561         return rc;
2562 }
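
/*
 * Note the error-unwind ladder above releases the subsystems in the reverse
 * order of their initialization; sptlrpc_fini() below performs the same
 * teardown in reverse order for the normal shutdown path.
 */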
2563
2564 void sptlrpc_fini(void)
2565 {
2566         sptlrpc_lproc_fini();
2567         sptlrpc_plain_fini();
2568         sptlrpc_null_fini();
2569         sptlrpc_enc_pool_fini();
2570         sptlrpc_conf_fini();
2571         sptlrpc_gc_fini();
2572 }