Whamcloud - gitweb
5c44f89cb8d622dd90cd035d47c5b8bec4966664
[fs/lustre-release.git] / lustre / ptlrpc / sec.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ptlrpc/sec.c
33  *
34  * Author: Eric Mei <ericm@clusterfs.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_SEC
38
39 #include <linux/user_namespace.h>
40 #ifdef HAVE_UIDGID_HEADER
41 # include <linux/uidgid.h>
42 #endif
43 #include <linux/crypto.h>
44 #include <linux/key.h>
45
46 #include <libcfs/libcfs.h>
47 #include <obd.h>
48 #include <obd_class.h>
49 #include <obd_support.h>
50 #include <lustre_net.h>
51 #include <lustre_import.h>
52 #include <lustre_dlm.h>
53 #include <lustre_sec.h>
54
55 #include "ptlrpc_internal.h"
56
57 /***********************************************
58  * policy registers                            *
59  ***********************************************/
60
61 static rwlock_t policy_lock;
62 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
63         NULL,
64 };
65
66 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
67 {
68         __u16 number = policy->sp_policy;
69
70         LASSERT(policy->sp_name);
71         LASSERT(policy->sp_cops);
72         LASSERT(policy->sp_sops);
73
74         if (number >= SPTLRPC_POLICY_MAX)
75                 return -EINVAL;
76
77         write_lock(&policy_lock);
78         if (unlikely(policies[number])) {
79                 write_unlock(&policy_lock);
80                 return -EALREADY;
81         }
82         policies[number] = policy;
83         write_unlock(&policy_lock);
84
85         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
86         return 0;
87 }
88 EXPORT_SYMBOL(sptlrpc_register_policy);
89
90 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
91 {
92         __u16 number = policy->sp_policy;
93
94         LASSERT(number < SPTLRPC_POLICY_MAX);
95
96         write_lock(&policy_lock);
97         if (unlikely(policies[number] == NULL)) {
98                 write_unlock(&policy_lock);
99                 CERROR("%s: already unregistered\n", policy->sp_name);
100                 return -EINVAL;
101         }
102
103         LASSERT(policies[number] == policy);
104         policies[number] = NULL;
105         write_unlock(&policy_lock);
106
107         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
108         return 0;
109 }
110 EXPORT_SYMBOL(sptlrpc_unregister_policy);
111
/**
 * Look up the registered policy for wire flavor \a flavor, taking a module
 * reference on it.  For the GSS policy number, attempt to load the
 * ptlrpc_gss module on demand, at most once per module lifetime.
 *
 * \retval pointer to the policy with a module reference held (caller must
 *         drop it with module_put), or NULL if no policy is available.
 */
static
struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
{
	static DEFINE_MUTEX(load_mutex);
	static atomic_t           loaded = ATOMIC_INIT(0);
	struct ptlrpc_sec_policy *policy;
	__u16                     number = SPTLRPC_FLVR_POLICY(flavor);
	__u16                     flag = 0;

	if (number >= SPTLRPC_POLICY_MAX)
		return NULL;

	while (1) {
		read_lock(&policy_lock);
		policy = policies[number];
		/* treat a policy whose module is going away as absent */
		if (policy && !try_module_get(policy->sp_owner))
			policy = NULL;
		/* remember whether the on-demand load was already attempted */
		if (policy == NULL)
			flag = atomic_read(&loaded);
		read_unlock(&policy_lock);

		/* retry only when: no policy found, load not yet attempted,
		 * and the flavor belongs to the GSS policy */
		if (policy != NULL || flag != 0 ||
		    number != SPTLRPC_POLICY_GSS)
			break;

		/* try to load gss module, once */
		mutex_lock(&load_mutex);
		if (atomic_read(&loaded) == 0) {
			if (request_module("ptlrpc_gss") == 0)
				CDEBUG(D_SEC,
				       "module ptlrpc_gss loaded on demand\n");
			else
				CERROR("Unable to load module ptlrpc_gss\n");

			/* mark attempted even on failure so we only try once */
			atomic_set(&loaded, 1);
		}
		mutex_unlock(&load_mutex);
	}

	return policy;
}
153
154 __u32 sptlrpc_name2flavor_base(const char *name)
155 {
156         if (!strcmp(name, "null"))
157                 return SPTLRPC_FLVR_NULL;
158         if (!strcmp(name, "plain"))
159                 return SPTLRPC_FLVR_PLAIN;
160         if (!strcmp(name, "gssnull"))
161                 return SPTLRPC_FLVR_GSSNULL;
162         if (!strcmp(name, "krb5n"))
163                 return SPTLRPC_FLVR_KRB5N;
164         if (!strcmp(name, "krb5a"))
165                 return SPTLRPC_FLVR_KRB5A;
166         if (!strcmp(name, "krb5i"))
167                 return SPTLRPC_FLVR_KRB5I;
168         if (!strcmp(name, "krb5p"))
169                 return SPTLRPC_FLVR_KRB5P;
170         if (!strcmp(name, "skn"))
171                 return SPTLRPC_FLVR_SKN;
172         if (!strcmp(name, "ska"))
173                 return SPTLRPC_FLVR_SKA;
174         if (!strcmp(name, "ski"))
175                 return SPTLRPC_FLVR_SKI;
176         if (!strcmp(name, "skpi"))
177                 return SPTLRPC_FLVR_SKPI;
178
179         return SPTLRPC_FLVR_INVALID;
180 }
181 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
182
183 const char *sptlrpc_flavor2name_base(__u32 flvr)
184 {
185         __u32   base = SPTLRPC_FLVR_BASE(flvr);
186
187         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
188                 return "null";
189         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
190                 return "plain";
191         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
192                 return "gssnull";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
194                 return "krb5n";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
196                 return "krb5a";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
198                 return "krb5i";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
200                 return "krb5p";
201         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
202                 return "skn";
203         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
204                 return "ska";
205         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
206                 return "ski";
207         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
208                 return "skpi";
209
210         CERROR("invalid wire flavor 0x%x\n", flvr);
211         return "invalid";
212 }
213 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
214
215 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
216                                char *buf, int bufsize)
217 {
218         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
219                 snprintf(buf, bufsize, "hash:%s",
220                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
221         else
222                 snprintf(buf, bufsize, "%s",
223                          sptlrpc_flavor2name_base(sf->sf_rpc));
224
225         buf[bufsize - 1] = '\0';
226         return buf;
227 }
228 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
229
230 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
231 {
232         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
233
234         /*
235          * currently we don't support customized bulk specification for
236          * flavors other than plain
237          */
238         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
239                 char bspec[16];
240
241                 bspec[0] = '-';
242                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
243                 strncat(buf, bspec, bufsize);
244         }
245
246         buf[bufsize - 1] = '\0';
247         return buf;
248 }
249 EXPORT_SYMBOL(sptlrpc_flavor2name);
250
251 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
252 {
253         buf[0] = '\0';
254
255         if (flags & PTLRPC_SEC_FL_REVERSE)
256                 strlcat(buf, "reverse,", bufsize);
257         if (flags & PTLRPC_SEC_FL_ROOTONLY)
258                 strlcat(buf, "rootonly,", bufsize);
259         if (flags & PTLRPC_SEC_FL_UDESC)
260                 strlcat(buf, "udesc,", bufsize);
261         if (flags & PTLRPC_SEC_FL_BULK)
262                 strlcat(buf, "bulk,", bufsize);
263         if (buf[0] == '\0')
264                 strlcat(buf, "-,", bufsize);
265
266         return buf;
267 }
268 EXPORT_SYMBOL(sptlrpc_secflags2str);
269
270 /**************************************************
271  * client context APIs                            *
272  **************************************************/
273
274 static
275 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
276 {
277         struct vfs_cred vcred;
278         int create = 1, remove_dead = 1;
279
280         LASSERT(sec);
281         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
282
283         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
284                                      PTLRPC_SEC_FL_ROOTONLY)) {
285                 vcred.vc_uid = 0;
286                 vcred.vc_gid = 0;
287                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
288                         create = 0;
289                         remove_dead = 0;
290                 }
291         } else {
292                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
293                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
294         }
295
296         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
297                                                    remove_dead);
298 }
299
/**
 * Take an additional reference on \a ctx and return it.
 */
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
	atomic_inc(&ctx->cc_refcount);
	return ctx;
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
306
307 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
308 {
309         struct ptlrpc_sec *sec = ctx->cc_sec;
310
311         LASSERT(sec);
312         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
313
314         if (!atomic_dec_and_test(&ctx->cc_refcount))
315                 return;
316
317         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
318 }
319 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
320
/**
 * Expire the client context immediately.
 *
 * \pre Caller must hold at least 1 reference on the \a ctx.
 */
void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(ctx->cc_ops->die);
	/* second argument 0 matches every other die() call site in this
	 * file; its exact meaning is policy-defined */
	ctx->cc_ops->die(ctx, 0);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
332
333 /**
334  * To wake up the threads who are waiting for this client context. Called
335  * after some status change happened on \a ctx.
336  */
337 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
338 {
339         struct ptlrpc_request *req, *next;
340
341         spin_lock(&ctx->cc_lock);
342         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
343                                      rq_ctx_chain) {
344                 list_del_init(&req->rq_ctx_chain);
345                 ptlrpc_client_wake_req(req);
346         }
347         spin_unlock(&ctx->cc_lock);
348 }
349 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
350
351 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
352 {
353         LASSERT(ctx->cc_ops);
354
355         if (ctx->cc_ops->display == NULL)
356                 return 0;
357
358         return ctx->cc_ops->display(ctx, buf, bufsize);
359 }
360
361 static int import_sec_check_expire(struct obd_import *imp)
362 {
363         int     adapt = 0;
364
365         spin_lock(&imp->imp_lock);
366         if (imp->imp_sec_expire &&
367             imp->imp_sec_expire < ktime_get_real_seconds()) {
368                 adapt = 1;
369                 imp->imp_sec_expire = 0;
370         }
371         spin_unlock(&imp->imp_lock);
372
373         if (!adapt)
374                 return 0;
375
376         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
377         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
378 }
379
380 /**
381  * Get and validate the client side ptlrpc security facilities from
382  * \a imp. There is a race condition on client reconnect when the import is
383  * being destroyed while there are outstanding client bound requests. In
384  * this case do not output any error messages if import secuity is not
385  * found.
386  *
387  * \param[in] imp obd import associated with client
388  * \param[out] sec client side ptlrpc security
389  *
390  * \retval 0 if security retrieved successfully
391  * \retval -ve errno if there was a problem
392  */
393 static int import_sec_validate_get(struct obd_import *imp,
394                                    struct ptlrpc_sec **sec)
395 {
396         int     rc;
397
398         if (unlikely(imp->imp_sec_expire)) {
399                 rc = import_sec_check_expire(imp);
400                 if (rc)
401                         return rc;
402         }
403
404         *sec = sptlrpc_import_sec_ref(imp);
405         if (*sec == NULL) {
406                 CERROR("import %p (%s) with no sec\n",
407                         imp, ptlrpc_import_state_name(imp->imp_state));
408                 return -EACCES;
409         }
410
411         if (unlikely((*sec)->ps_dying)) {
412                 CERROR("attempt to use dying sec %p\n", sec);
413                 sptlrpc_sec_put(*sec);
414                 return -EACCES;
415         }
416
417         return 0;
418 }
419
/**
 * Given a \a req, find or allocate an appropriate context for it.
 * \pre req->rq_cli_ctx == NULL.
 *
 * \retval 0 succeed, and req->rq_cli_ctx is set.
 * \retval -ev error number, and req->rq_cli_ctx == NULL.
 */
int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
{
	struct obd_import *imp = req->rq_import;
	struct ptlrpc_sec *sec = NULL;
	int rc;
	ENTRY;

	LASSERT(!req->rq_cli_ctx);
	LASSERT(imp);

	rc = import_sec_validate_get(imp, &sec);
	if (rc != 0)
		RETURN(rc);

	/* the context lookup holds its own reference; the sec reference is
	 * no longer needed afterwards */
	req->rq_cli_ctx = get_my_ctx(sec);
	sptlrpc_sec_put(sec);

	if (req->rq_cli_ctx == NULL) {
		CERROR("req %p: fail to get context\n", req);
		RETURN(-ECONNREFUSED);
	}

	RETURN(0);
}
452
453 /**
454  * Drop the context for \a req.
455  * \pre req->rq_cli_ctx != NULL.
456  * \post req->rq_cli_ctx == NULL.
457  *
458  * If \a sync == 0, this function should return quickly without sleep;
459  * otherwise it might trigger and wait for the whole process of sending
460  * an context-destroying rpc to server.
461  */
462 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
463 {
464         ENTRY;
465
466         LASSERT(req);
467         LASSERT(req->rq_cli_ctx);
468
469         /* request might be asked to release earlier while still
470          * in the context waiting list.
471          */
472         if (!list_empty(&req->rq_ctx_chain)) {
473                 spin_lock(&req->rq_cli_ctx->cc_lock);
474                 list_del_init(&req->rq_ctx_chain);
475                 spin_unlock(&req->rq_cli_ctx->cc_lock);
476         }
477
478         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
479         req->rq_cli_ctx = NULL;
480         EXIT;
481 }
482
/**
 * Move \a req from \a oldctx to \a newctx: free the security request and
 * reply buffers that were allocated under the old context, recompute the
 * request flavor, then re-allocate and refill the request buffer under the
 * new context.  The reply buffer is left for later ptlrpc processing.
 *
 * \retval 0 on success
 * \retval -ENOMEM if the request message could not be preserved; the old
 *         flavor is restored in that case.
 */
static
int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
			   struct ptlrpc_cli_ctx *oldctx,
			   struct ptlrpc_cli_ctx *newctx)
{
	struct sptlrpc_flavor   old_flvr;
	char                   *reqmsg = NULL; /* to workaround old gcc */
	int                     reqmsg_size;
	int                     rc = 0;

	LASSERT(req->rq_reqmsg);
	LASSERT(req->rq_reqlen);
	LASSERT(req->rq_replen);

	CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
	       "switch sec %p(%s) -> %p(%s)\n", req,
	       oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
	       newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
	       oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
	       newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);

	/* save flavor */
	old_flvr = req->rq_flvr;

	/* save request message, since freeing the reqbuf below destroys it */
	reqmsg_size = req->rq_reqlen;
	if (reqmsg_size != 0) {
		OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
		if (reqmsg == NULL)
			return -ENOMEM;
		memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
	}

	/* release old req/rep buf; temporarily point rq_cli_ctx back at
	 * oldctx so the free routines use the context that allocated them */
	req->rq_cli_ctx = oldctx;
	sptlrpc_cli_free_reqbuf(req);
	sptlrpc_cli_free_repbuf(req);
	req->rq_cli_ctx = newctx;

	/* recalculate the flavor */
	sptlrpc_req_set_flavor(req, 0);

	/* alloc new request buffer
	 * we don't need to alloc reply buffer here, leave it to the
	 * rest procedure of ptlrpc */
	if (reqmsg_size != 0) {
		rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
		if (!rc) {
			LASSERT(req->rq_reqmsg);
			memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
		} else {
			CWARN("failed to alloc reqbuf: %d\n", rc);
			/* allocation failed: keep the old flavor so the
			 * request is still usable with oldctx */
			req->rq_flvr = old_flvr;
		}

		OBD_FREE_LARGE(reqmsg, reqmsg_size);
	}
	return rc;
}
542
/**
 * If current context of \a req is dead somehow, e.g. we just switched flavor
 * thus marked original contexts dead, we'll find a new context for it. if
 * no switch is needed, \a req will end up with the same context.
 *
 * \note a request must have a context, to keep other parts of code happy.
 * In any case of failure during the switching, we must restore the old one.
 */
int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
	struct ptlrpc_cli_ctx *newctx;
	int                    rc;
	ENTRY;

	LASSERT(oldctx);

	/* hold an extra ref on oldctx so it stays alive across the switch */
	sptlrpc_cli_ctx_get(oldctx);
	sptlrpc_req_put_ctx(req, 0);

	rc = sptlrpc_req_get_ctx(req);
	if (unlikely(rc)) {
		LASSERT(!req->rq_cli_ctx);

		/* restore old ctx */
		req->rq_cli_ctx = oldctx;
		RETURN(rc);
	}

	newctx = req->rq_cli_ctx;
	LASSERT(newctx);

	if (unlikely(newctx == oldctx &&
		     test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
		/*
		 * still get the old dead ctx, usually means system too busy
		 */
		CDEBUG(D_SEC,
		       "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
		       newctx, newctx->cc_flags);

		/* back off for a second before the caller retries */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
	} else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
			    == 0)) {
		/*
		 * new ctx not up to date yet
		 */
		CDEBUG(D_SEC,
		       "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
		       newctx, newctx->cc_flags);
	} else {
		/*
		 * it's possible newctx == oldctx if we're switching
		 * subflavor with the same sec.
		 */
		rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
		if (rc) {
			/* restore old ctx */
			sptlrpc_req_put_ctx(req, 0);
			req->rq_cli_ctx = oldctx;
			RETURN(rc);
		}

		LASSERT(req->rq_cli_ctx == newctx);
	}

	/* drop the extra ref taken at entry */
	sptlrpc_cli_ctx_put(oldctx, 1);
	RETURN(0);
}
EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
614
/**
 * Wait predicate for l_wait_event() in sptlrpc_req_refresh_ctx():
 * nonzero once \a ctx has finished refreshing.
 */
static
int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
{
	return cli_ctx_is_refreshed(ctx) ? 1 : 0;
}
622
/**
 * Timeout handler for the context-refresh wait in sptlrpc_req_refresh_ctx().
 * \a data is the waiting ptlrpc request.  Expires the request and, when no
 * recovery was started, kills its context as well.
 *
 * \retval the ptlrpc_expire_one_request() result.
 */
static
int ctx_refresh_timeout(void *data)
{
	struct ptlrpc_request *req = data;
	int rc;

	/* conn_cnt is needed in expire_one_request */
	lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);

	rc = ptlrpc_expire_one_request(req, 1);
	/* if we started recovery, we should mark this ctx dead; otherwise
	 * in case of lgssd died nobody would retire this ctx, following
	 * connecting will still find the same ctx thus cause deadlock.
	 * there's an assumption that expire time of the request should be
	 * later than the context refresh expire time.
	 */
	if (rc == 0)
		req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
	return rc;
}
643
644 static
645 void ctx_refresh_interrupt(void *data)
646 {
647         struct ptlrpc_request *req = data;
648
649         spin_lock(&req->rq_lock);
650         req->rq_intr = 1;
651         spin_unlock(&req->rq_lock);
652 }
653
654 static
655 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
656 {
657         spin_lock(&ctx->cc_lock);
658         if (!list_empty(&req->rq_ctx_chain))
659                 list_del_init(&req->rq_ctx_chain);
660         spin_unlock(&ctx->cc_lock);
661 }
662
/**
 * To refresh the context of \a req, if it's not up-to-date.
 * \param timeout
 * - < 0: don't wait
 * - = 0: wait until success or fatal error occur
 * - > 0: timeout value (in seconds)
 *
 * The status of the context could be subject to be changed by other threads
 * at any time. We allow this race, but once we return with 0, the caller will
 * suppose it's uptodated and keep using it until the owning rpc is done.
 *
 * \retval 0 only if the context is uptodated.
 * \retval -ev error number.
 */
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
{
	struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec      *sec;
	struct l_wait_info      lwi;
	int                     rc;
	ENTRY;

	LASSERT(ctx);

	/* ctx init/fini rpcs never wait for a refreshed context */
	if (req->rq_ctx_init || req->rq_ctx_fini)
		RETURN(0);

	/*
	 * during the process a request's context might change type even
	 * (e.g. from gss ctx to null ctx), so each loop we need to re-check
	 * everything
	 */
again:
	rc = import_sec_validate_get(req->rq_import, &sec);
	if (rc)
		RETURN(rc);

	/* the sec was flavor-switched under us: move the request over to a
	 * context of the new flavor before going on */
	if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
		CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
		      req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
		req_off_ctx_list(req, ctx);
		sptlrpc_req_replace_dead_ctx(req);
		ctx = req->rq_cli_ctx;
	}
	sptlrpc_sec_put(sec);

	if (cli_ctx_is_eternal(ctx))
		RETURN(0);

	/* a NEW context has never been refreshed: kick off the refresh */
	if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
		LASSERT(ctx->cc_ops->refresh);
		ctx->cc_ops->refresh(ctx);
	}
	LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);

	LASSERT(ctx->cc_ops->validate);
	if (ctx->cc_ops->validate(ctx) == 0) {
		/* context is usable now: stop waiting */
		req_off_ctx_list(req, ctx);
		RETURN(0);
	}

	/* refresh ended in error: fail the request */
	if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
		spin_lock(&req->rq_lock);
		req->rq_err = 1;
		spin_unlock(&req->rq_lock);
		req_off_ctx_list(req, ctx);
		RETURN(-EPERM);
	}

	/*
	 * There's a subtle issue for resending RPCs, suppose following
	 * situation:
	 *  1. the request was sent to server.
	 *  2. recovery was kicked start, after finished the request was
	 *     marked as resent.
	 *  3. resend the request.
	 *  4. old reply from server received, we accept and verify the reply.
	 *     this has to be success, otherwise the error will be aware
	 *     by application.
	 *  5. new reply from server received, dropped by LNet.
	 *
	 * Note the xid of old & new request is the same. We can't simply
	 * change xid for the resent request because the server replies on
	 * it for reply reconstruction.
	 *
	 * Commonly the original context should be uptodate because we
	 * have an expiry nice time; server will keep its context because
	 * we at least hold a ref of old context which prevent context
	 * from destroying RPC being sent. So server still can accept the
	 * request and finish the RPC. But if that's not the case:
	 *  1. If server side context has been trimmed, a NO_CONTEXT will
	 *     be returned, gss_cli_ctx_verify/unseal will switch to new
	 *     context by force.
	 *  2. Current context never be refreshed, then we are fine: we
	 *     never really send request with old context before.
	 */
	if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
	    unlikely(req->rq_reqmsg) &&
	    lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
		req_off_ctx_list(req, ctx);
		RETURN(0);
	}

	if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
		req_off_ctx_list(req, ctx);
		/*
		 * don't switch ctx if import was deactivated
		 */
		if (req->rq_import->imp_deactive) {
			spin_lock(&req->rq_lock);
			req->rq_err = 1;
			spin_unlock(&req->rq_lock);
			RETURN(-EINTR);
		}

		rc = sptlrpc_req_replace_dead_ctx(req);
		if (rc) {
			LASSERT(ctx == req->rq_cli_ctx);
			CERROR("req %p: failed to replace dead ctx %p: %d\n",
			       req, ctx, rc);
			spin_lock(&req->rq_lock);
			req->rq_err = 1;
			spin_unlock(&req->rq_lock);
			RETURN(rc);
		}

		/* re-validate the replacement context from the top */
		ctx = req->rq_cli_ctx;
		goto again;
	}

	/*
	 * Now we're sure this context is during upcall, add myself into
	 * waiting list
	 */
	spin_lock(&ctx->cc_lock);
	if (list_empty(&req->rq_ctx_chain))
		list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
	spin_unlock(&ctx->cc_lock);

	if (timeout < 0)
		RETURN(-EWOULDBLOCK);

	/* Clear any flags that may be present from previous sends */
	LASSERT(req->rq_receiving_reply == 0);
	spin_lock(&req->rq_lock);
	req->rq_err = 0;
	req->rq_timedout = 0;
	req->rq_resend = 0;
	req->rq_restart = 0;
	spin_unlock(&req->rq_lock);

	/* timeout == 0 means wait forever (interruptible); otherwise wait
	 * up to 'timeout' seconds before firing ctx_refresh_timeout() */
	lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
			       ctx_refresh_timeout,
			       ctx_refresh_interrupt, req);
	rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);

	/*
	 * following cases could lead us here:
	 * - successfully refreshed;
	 * - interrupted;
	 * - timedout, and we don't want recover from the failure;
	 * - timedout, and waked up upon recovery finished;
	 * - someone else mark this ctx dead by force;
	 * - someone invalidate the req and call ptlrpc_client_wake_req(),
	 *   e.g. ptlrpc_abort_inflight();
	 */
	if (!cli_ctx_is_refreshed(ctx)) {
		/* timed out or interruptted */
		req_off_ctx_list(req, ctx);

		LASSERT(rc != 0);
		RETURN(rc);
	}

	goto again;
}
839
840 /**
841  * Initialize flavor settings for \a req, according to \a opcode.
842  *
843  * \note this could be called in two situations:
844  * - new request from ptlrpc_pre_req(), with proper @opcode
845  * - old request which changed ctx in the middle, with @opcode == 0
846  */
847 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
848 {
849         struct ptlrpc_sec *sec;
850
851         LASSERT(req->rq_import);
852         LASSERT(req->rq_cli_ctx);
853         LASSERT(req->rq_cli_ctx->cc_sec);
854         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
855
856         /* special security flags according to opcode */
857         switch (opcode) {
858         case OST_READ:
859         case MDS_READPAGE:
860         case MGS_CONFIG_READ:
861         case OBD_IDX_READ:
862                 req->rq_bulk_read = 1;
863                 break;
864         case OST_WRITE:
865         case MDS_WRITEPAGE:
866                 req->rq_bulk_write = 1;
867                 break;
868         case SEC_CTX_INIT:
869                 req->rq_ctx_init = 1;
870                 break;
871         case SEC_CTX_FINI:
872                 req->rq_ctx_fini = 1;
873                 break;
874         case 0:
875                 /* init/fini rpc won't be resend, so can't be here */
876                 LASSERT(req->rq_ctx_init == 0);
877                 LASSERT(req->rq_ctx_fini == 0);
878
879                 /* cleanup flags, which should be recalculated */
880                 req->rq_pack_udesc = 0;
881                 req->rq_pack_bulk = 0;
882                 break;
883         }
884
885         sec = req->rq_cli_ctx->cc_sec;
886
887         spin_lock(&sec->ps_lock);
888         req->rq_flvr = sec->ps_flvr;
889         spin_unlock(&sec->ps_lock);
890
891         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
892          * destruction rpc */
893         if (unlikely(req->rq_ctx_init))
894                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
895         else if (unlikely(req->rq_ctx_fini))
896                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
897
898         /* user descriptor flag, null security can't do it anyway */
899         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
900             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
901                 req->rq_pack_udesc = 1;
902
903         /* bulk security flag */
904         if ((req->rq_bulk_read || req->rq_bulk_write) &&
905             sptlrpc_flavor_has_bulk(&req->rq_flvr))
906                 req->rq_pack_bulk = 1;
907 }
908
909 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
910 {
911         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
912                 return;
913
914         LASSERT(req->rq_clrbuf);
915         if (req->rq_pool || !req->rq_reqbuf)
916                 return;
917
918         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
919         req->rq_reqbuf = NULL;
920         req->rq_reqbuf_len = 0;
921 }
922
923 /**
924  * Given an import \a imp, check whether current user has a valid context
925  * or not. We may create a new context and try to refresh it, and try
926  * repeatedly try in case of non-fatal errors. Return 0 means success.
927  */
928 int sptlrpc_import_check_ctx(struct obd_import *imp)
929 {
930         struct ptlrpc_sec     *sec;
931         struct ptlrpc_cli_ctx *ctx;
932         struct ptlrpc_request *req = NULL;
933         int rc;
934         ENTRY;
935
936         might_sleep();
937
938         sec = sptlrpc_import_sec_ref(imp);
939         ctx = get_my_ctx(sec);
940         sptlrpc_sec_put(sec);
941
942         if (!ctx)
943                 RETURN(-ENOMEM);
944
945         if (cli_ctx_is_eternal(ctx) ||
946             ctx->cc_ops->validate(ctx) == 0) {
947                 sptlrpc_cli_ctx_put(ctx, 1);
948                 RETURN(0);
949         }
950
951         if (cli_ctx_is_error(ctx)) {
952                 sptlrpc_cli_ctx_put(ctx, 1);
953                 RETURN(-EACCES);
954         }
955
956         req = ptlrpc_request_cache_alloc(GFP_NOFS);
957         if (!req)
958                 RETURN(-ENOMEM);
959
960         ptlrpc_cli_req_init(req);
961         atomic_set(&req->rq_refcount, 10000);
962
963         req->rq_import = imp;
964         req->rq_flvr = sec->ps_flvr;
965         req->rq_cli_ctx = ctx;
966
967         rc = sptlrpc_req_refresh_ctx(req, 0);
968         LASSERT(list_empty(&req->rq_ctx_chain));
969         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
970         ptlrpc_request_cache_free(req);
971
972         RETURN(rc);
973 }
974
975 /**
976  * Used by ptlrpc client, to perform the pre-defined security transformation
977  * upon the request message of \a req. After this function called,
978  * req->rq_reqmsg is still accessible as clear text.
979  */
980 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
981 {
982         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
983         int rc = 0;
984         ENTRY;
985
986         LASSERT(ctx);
987         LASSERT(ctx->cc_sec);
988         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
989
990         /* we wrap bulk request here because now we can be sure
991          * the context is uptodate.
992          */
993         if (req->rq_bulk) {
994                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
995                 if (rc)
996                         RETURN(rc);
997         }
998
999         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1000         case SPTLRPC_SVC_NULL:
1001         case SPTLRPC_SVC_AUTH:
1002         case SPTLRPC_SVC_INTG:
1003                 LASSERT(ctx->cc_ops->sign);
1004                 rc = ctx->cc_ops->sign(ctx, req);
1005                 break;
1006         case SPTLRPC_SVC_PRIV:
1007                 LASSERT(ctx->cc_ops->seal);
1008                 rc = ctx->cc_ops->seal(ctx, req);
1009                 break;
1010         default:
1011                 LBUG();
1012         }
1013
1014         if (rc == 0) {
1015                 LASSERT(req->rq_reqdata_len);
1016                 LASSERT(req->rq_reqdata_len % 8 == 0);
1017                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1018         }
1019
1020         RETURN(rc);
1021 }
1022
/*
 * Common reply-unwrap path shared by regular and early replies: unpack the
 * wire message in rq_repdata, sanity-check it, then verify/unseal it
 * according to the request flavor. On success rq_repmsg points at the clear
 * reply inside the buffer.
 */
static int do_cli_unwrap_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        int                    rc;
        ENTRY;

        LASSERT(ctx);
        LASSERT(ctx->cc_sec);
        LASSERT(req->rq_repbuf);
        LASSERT(req->rq_repdata);
        LASSERT(req->rq_repmsg == NULL);

        /* start from a clean swab state; bits are set below as needed */
        req->rq_rep_swab_mask = 0;

        rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
        switch (rc) {
        case 1:
                /* rc == 1: message needed byte-swapping; remember that */
                lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
                /* fallthrough */
        case 0:
                break;
        default:
                CERROR("failed unpack reply: x%llu\n", req->rq_xid);
                RETURN(-EPROTO);
        }

        if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
                CERROR("replied data length %d too small\n",
                       req->rq_repdata_len);
                RETURN(-EPROTO);
        }

        /* the reply must carry the same security policy the request used */
        if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
            SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
                CERROR("reply policy %u doesn't match request policy %u\n",
                       SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
                       SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
                RETURN(-EPROTO);
        }

        /* verify for integrity levels, unseal for privacy */
        switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(ctx->cc_ops->verify);
                rc = ctx->cc_ops->verify(ctx, req);
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(ctx->cc_ops->unseal);
                rc = ctx->cc_ops->unseal(ctx, req);
                break;
        default:
                LBUG();
        }
        LASSERT(rc || req->rq_repmsg || req->rq_resend);

        /* NOTE(review): swab mask is cleared for non-null policies except
         * ctx-init replies — presumably the policy already swabbed the
         * embedded message during verify/unseal; confirm */
        if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
            !req->rq_ctx_init)
                req->rq_rep_swab_mask = 0;
        RETURN(rc);
}
1083
1084 /**
1085  * Used by ptlrpc client, to perform security transformation upon the reply
1086  * message of \a req. After return successfully, req->rq_repmsg points to
1087  * the reply message in clear text.
1088  *
1089  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1090  * going to change.
1091  */
1092 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1093 {
1094         LASSERT(req->rq_repbuf);
1095         LASSERT(req->rq_repdata == NULL);
1096         LASSERT(req->rq_repmsg == NULL);
1097         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1098
1099         if (req->rq_reply_off == 0 &&
1100             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1101                 CERROR("real reply with offset 0\n");
1102                 return -EPROTO;
1103         }
1104
1105         if (req->rq_reply_off % 8 != 0) {
1106                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1107                 return -EPROTO;
1108         }
1109
1110         req->rq_repdata = (struct lustre_msg *)
1111                                 (req->rq_repbuf + req->rq_reply_off);
1112         req->rq_repdata_len = req->rq_nob_received;
1113
1114         return do_cli_unwrap_reply(req);
1115 }
1116
1117 /**
1118  * Used by ptlrpc client, to perform security transformation upon the early
1119  * reply message of \a req. We expect the rq_reply_off is 0, and
1120  * rq_nob_received is the early reply size.
1121  * 
1122  * Because the receive buffer might be still posted, the reply data might be
1123  * changed at any time, no matter we're holding rq_lock or not. For this reason
1124  * we allocate a separate ptlrpc_request and reply buffer for early reply
1125  * processing.
1126  *
1127  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1128  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1129  * \a *req_ret to release it.
1130  * \retval -ev error number, and \a req_ret will not be set.
1131  */
1132 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1133                                    struct ptlrpc_request **req_ret)
1134 {
1135         struct ptlrpc_request  *early_req;
1136         char                   *early_buf;
1137         int                     early_bufsz, early_size;
1138         int                     rc;
1139         ENTRY;
1140
1141         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1142         if (early_req == NULL)
1143                 RETURN(-ENOMEM);
1144
1145         ptlrpc_cli_req_init(early_req);
1146
1147         early_size = req->rq_nob_received;
1148         early_bufsz = size_roundup_power2(early_size);
1149         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1150         if (early_buf == NULL)
1151                 GOTO(err_req, rc = -ENOMEM);
1152
1153         /* sanity checkings and copy data out, do it inside spinlock */
1154         spin_lock(&req->rq_lock);
1155
1156         if (req->rq_replied) {
1157                 spin_unlock(&req->rq_lock);
1158                 GOTO(err_buf, rc = -EALREADY);
1159         }
1160
1161         LASSERT(req->rq_repbuf);
1162         LASSERT(req->rq_repdata == NULL);
1163         LASSERT(req->rq_repmsg == NULL);
1164
1165         if (req->rq_reply_off != 0) {
1166                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1167                 spin_unlock(&req->rq_lock);
1168                 GOTO(err_buf, rc = -EPROTO);
1169         }
1170
1171         if (req->rq_nob_received != early_size) {
1172                 /* even another early arrived the size should be the same */
1173                 CERROR("data size has changed from %u to %u\n",
1174                        early_size, req->rq_nob_received);
1175                 spin_unlock(&req->rq_lock);
1176                 GOTO(err_buf, rc = -EINVAL);
1177         }
1178
1179         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1180                 CERROR("early reply length %d too small\n",
1181                        req->rq_nob_received);
1182                 spin_unlock(&req->rq_lock);
1183                 GOTO(err_buf, rc = -EALREADY);
1184         }
1185
1186         memcpy(early_buf, req->rq_repbuf, early_size);
1187         spin_unlock(&req->rq_lock);
1188
1189         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1190         early_req->rq_flvr = req->rq_flvr;
1191         early_req->rq_repbuf = early_buf;
1192         early_req->rq_repbuf_len = early_bufsz;
1193         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1194         early_req->rq_repdata_len = early_size;
1195         early_req->rq_early = 1;
1196         early_req->rq_reqmsg = req->rq_reqmsg;
1197
1198         rc = do_cli_unwrap_reply(early_req);
1199         if (rc) {
1200                 DEBUG_REQ(D_ADAPTTO, early_req,
1201                           "error %d unwrap early reply", rc);
1202                 GOTO(err_ctx, rc);
1203         }
1204
1205         LASSERT(early_req->rq_repmsg);
1206         *req_ret = early_req;
1207         RETURN(0);
1208
1209 err_ctx:
1210         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1211 err_buf:
1212         OBD_FREE_LARGE(early_buf, early_bufsz);
1213 err_req:
1214         ptlrpc_request_cache_free(early_req);
1215         RETURN(rc);
1216 }
1217
1218 /**
1219  * Used by ptlrpc client, to release a processed early reply \a early_req.
1220  *
1221  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1222  */
1223 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1224 {
1225         LASSERT(early_req->rq_repbuf);
1226         LASSERT(early_req->rq_repdata);
1227         LASSERT(early_req->rq_repmsg);
1228
1229         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1230         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1231         ptlrpc_request_cache_free(early_req);
1232 }
1233
1234 /**************************************************
1235  * sec ID                                         *
1236  **************************************************/
1237
1238 /*
1239  * "fixed" sec (e.g. null) use sec_id < 0
1240  */
1241 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1242
1243 int sptlrpc_get_next_secid(void)
1244 {
1245         return atomic_inc_return(&sptlrpc_sec_id);
1246 }
1247 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1248
1249 /**************************************************
1250  * client side high-level security APIs           *
1251  **************************************************/
1252
/* Flush cached contexts of \a sec: thin dispatcher to the policy's
 * flush_ctx_cache client op. \a uid selects one user's contexts, or -1
 * for all users (see import_flush callers below); \a grace and \a force
 * are passed through, semantics are policy-defined. */
static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
                                   int grace, int force)
{
        struct ptlrpc_sec_policy *policy = sec->ps_policy;

        LASSERT(policy->sp_cops);
        LASSERT(policy->sp_cops->flush_ctx_cache);

        return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
}
1263
1264 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1265 {
1266         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1267
1268         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1269         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1270         LASSERT(policy->sp_cops->destroy_sec);
1271
1272         CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
1273
1274         policy->sp_cops->destroy_sec(sec);
1275         sptlrpc_policy_put(policy);
1276 }
1277
/* Public wrapper for policy-dispatched sec destruction; \a sec must already
 * be fully unreferenced (asserted inside sec_cop_destroy_sec()). */
void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
{
        sec_cop_destroy_sec(sec);
}
EXPORT_SYMBOL(sptlrpc_sec_destroy);
1283
/* Notify the policy that \a sec is being retired, then flush all cached
 * contexts (uid -1, grace, force). Policies without a kill_sec op skip
 * both steps, including the flush. */
static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
        LASSERT_ATOMIC_POS(&sec->ps_refcount);

        if (sec->ps_policy->sp_cops->kill_sec) {
                sec->ps_policy->sp_cops->kill_sec(sec);

                sec_cop_flush_ctx_cache(sec, -1, 1, 1);
        }
}
1294
/* Take an additional reference on \a sec; tolerates NULL and returns the
 * argument unchanged for call-chaining. */
struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
{
        if (sec)
                atomic_inc(&sec->ps_refcount);

        return sec;
}
EXPORT_SYMBOL(sptlrpc_sec_get);
1303
/* Drop a reference on \a sec (NULL tolerated). On the last put the sec is
 * removed from the garbage collector and destroyed via its policy. */
void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
        if (sec) {
                LASSERT_ATOMIC_POS(&sec->ps_refcount);

                if (atomic_dec_and_test(&sec->ps_refcount)) {
                        sptlrpc_gc_del_sec(sec);
                        sec_cop_destroy_sec(sec);
                }
        }
}
EXPORT_SYMBOL(sptlrpc_sec_put);
1316
1317 /*
1318  * policy module is responsible for taking refrence of import
1319  */
1320 static
1321 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1322                                        struct ptlrpc_svc_ctx *svc_ctx,
1323                                        struct sptlrpc_flavor *sf,
1324                                        enum lustre_sec_part sp)
1325 {
1326         struct ptlrpc_sec_policy *policy;
1327         struct ptlrpc_sec        *sec;
1328         char                      str[32];
1329         ENTRY;
1330
1331         if (svc_ctx) {
1332                 LASSERT(imp->imp_dlm_fake == 1);
1333
1334                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1335                        imp->imp_obd->obd_type->typ_name,
1336                        imp->imp_obd->obd_name,
1337                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1338
1339                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1340                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1341         } else {
1342                 LASSERT(imp->imp_dlm_fake == 0);
1343
1344                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1345                        imp->imp_obd->obd_type->typ_name,
1346                        imp->imp_obd->obd_name,
1347                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1348
1349                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1350                 if (!policy) {
1351                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1352                         RETURN(NULL);
1353                 }
1354         }
1355
1356         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1357         if (sec) {
1358                 atomic_inc(&sec->ps_refcount);
1359
1360                 sec->ps_part = sp;
1361
1362                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1363                         sptlrpc_gc_add_sec(sec);
1364         } else {
1365                 sptlrpc_policy_put(policy);
1366         }
1367
1368         RETURN(sec);
1369 }
1370
/* Return imp->imp_sec with a reference taken (or NULL). imp_lock guards
 * against a concurrent sptlrpc_import_sec_install() swapping imp_sec. */
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
{
        struct ptlrpc_sec *sec;

        spin_lock(&imp->imp_lock);
        sec = sptlrpc_sec_get(imp->imp_sec);
        spin_unlock(&imp->imp_lock);

        return sec;
}
EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1382
/* Swap imp->imp_sec to \a sec under imp_lock (the caller's reference is
 * transferred to the import). The displaced sec, if any, is killed and the
 * import's reference on it is dropped outside the lock. */
static void sptlrpc_import_sec_install(struct obd_import *imp,
                                       struct ptlrpc_sec *sec)
{
        struct ptlrpc_sec *old_sec;

        LASSERT_ATOMIC_POS(&sec->ps_refcount);

        spin_lock(&imp->imp_lock);
        old_sec = imp->imp_sec;
        imp->imp_sec = sec;
        spin_unlock(&imp->imp_lock);

        if (old_sec) {
                sptlrpc_sec_kill(old_sec);

                /* balance the ref taken by this import */
                sptlrpc_sec_put(old_sec);
        }
}
1402
/* Byte-wise equality of two flavor descriptors.
 * NOTE(review): memcmp also compares any struct padding — assumes flavors
 * are always fully zeroed before being filled in; confirm. */
static inline
int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
{
        return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
}
1408
/* Copy a flavor descriptor by structure assignment. */
static inline
void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
{
        *dst = *src;
}
1414
1415 /**
1416  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1417  * configuration. Upon called, imp->imp_sec may or may not be NULL.
1418  *
1419  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1420  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1421  */
1422 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1423                              struct ptlrpc_svc_ctx *svc_ctx,
1424                              struct sptlrpc_flavor *flvr)
1425 {
1426         struct ptlrpc_connection   *conn;
1427         struct sptlrpc_flavor       sf;
1428         struct ptlrpc_sec          *sec, *newsec;
1429         enum lustre_sec_part        sp;
1430         char                        str[24];
1431         int                         rc = 0;
1432         ENTRY;
1433
1434         might_sleep();
1435
1436         if (imp == NULL)
1437                 RETURN(0);
1438
1439         conn = imp->imp_connection;
1440
1441         if (svc_ctx == NULL) {
1442                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1443                 /*
1444                  * normal import, determine flavor from rule set, except
1445                  * for mgc the flavor is predetermined.
1446                  */
1447                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1448                         sf = cliobd->cl_flvr_mgc;
1449                 else 
1450                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1451                                                    cliobd->cl_sp_to,
1452                                                    &cliobd->cl_target_uuid,
1453                                                    conn->c_self, &sf);
1454
1455                 sp = imp->imp_obd->u.cli.cl_sp_me;
1456         } else {
1457                 /* reverse import, determine flavor from incoming reqeust */
1458                 sf = *flvr;
1459
1460                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1461                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1462                                       PTLRPC_SEC_FL_ROOTONLY;
1463
1464                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1465         }
1466
1467         sec = sptlrpc_import_sec_ref(imp);
1468         if (sec) {
1469                 char    str2[24];
1470
1471                 if (flavor_equal(&sf, &sec->ps_flvr))
1472                         GOTO(out, rc);
1473
1474                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1475                        imp->imp_obd->obd_name,
1476                        obd_uuid2str(&conn->c_remote_uuid),
1477                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1478                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1479         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1480                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1481                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1482                        imp->imp_obd->obd_name,
1483                        obd_uuid2str(&conn->c_remote_uuid),
1484                        LNET_NIDNET(conn->c_self),
1485                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1486         }
1487
1488         mutex_lock(&imp->imp_sec_mutex);
1489
1490         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1491         if (newsec) {
1492                 sptlrpc_import_sec_install(imp, newsec);
1493         } else {
1494                 CERROR("import %s->%s: failed to create new sec\n",
1495                        imp->imp_obd->obd_name,
1496                        obd_uuid2str(&conn->c_remote_uuid));
1497                 rc = -EPERM;
1498         }
1499
1500         mutex_unlock(&imp->imp_sec_mutex);
1501 out:
1502         sptlrpc_sec_put(sec);
1503         RETURN(rc);
1504 }
1505
/* Final teardown of the import's sec: kill it, drop the import's reference
 * and clear the pointer.
 * NOTE(review): unlike sptlrpc_import_sec_install() this touches
 * imp->imp_sec without imp_lock — presumably only called when no concurrent
 * users remain (import teardown); confirm against callers. */
void sptlrpc_import_sec_put(struct obd_import *imp)
{
        if (imp->imp_sec) {
                sptlrpc_sec_kill(imp->imp_sec);

                sptlrpc_sec_put(imp->imp_sec);
                imp->imp_sec = NULL;
        }
}
1515
1516 static void import_flush_ctx_common(struct obd_import *imp,
1517                                     uid_t uid, int grace, int force)
1518 {
1519         struct ptlrpc_sec *sec;
1520
1521         if (imp == NULL)
1522                 return;
1523
1524         sec = sptlrpc_import_sec_ref(imp);
1525         if (sec == NULL)
1526                 return;
1527
1528         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1529         sptlrpc_sec_put(sec);
1530 }
1531
/* Flush root's (uid 0) contexts of \a imp.
 * it's important to use grace mode, see explain in
 * sptlrpc_req_refresh_ctx() */
void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
{
        import_flush_ctx_common(imp, 0, 1, 1);
}
1538
/* Flush the calling user's contexts of \a imp, gracefully and by force. */
void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
{
        import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
                                1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1545
/* Flush every user's contexts of \a imp (uid -1 matches all users). */
void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
{
        import_flush_ctx_common(imp, -1, 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1551
1552 /**
1553  * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1554  * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1555  */
1556 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1557 {
1558         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1559         struct ptlrpc_sec_policy *policy;
1560         int rc;
1561
1562         LASSERT(ctx);
1563         LASSERT(ctx->cc_sec);
1564         LASSERT(ctx->cc_sec->ps_policy);
1565         LASSERT(req->rq_reqmsg == NULL);
1566         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1567
1568         policy = ctx->cc_sec->ps_policy;
1569         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1570         if (!rc) {
1571                 LASSERT(req->rq_reqmsg);
1572                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1573
1574                 /* zeroing preallocated buffer */
1575                 if (req->rq_pool)
1576                         memset(req->rq_reqmsg, 0, msgsize);
1577         }
1578
1579         return rc;
1580 }
1581
1582 /**
1583  * Used by ptlrpc client to free request buffer of \a req. After this
1584  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1585  */
1586 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1587 {
1588         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1589         struct ptlrpc_sec_policy *policy;
1590
1591         LASSERT(ctx);
1592         LASSERT(ctx->cc_sec);
1593         LASSERT(ctx->cc_sec->ps_policy);
1594         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1595
1596         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1597                 return;
1598
1599         policy = ctx->cc_sec->ps_policy;
1600         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1601         req->rq_reqmsg = NULL;
1602 }
1603
1604 /*
1605  * NOTE caller must guarantee the buffer size is enough for the enlargement
1606  */
1607 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1608                                   int segment, int newsize)
1609 {
1610         void   *src, *dst;
1611         int     oldsize, oldmsg_size, movesize;
1612
1613         LASSERT(segment < msg->lm_bufcount);
1614         LASSERT(msg->lm_buflens[segment] <= newsize);
1615
1616         if (msg->lm_buflens[segment] == newsize)
1617                 return;
1618
1619         /* nothing to do if we are enlarging the last segment */
1620         if (segment == msg->lm_bufcount - 1) {
1621                 msg->lm_buflens[segment] = newsize;
1622                 return;
1623         }
1624
1625         oldsize = msg->lm_buflens[segment];
1626
1627         src = lustre_msg_buf(msg, segment + 1, 0);
1628         msg->lm_buflens[segment] = newsize;
1629         dst = lustre_msg_buf(msg, segment + 1, 0);
1630         msg->lm_buflens[segment] = oldsize;
1631
1632         /* move from segment + 1 to end segment */
1633         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1634         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1635         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1636         LASSERT(movesize >= 0);
1637
1638         if (movesize)
1639                 memmove(dst, src, movesize);
1640
1641         /* note we don't clear the ares where old data live, not secret */
1642
1643         /* finally set new segment size */
1644         msg->lm_buflens[segment] = newsize;
1645 }
1646 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1647
1648 /**
1649  * Used by ptlrpc client to enlarge the \a segment of request message pointed
1650  * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be
1651  * preserved after the enlargement. this must be called after original request
1652  * buffer being allocated.
1653  *
1654  * \note after this be called, rq_reqmsg and rq_reqlen might have been changed,
1655  * so caller should refresh its local pointers if needed.
1656  */
1657 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1658                                const struct req_msg_field *field,
1659                                int newsize)
1660 {
1661         struct req_capsule *pill = &req->rq_pill;
1662         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1663         struct ptlrpc_sec_cops *cops;
1664         struct lustre_msg *msg = req->rq_reqmsg;
1665         int segment = __req_capsule_offset(pill, field, RCL_CLIENT);
1666
1667         LASSERT(ctx);
1668         LASSERT(msg);
1669         LASSERT(msg->lm_bufcount > segment);
1670         LASSERT(msg->lm_buflens[segment] <= newsize);
1671
1672         if (msg->lm_buflens[segment] == newsize)
1673                 return 0;
1674
1675         cops = ctx->cc_sec->ps_policy->sp_cops;
1676         LASSERT(cops->enlarge_reqbuf);
1677         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1678 }
1679 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1680
1681 /**
1682  * Used by ptlrpc client to allocate reply buffer of \a req.
1683  *
1684  * \note After this, req->rq_repmsg is still not accessible.
1685  */
1686 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1687 {
1688         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1689         struct ptlrpc_sec_policy *policy;
1690         ENTRY;
1691
1692         LASSERT(ctx);
1693         LASSERT(ctx->cc_sec);
1694         LASSERT(ctx->cc_sec->ps_policy);
1695
1696         if (req->rq_repbuf)
1697                 RETURN(0);
1698
1699         policy = ctx->cc_sec->ps_policy;
1700         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1701 }
1702
1703 /**
1704  * Used by ptlrpc client to free reply buffer of \a req. After this
1705  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1706  */
1707 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1708 {
1709         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1710         struct ptlrpc_sec_policy *policy;
1711         ENTRY;
1712
1713         LASSERT(ctx);
1714         LASSERT(ctx->cc_sec);
1715         LASSERT(ctx->cc_sec->ps_policy);
1716         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1717
1718         if (req->rq_repbuf == NULL)
1719                 return;
1720         LASSERT(req->rq_repbuf_len);
1721
1722         policy = ctx->cc_sec->ps_policy;
1723         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1724         req->rq_repmsg = NULL;
1725         EXIT;
1726 }
1727 EXPORT_SYMBOL(sptlrpc_cli_free_repbuf);
1728
1729 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1730                                 struct ptlrpc_cli_ctx *ctx)
1731 {
1732         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1733
1734         if (!policy->sp_cops->install_rctx)
1735                 return 0;
1736         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1737 }
1738
1739 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1740                                 struct ptlrpc_svc_ctx *ctx)
1741 {
1742         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1743
1744         if (!policy->sp_sops->install_rctx)
1745                 return 0;
1746         return policy->sp_sops->install_rctx(imp, ctx);
1747 }
1748
1749 /****************************************
1750  * server side security                 *
1751  ****************************************/
1752
1753 static int flavor_allowed(struct sptlrpc_flavor *exp,
1754                           struct ptlrpc_request *req)
1755 {
1756         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1757
1758         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1759                 return 1;
1760
1761         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1762             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1763             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1764             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1765                 return 1;
1766
1767         return 0;
1768 }
1769
1770 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1771
1772 /**
1773  * Given an export \a exp, check whether the flavor of incoming \a req
1774  * is allowed by the export \a exp. Main logic is about taking care of
1775  * changing configurations. Return 0 means success.
1776  */
int sptlrpc_target_export_check(struct obd_export *exp,
                                struct ptlrpc_request *req)
{
        struct sptlrpc_flavor   flavor;

        if (exp == NULL)
                return 0;

        /* client side export has no imp_reverse, skip
         * FIXME maybe we should check flavor this as well??? */
        if (exp->exp_imp_reverse == NULL)
                return 0;

        /* don't care about ctx fini rpc */
        if (req->rq_ctx_fini)
                return 0;

        spin_lock(&exp->exp_lock);

        /* The export keeps up to three flavors: exp_flvr (current) plus
         * exp_flvr_old[0]/[1] (middle/oldest), each old slot with its own
         * expiry time in exp_flvr_expire[].  All reads/writes of these
         * fields below happen under exp_lock. */

        /* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
         * the first req with the new flavor, then treat it as current flavor,
         * adapt reverse sec according to it.
         * note the first rpc with new flavor might not be with root ctx, in
         * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
        if (unlikely(exp->exp_flvr_changed) &&
            flavor_allowed(&exp->exp_flvr_old[1], req)) {
                /* make the new flavor as "current", and old ones as
                 * about-to-expire */
                CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
                       exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
                flavor = exp->exp_flvr_old[1];
                exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
                exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
                exp->exp_flvr_old[0] = exp->exp_flvr;
                exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
                                          EXP_FLVR_UPDATE_EXPIRE;
                exp->exp_flvr = flavor;

                /* flavor change finished */
                exp->exp_flvr_changed = 0;
                LASSERT(exp->exp_flvr_adapt == 1);

                /* if it's gss, we only interested in root ctx init */
                if (req->rq_auth_gss &&
                    !(req->rq_ctx_init &&
                      (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
                       req->rq_auth_usr_ost))) {
                        spin_unlock(&exp->exp_lock);
                        CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
                               req->rq_auth_gss, req->rq_ctx_init,
                               req->rq_auth_usr_root, req->rq_auth_usr_mdt,
                               req->rq_auth_usr_ost);
                        return 0;
                }

                exp->exp_flvr_adapt = 0;
                spin_unlock(&exp->exp_lock);

                /* adapt the reverse import's security to the new flavor;
                 * called after dropping exp_lock on a stack copy of it */
                return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                req->rq_svc_ctx, &flavor);
        }

        /* if it equals to the current flavor, we accept it, but need to
         * dealing with reverse sec/ctx */
        if (likely(flavor_allowed(&exp->exp_flvr, req))) {
                /* most cases should return here, we only interested in
                 * gss root ctx init */
                if (!req->rq_auth_gss || !req->rq_ctx_init ||
                    (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
                     !req->rq_auth_usr_ost)) {
                        spin_unlock(&exp->exp_lock);
                        return 0;
                }

                /* if flavor just changed, we should not proceed, just leave
                 * it and current flavor will be discovered and replaced
                 * shortly, and let _this_ rpc pass through */
                if (exp->exp_flvr_changed) {
                        LASSERT(exp->exp_flvr_adapt);
                        spin_unlock(&exp->exp_lock);
                        return 0;
                }

                if (exp->exp_flvr_adapt) {
                        /* a previously-delayed sec_adapt (see above) is now
                         * performed by this root ctx init request */
                        exp->exp_flvr_adapt = 0;
                        CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
                               exp, exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[0].sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                        flavor = exp->exp_flvr;
                        spin_unlock(&exp->exp_lock);

                        return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                        req->rq_svc_ctx,
                                                        &flavor);
                } else {
                        CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
                               "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[0].sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                        spin_unlock(&exp->exp_lock);

                        return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
                                                           req->rq_svc_ctx);
                }
        }

        /* try the middle (most recently retired) flavor, if not expired */
        if (exp->exp_flvr_expire[0]) {
                if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
                        if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
                                CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (%lld)\n", exp,
                                       exp->exp_flvr.sf_rpc,
                                       exp->exp_flvr_old[0].sf_rpc,
                                       exp->exp_flvr_old[1].sf_rpc,
                                       (s64)(exp->exp_flvr_expire[0] -
                                       ktime_get_real_seconds()));
                                spin_unlock(&exp->exp_lock);
                                return 0;
                        }
                } else {
                        CDEBUG(D_SEC, "mark middle expired\n");
                        exp->exp_flvr_expire[0] = 0;
                }
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
                       exp->exp_flvr.sf_rpc,
                       exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
                       req->rq_flvr.sf_rpc);
        }

        /* now it doesn't match the current flavor, the only chance we can
         * accept it is match the old flavors which is not expired. */
        if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
                if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
                        if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
                                CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
                                       exp,
                                       exp->exp_flvr.sf_rpc,
                                       exp->exp_flvr_old[0].sf_rpc,
                                       exp->exp_flvr_old[1].sf_rpc,
                                       (s64)(exp->exp_flvr_expire[1] -
                                       ktime_get_real_seconds()));
                                spin_unlock(&exp->exp_lock);
                                return 0;
                        }
                } else {
                        CDEBUG(D_SEC, "mark oldest expired\n");
                        exp->exp_flvr_expire[1] = 0;
                }
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
                       exp, exp->exp_flvr.sf_rpc,
                       exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
                       req->rq_flvr.sf_rpc);
        } else {
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
                       exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
                       exp->exp_flvr_old[1].sf_rpc);
        }

        spin_unlock(&exp->exp_lock);

        /* no flavor slot accepted this request: log and refuse it */
        CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
              exp, exp->exp_obd->obd_name,
              req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
              req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
              req->rq_flvr.sf_rpc,
              exp->exp_flvr.sf_rpc,
              exp->exp_flvr_old[0].sf_rpc,
              exp->exp_flvr_expire[0] ?
              (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
              exp->exp_flvr_old[1].sf_rpc,
              exp->exp_flvr_expire[1] ?
              (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
        return -EACCES;
}
EXPORT_SYMBOL(sptlrpc_target_export_check);
1952
/**
 * Walk all exports of \a obd and re-choose each export's flavor against
 * the (possibly just changed) rule set \a rset.  An export whose chosen
 * flavor differs from its current one is marked changed/adapt so that
 * sptlrpc_target_export_check() completes the switch on the next matching
 * request.  Takes obd_dev_lock, then each exp_lock nested inside it.
 */
void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
                                      struct sptlrpc_rule_set *rset)
{
        struct obd_export       *exp;
        struct sptlrpc_flavor    new_flvr;

        LASSERT(obd);

        spin_lock(&obd->obd_dev_lock);

        list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
                /* skip exports that are not connected yet */
                if (exp->exp_connection == NULL)
                        continue;

                /* note if this export had just been updated flavor
                 * (exp_flvr_changed == 1), this will override the
                 * previous one. */
                spin_lock(&exp->exp_lock);
                sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
                                             exp->exp_connection->c_peer.nid,
                                             &new_flvr);
                if (exp->exp_flvr_changed ||
                    !flavor_equal(&new_flvr, &exp->exp_flvr)) {
                        /* stage the new flavor in slot [1]; it becomes
                         * current once the first matching request arrives */
                        exp->exp_flvr_old[1] = new_flvr;
                        exp->exp_flvr_expire[1] = 0;
                        exp->exp_flvr_changed = 1;
                        exp->exp_flvr_adapt = 1;

                        CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
                               exp, sptlrpc_part2name(exp->exp_sp_peer),
                               exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                }
                spin_unlock(&exp->exp_lock);
        }

        spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1992
1993 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1994 {
1995         /* peer's claim is unreliable unless gss is being used */
1996         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
1997                 return svc_rc;
1998
1999         switch (req->rq_sp_from) {
2000         case LUSTRE_SP_CLI:
2001                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2002                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2003                         svc_rc = SECSVC_DROP;
2004                 }
2005                 break;
2006         case LUSTRE_SP_MDT:
2007                 if (!req->rq_auth_usr_mdt) {
2008                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2009                         svc_rc = SECSVC_DROP;
2010                 }
2011                 break;
2012         case LUSTRE_SP_OST:
2013                 if (!req->rq_auth_usr_ost) {
2014                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2015                         svc_rc = SECSVC_DROP;
2016                 }
2017                 break;
2018         case LUSTRE_SP_MGS:
2019         case LUSTRE_SP_MGC:
2020                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2021                     !req->rq_auth_usr_ost) {
2022                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2023                         svc_rc = SECSVC_DROP;
2024                 }
2025                 break;
2026         case LUSTRE_SP_ANY:
2027         default:
2028                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2029                 svc_rc = SECSVC_DROP;
2030         }
2031
2032         return svc_rc;
2033 }
2034
2035 /**
2036  * Used by ptlrpc server, to perform transformation upon request message of
2037  * incoming \a req. This must be the first thing to do with an incoming
2038  * request in ptlrpc layer.
2039  *
2040  * \retval SECSVC_OK success, and req->rq_reqmsg point to request message in
2041  * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
2042  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2043  * reply message has been prepared.
2044  * \retval SECSVC_DROP failed, this request should be dropped.
2045  */
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
{
        struct ptlrpc_sec_policy *policy;
        struct lustre_msg        *msg = req->rq_reqbuf;
        int                       rc;
        ENTRY;

        LASSERT(msg);
        LASSERT(req->rq_reqmsg == NULL);
        LASSERT(req->rq_repmsg == NULL);
        LASSERT(req->rq_svc_ctx == NULL);

        req->rq_req_swab_mask = 0;

        /* unpack the outer (wire) message header; rc == 1 means the peer
         * has opposite endianness and the header was byte-swapped */
        rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
        switch (rc) {
        case 1:
                lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
                /* fallthrough */
        case 0:
                break;
        default:
                CERROR("error unpacking request from %s x%llu\n",
                       libcfs_id2str(req->rq_peer), req->rq_xid);
                RETURN(SECSVC_DROP);
        }

        req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
        req->rq_sp_from = LUSTRE_SP_ANY;
        req->rq_auth_uid = -1;          /* set to INVALID_UID */
        req->rq_auth_mapped_uid = -1;

        /* takes a reference on the policy; dropped after accept() below */
        policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
        if (!policy) {
                CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
                RETURN(SECSVC_DROP);
        }

        LASSERT(policy->sp_sops->accept);
        rc = policy->sp_sops->accept(req);
        sptlrpc_policy_put(policy);
        LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
        LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);

        /*
         * if it's not null flavor (which means embedded packing msg),
         * reset the swab mask for the coming inner msg unpacking.
         */
        if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
                req->rq_req_swab_mask = 0;

        /* sanity check for the request source */
        rc = sptlrpc_svc_check_from(req, rc);
        RETURN(rc);
}
2100
2101 /**
2102  * Used by ptlrpc server, to allocate reply buffer for \a req. If succeed,
2103  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg point to
2104  * a buffer of \a msglen size.
2105  */
2106 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2107 {
2108         struct ptlrpc_sec_policy *policy;
2109         struct ptlrpc_reply_state *rs;
2110         int rc;
2111         ENTRY;
2112
2113         LASSERT(req->rq_svc_ctx);
2114         LASSERT(req->rq_svc_ctx->sc_policy);
2115
2116         policy = req->rq_svc_ctx->sc_policy;
2117         LASSERT(policy->sp_sops->alloc_rs);
2118
2119         rc = policy->sp_sops->alloc_rs(req, msglen);
2120         if (unlikely(rc == -ENOMEM)) {
2121                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2122                 if (svcpt->scp_service->srv_max_reply_size <
2123                    msglen + sizeof(struct ptlrpc_reply_state)) {
2124                         /* Just return failure if the size is too big */
2125                         CERROR("size of message is too big (%zd), %d allowed\n",
2126                                 msglen + sizeof(struct ptlrpc_reply_state),
2127                                 svcpt->scp_service->srv_max_reply_size);
2128                         RETURN(-ENOMEM);
2129                 }
2130
2131                 /* failed alloc, try emergency pool */
2132                 rs = lustre_get_emerg_rs(svcpt);
2133                 if (rs == NULL)
2134                         RETURN(-ENOMEM);
2135
2136                 req->rq_reply_state = rs;
2137                 rc = policy->sp_sops->alloc_rs(req, msglen);
2138                 if (rc) {
2139                         lustre_put_emerg_rs(rs);
2140                         req->rq_reply_state = NULL;
2141                 }
2142         }
2143
2144         LASSERT(rc != 0 ||
2145                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2146
2147         RETURN(rc);
2148 }
2149
2150 /**
2151  * Used by ptlrpc server, to perform transformation upon reply message.
2152  *
2153  * \post req->rq_reply_off is set to approriate server-controlled reply offset.
2154  * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2155  */
2156 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2157 {
2158         struct ptlrpc_sec_policy *policy;
2159         int rc;
2160         ENTRY;
2161
2162         LASSERT(req->rq_svc_ctx);
2163         LASSERT(req->rq_svc_ctx->sc_policy);
2164
2165         policy = req->rq_svc_ctx->sc_policy;
2166         LASSERT(policy->sp_sops->authorize);
2167
2168         rc = policy->sp_sops->authorize(req);
2169         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2170
2171         RETURN(rc);
2172 }
2173
2174 /**
2175  * Used by ptlrpc server, to free reply_state.
2176  */
2177 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2178 {
2179         struct ptlrpc_sec_policy *policy;
2180         unsigned int prealloc;
2181         ENTRY;
2182
2183         LASSERT(rs->rs_svc_ctx);
2184         LASSERT(rs->rs_svc_ctx->sc_policy);
2185
2186         policy = rs->rs_svc_ctx->sc_policy;
2187         LASSERT(policy->sp_sops->free_rs);
2188
2189         prealloc = rs->rs_prealloc;
2190         policy->sp_sops->free_rs(rs);
2191
2192         if (prealloc)
2193                 lustre_put_emerg_rs(rs);
2194         EXIT;
2195 }
2196
2197 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2198 {
2199         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2200
2201         if (ctx != NULL)
2202                 atomic_inc(&ctx->sc_refcount);
2203 }
2204
2205 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2206 {
2207         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2208
2209         if (ctx == NULL)
2210                 return;
2211
2212         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2213         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2214                 if (ctx->sc_policy->sp_sops->free_ctx)
2215                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2216         }
2217         req->rq_svc_ctx = NULL;
2218 }
2219
2220 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2221 {
2222         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2223
2224         if (ctx == NULL)
2225                 return;
2226
2227         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2228         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2229                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2230 }
2231 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2232
2233 /****************************************
2234  * bulk security                        *
2235  ****************************************/
2236
2237 /**
2238  * Perform transformation upon bulk data pointed by \a desc. This is called
2239  * before transforming the request message.
2240  */
2241 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2242                           struct ptlrpc_bulk_desc *desc)
2243 {
2244         struct ptlrpc_cli_ctx *ctx;
2245
2246         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2247
2248         if (!req->rq_pack_bulk)
2249                 return 0;
2250
2251         ctx = req->rq_cli_ctx;
2252         if (ctx->cc_ops->wrap_bulk)
2253                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2254         return 0;
2255 }
2256 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2257
2258 /**
2259  * This is called after unwrap the reply message.
2260  * return nob of actual plain text size received, or error code.
2261  */
2262 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2263                                  struct ptlrpc_bulk_desc *desc,
2264                                  int nob)
2265 {
2266         struct ptlrpc_cli_ctx  *ctx;
2267         int                     rc;
2268
2269         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2270
2271         if (!req->rq_pack_bulk)
2272                 return desc->bd_nob_transferred;
2273
2274         ctx = req->rq_cli_ctx;
2275         if (ctx->cc_ops->unwrap_bulk) {
2276                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2277                 if (rc < 0)
2278                         return rc;
2279         }
2280         return desc->bd_nob_transferred;
2281 }
2282 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2283
2284 /**
2285  * This is called after unwrap the reply message.
2286  * return 0 for success or error code.
2287  */
2288 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2289                                   struct ptlrpc_bulk_desc *desc)
2290 {
2291         struct ptlrpc_cli_ctx  *ctx;
2292         int                     rc;
2293
2294         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2295
2296         if (!req->rq_pack_bulk)
2297                 return 0;
2298
2299         ctx = req->rq_cli_ctx;
2300         if (ctx->cc_ops->unwrap_bulk) {
2301                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2302                 if (rc < 0)
2303                         return rc;
2304         }
2305
2306         /*
2307          * if everything is going right, nob should equals to nob_transferred.
2308          * in case of privacy mode, nob_transferred needs to be adjusted.
2309          */
2310         if (desc->bd_nob != desc->bd_nob_transferred) {
2311                 CERROR("nob %d doesn't match transferred nob %d\n",
2312                        desc->bd_nob, desc->bd_nob_transferred);
2313                 return -EPROTO;
2314         }
2315
2316         return 0;
2317 }
2318 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2319
2320 #ifdef HAVE_SERVER_SUPPORT
2321 /**
2322  * Performe transformation upon outgoing bulk read.
2323  */
2324 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2325                           struct ptlrpc_bulk_desc *desc)
2326 {
2327         struct ptlrpc_svc_ctx *ctx;
2328
2329         LASSERT(req->rq_bulk_read);
2330
2331         if (!req->rq_pack_bulk)
2332                 return 0;
2333
2334         ctx = req->rq_svc_ctx;
2335         if (ctx->sc_policy->sp_sops->wrap_bulk)
2336                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2337
2338         return 0;
2339 }
2340 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2341
2342 /**
2343  * Performe transformation upon incoming bulk write.
2344  */
2345 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2346                             struct ptlrpc_bulk_desc *desc)
2347 {
2348         struct ptlrpc_svc_ctx *ctx;
2349         int                    rc;
2350
2351         LASSERT(req->rq_bulk_write);
2352
2353         /*
2354          * if it's in privacy mode, transferred should >= expected; otherwise
2355          * transferred should == expected.
2356          */
2357         if (desc->bd_nob_transferred < desc->bd_nob ||
2358             (desc->bd_nob_transferred > desc->bd_nob &&
2359              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2360              SPTLRPC_BULK_SVC_PRIV)) {
2361                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2362                           desc->bd_nob_transferred, desc->bd_nob);
2363                 return -ETIMEDOUT;
2364         }
2365
2366         if (!req->rq_pack_bulk)
2367                 return 0;
2368
2369         ctx = req->rq_svc_ctx;
2370         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2371                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2372                 if (rc)
2373                         CERROR("error unwrap bulk: %d\n", rc);
2374         }
2375
2376         /* return 0 to allow reply be sent */
2377         return 0;
2378 }
2379 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2380
2381 /**
2382  * Prepare buffers for incoming bulk write.
2383  */
2384 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2385                           struct ptlrpc_bulk_desc *desc)
2386 {
2387         struct ptlrpc_svc_ctx *ctx;
2388
2389         LASSERT(req->rq_bulk_write);
2390
2391         if (!req->rq_pack_bulk)
2392                 return 0;
2393
2394         ctx = req->rq_svc_ctx;
2395         if (ctx->sc_policy->sp_sops->prep_bulk)
2396                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2397
2398         return 0;
2399 }
2400 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2401
2402 #endif /* HAVE_SERVER_SUPPORT */
2403
2404 /****************************************
2405  * user descriptor helpers              *
2406  ****************************************/
2407
2408 int sptlrpc_current_user_desc_size(void)
2409 {
2410         int ngroups;
2411
2412         ngroups = current_ngroups;
2413
2414         if (ngroups > LUSTRE_MAX_GROUPS)
2415                 ngroups = LUSTRE_MAX_GROUPS;
2416         return sptlrpc_user_desc_size(ngroups);
2417 }
2418 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2419
/**
 * Fill the user descriptor at buffer \a offset of \a msg with the current
 * task's credentials (uid/gid/fsuid/fsgid/capabilities) and as many
 * supplementary groups as the buffer was sized for.  Always returns 0.
 */
int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_user_desc *pud;

        pud = lustre_msg_buf(msg, offset, 0);

        pud->pud_uid = from_kuid(&init_user_ns, current_uid());
        pud->pud_gid = from_kgid(&init_user_ns, current_gid());
        pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
        pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
        pud->pud_cap = cfs_curproc_cap_pack();
        /* group capacity implied by the buffer size chosen by the caller */
        pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;

        /* task_lock keeps current's group_info stable while copying */
        task_lock(current);
        if (pud->pud_ngroups > current_ngroups)
                pud->pud_ngroups = current_ngroups;
#ifdef HAVE_GROUP_INFO_GID
        /* newer kernels: group_info holds a flat gid array */
        memcpy(pud->pud_groups, current_cred()->group_info->gid,
               pud->pud_ngroups * sizeof(__u32));
#else /* !HAVE_GROUP_INFO_GID */
        /* older kernels: groups live in per-page blocks; copy block 0 */
        memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
               pud->pud_ngroups * sizeof(__u32));
#endif /* HAVE_GROUP_INFO_GID */
        task_unlock(current);

        return 0;
}
EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2448
2449 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2450 {
2451         struct ptlrpc_user_desc *pud;
2452         int                      i;
2453
2454         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2455         if (!pud)
2456                 return -EINVAL;
2457
2458         if (swabbed) {
2459                 __swab32s(&pud->pud_uid);
2460                 __swab32s(&pud->pud_gid);
2461                 __swab32s(&pud->pud_fsuid);
2462                 __swab32s(&pud->pud_fsgid);
2463                 __swab32s(&pud->pud_cap);
2464                 __swab32s(&pud->pud_ngroups);
2465         }
2466
2467         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2468                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2469                 return -EINVAL;
2470         }
2471
2472         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2473             msg->lm_buflens[offset]) {
2474                 CERROR("%u groups are claimed but bufsize only %u\n",
2475                        pud->pud_ngroups, msg->lm_buflens[offset]);
2476                 return -EINVAL;
2477         }
2478
2479         if (swabbed) {
2480                 for (i = 0; i < pud->pud_ngroups; i++)
2481                         __swab32s(&pud->pud_groups[i]);
2482         }
2483
2484         return 0;
2485 }
2486 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2487
2488 /****************************************
2489  * misc helpers                         *
2490  ****************************************/
2491
2492 const char * sec2target_str(struct ptlrpc_sec *sec)
2493 {
2494         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2495                 return "*";
2496         if (sec_is_reverse(sec))
2497                 return "c";
2498         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2499 }
2500 EXPORT_SYMBOL(sec2target_str);
2501
2502 /*
2503  * return true if the bulk data is protected
2504  */
2505 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2506 {
2507         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2508         case SPTLRPC_BULK_SVC_INTG:
2509         case SPTLRPC_BULK_SVC_PRIV:
2510                 return 1;
2511         default:
2512                 return 0;
2513         }
2514 }
2515 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2516
/****************************************
 * crypto API helper/alloc blkcipher    *
 ****************************************/
2520
2521 /****************************************
2522  * initialize/finalize                  *
2523  ****************************************/
2524
/**
 * Initialize the sptlrpc subsystem: GC thread, configuration, encryption
 * page pool, the built-in null and plain policies, and procfs entries.
 * On any failure the already-initialized stages are unwound in reverse
 * order via the goto chain below.  Returns 0 or a negative errno.
 */
int sptlrpc_init(void)
{
        int rc;

        rwlock_init(&policy_lock);

        rc = sptlrpc_gc_init();
        if (rc)
                goto out;

        rc = sptlrpc_conf_init();
        if (rc)
                goto out_gc;

        rc = sptlrpc_enc_pool_init();
        if (rc)
                goto out_conf;

        rc = sptlrpc_null_init();
        if (rc)
                goto out_pool;

        rc = sptlrpc_plain_init();
        if (rc)
                goto out_null;

        rc = sptlrpc_lproc_init();
        if (rc)
                goto out_plain;

        return 0;

        /* error unwind: each label undoes the stages initialized above it */
out_plain:
        sptlrpc_plain_fini();
out_null:
        sptlrpc_null_fini();
out_pool:
        sptlrpc_enc_pool_fini();
out_conf:
        sptlrpc_conf_fini();
out_gc:
        sptlrpc_gc_fini();
out:
        return rc;
}
2570
/**
 * Tear down the sptlrpc subsystem; stages are finalized in exact reverse
 * order of their initialization in sptlrpc_init().
 */
void sptlrpc_fini(void)
{
        sptlrpc_lproc_fini();
        sptlrpc_plain_fini();
        sptlrpc_null_fini();
        sptlrpc_enc_pool_fini();
        sptlrpc_conf_fini();
        sptlrpc_gc_fini();
}