1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_SEC
42
43 #include <linux/user_namespace.h>
44 #ifdef HAVE_UIDGID_HEADER
45 # include <linux/uidgid.h>
46 #endif
47 #include <linux/crypto.h>
48 #include <linux/key.h>
49
50 #include <libcfs/libcfs.h>
51 #include <obd.h>
52 #include <obd_class.h>
53 #include <obd_support.h>
54 #include <lustre_net.h>
55 #include <lustre_import.h>
56 #include <lustre_dlm.h>
57 #include <lustre_sec.h>
58
59 #include "ptlrpc_internal.h"
60
61 /***********************************************
62  * policy registers                            *
63  ***********************************************/
64
65 static rwlock_t policy_lock;
66 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
67         NULL,
68 };
69
70 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
71 {
72         __u16 number = policy->sp_policy;
73
74         LASSERT(policy->sp_name);
75         LASSERT(policy->sp_cops);
76         LASSERT(policy->sp_sops);
77
78         if (number >= SPTLRPC_POLICY_MAX)
79                 return -EINVAL;
80
81         write_lock(&policy_lock);
82         if (unlikely(policies[number])) {
83                 write_unlock(&policy_lock);
84                 return -EALREADY;
85         }
86         policies[number] = policy;
87         write_unlock(&policy_lock);
88
89         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
90         return 0;
91 }
92 EXPORT_SYMBOL(sptlrpc_register_policy);
93
94 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
95 {
96         __u16 number = policy->sp_policy;
97
98         LASSERT(number < SPTLRPC_POLICY_MAX);
99
100         write_lock(&policy_lock);
101         if (unlikely(policies[number] == NULL)) {
102                 write_unlock(&policy_lock);
103                 CERROR("%s: already unregistered\n", policy->sp_name);
104                 return -EINVAL;
105         }
106
107         LASSERT(policies[number] == policy);
108         policies[number] = NULL;
109         write_unlock(&policy_lock);
110
111         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
112         return 0;
113 }
114 EXPORT_SYMBOL(sptlrpc_unregister_policy);
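/*
 * Usage sketch (illustrative, not part of this file): a security policy
 * module is expected to register itself on load and unregister on unload.
 * The names my_policy, my_cli_ops, my_svc_ops and the init/exit hooks
 * below are hypothetical; only sptlrpc_register_policy() and
 * sptlrpc_unregister_policy() are defined here, and the chosen slot must
 * be a valid value below SPTLRPC_POLICY_MAX.
 *
 *      static struct ptlrpc_sec_policy my_policy = {
 *              .sp_owner  = THIS_MODULE,
 *              .sp_name   = "sec.my",
 *              .sp_policy = SPTLRPC_POLICY_GSS,
 *              .sp_cops   = &my_cli_ops,
 *              .sp_sops   = &my_svc_ops,
 *      };
 *
 *      static int __init my_policy_init(void)
 *      {
 *              return sptlrpc_register_policy(&my_policy);
 *      }
 *
 *      static void __exit my_policy_fini(void)
 *      {
 *              sptlrpc_unregister_policy(&my_policy);
 *      }
 */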
115
116 static
117 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
118 {
119         static DEFINE_MUTEX(load_mutex);
120         static atomic_t           loaded = ATOMIC_INIT(0);
121         struct ptlrpc_sec_policy *policy;
122         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
123         __u16                     flag = 0;
124
125         if (number >= SPTLRPC_POLICY_MAX)
126                 return NULL;
127
128         while (1) {
129                 read_lock(&policy_lock);
130                 policy = policies[number];
131                 if (policy && !try_module_get(policy->sp_owner))
132                         policy = NULL;
133                 if (policy == NULL)
134                         flag = atomic_read(&loaded);
135                 read_unlock(&policy_lock);
136
137                 if (policy != NULL || flag != 0 ||
138                     number != SPTLRPC_POLICY_GSS)
139                         break;
140
141                 /* try to load gss module, once */
142                 mutex_lock(&load_mutex);
143                 if (atomic_read(&loaded) == 0) {
144                         if (request_module("ptlrpc_gss") == 0)
145                                 CDEBUG(D_SEC,
146                                        "module ptlrpc_gss loaded on demand\n");
147                         else
148                                 CERROR("Unable to load module ptlrpc_gss\n");
149
150                         atomic_set(&loaded, 1);
151                 }
152                 mutex_unlock(&load_mutex);
153         }
154
155         return policy;
156 }
157
158 __u32 sptlrpc_name2flavor_base(const char *name)
159 {
160         if (!strcmp(name, "null"))
161                 return SPTLRPC_FLVR_NULL;
162         if (!strcmp(name, "plain"))
163                 return SPTLRPC_FLVR_PLAIN;
164         if (!strcmp(name, "gssnull"))
165                 return SPTLRPC_FLVR_GSSNULL;
166         if (!strcmp(name, "krb5n"))
167                 return SPTLRPC_FLVR_KRB5N;
168         if (!strcmp(name, "krb5a"))
169                 return SPTLRPC_FLVR_KRB5A;
170         if (!strcmp(name, "krb5i"))
171                 return SPTLRPC_FLVR_KRB5I;
172         if (!strcmp(name, "krb5p"))
173                 return SPTLRPC_FLVR_KRB5P;
174         if (!strcmp(name, "skn"))
175                 return SPTLRPC_FLVR_SKN;
176         if (!strcmp(name, "ska"))
177                 return SPTLRPC_FLVR_SKA;
178         if (!strcmp(name, "ski"))
179                 return SPTLRPC_FLVR_SKI;
180         if (!strcmp(name, "skpi"))
181                 return SPTLRPC_FLVR_SKPI;
182
183         return SPTLRPC_FLVR_INVALID;
184 }
185 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
186
187 const char *sptlrpc_flavor2name_base(__u32 flvr)
188 {
189         __u32   base = SPTLRPC_FLVR_BASE(flvr);
190
191         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
192                 return "null";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
194                 return "plain";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
196                 return "gssnull";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
198                 return "krb5n";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
200                 return "krb5a";
201         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
202                 return "krb5i";
203         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
204                 return "krb5p";
205         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
206                 return "skn";
207         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
208                 return "ska";
209         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
210                 return "ski";
211         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
212                 return "skpi";
213
214         CERROR("invalid wire flavor 0x%x\n", flvr);
215         return "invalid";
216 }
217 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
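/*
 * Example (illustrative): the two helpers above are inverses for the base
 * part of a flavor, so a name can be round-tripped through the wire value:
 *
 *      __u32 flvr = sptlrpc_name2flavor_base("krb5i");
 *
 *      if (flvr != SPTLRPC_FLVR_INVALID)
 *              CDEBUG(D_SEC, "flavor 0x%x is %s\n", flvr,
 *                     sptlrpc_flavor2name_base(flvr));
 *
 * which would print "krb5i" back for the value obtained above.
 */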
218
219 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
220                                char *buf, int bufsize)
221 {
222         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
223                 snprintf(buf, bufsize, "hash:%s",
224                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
225         else
226                 snprintf(buf, bufsize, "%s",
227                          sptlrpc_flavor2name_base(sf->sf_rpc));
228
229         buf[bufsize - 1] = '\0';
230         return buf;
231 }
232 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
233
234 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
235 {
236         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
237
238         /*
239          * currently we don't support customized bulk specification for
240          * flavors other than plain
241          */
242         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
243                 char bspec[16];
244
245                 bspec[0] = '-';
246                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
247                 strlcat(buf, bspec, bufsize);
248         }
249
250         buf[bufsize - 1] = '\0';
251         return buf;
252 }
253 EXPORT_SYMBOL(sptlrpc_flavor2name);
254
255 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
256 {
257         buf[0] = '\0';
258
259         if (flags & PTLRPC_SEC_FL_REVERSE)
260                 strlcat(buf, "reverse,", bufsize);
261         if (flags & PTLRPC_SEC_FL_ROOTONLY)
262                 strlcat(buf, "rootonly,", bufsize);
263         if (flags & PTLRPC_SEC_FL_UDESC)
264                 strlcat(buf, "udesc,", bufsize);
265         if (flags & PTLRPC_SEC_FL_BULK)
266                 strlcat(buf, "bulk,", bufsize);
267         if (buf[0] == '\0')
268                 strlcat(buf, "-,", bufsize);
269
270         return buf;
271 }
272 EXPORT_SYMBOL(sptlrpc_secflags2str);
273
274 /**************************************************
275  * client context APIs                            *
276  **************************************************/
277
278 static
279 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
280 {
281         struct vfs_cred vcred;
282         int create = 1, remove_dead = 1;
283
284         LASSERT(sec);
285         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
286
287         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
288                                      PTLRPC_SEC_FL_ROOTONLY)) {
289                 vcred.vc_uid = 0;
290                 vcred.vc_gid = 0;
291                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
292                         create = 0;
293                         remove_dead = 0;
294                 }
295         } else {
296                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
297                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
298         }
299
300         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
301                                                    remove_dead);
302 }
303
304 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
305 {
306         atomic_inc(&ctx->cc_refcount);
307         return ctx;
308 }
309 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
310
311 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
312 {
313         struct ptlrpc_sec *sec = ctx->cc_sec;
314
315         LASSERT(sec);
316         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
317
318         if (!atomic_dec_and_test(&ctx->cc_refcount))
319                 return;
320
321         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
322 }
323 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
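/*
 * Reference counting sketch (hypothetical caller): every
 * sptlrpc_cli_ctx_get() must be balanced by an sptlrpc_cli_ctx_put();
 * the final put releases the context through the owning policy's
 * release_ctx(), synchronously or not depending on the sync argument.
 *
 *      struct ptlrpc_cli_ctx *ctx = sptlrpc_cli_ctx_get(some_ctx);
 *
 *      ... use ctx ...
 *
 *      sptlrpc_cli_ctx_put(ctx, 1);
 */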
324
325 /**
326  * Expire the client context immediately.
327  *
328  * \pre Caller must hold at least 1 reference on the \a ctx.
329  */
330 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
331 {
332         LASSERT(ctx->cc_ops->die);
333         ctx->cc_ops->die(ctx, 0);
334 }
335 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
336
337 /**
338  * Wake up the threads that are waiting for this client context. Called
339  * after some status change happened on \a ctx.
340  */
341 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
342 {
343         struct ptlrpc_request *req, *next;
344
345         spin_lock(&ctx->cc_lock);
346         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
347                                      rq_ctx_chain) {
348                 list_del_init(&req->rq_ctx_chain);
349                 ptlrpc_client_wake_req(req);
350         }
351         spin_unlock(&ctx->cc_lock);
352 }
353 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
354
355 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
356 {
357         LASSERT(ctx->cc_ops);
358
359         if (ctx->cc_ops->display == NULL)
360                 return 0;
361
362         return ctx->cc_ops->display(ctx, buf, bufsize);
363 }
364
365 static int import_sec_check_expire(struct obd_import *imp)
366 {
367         int     adapt = 0;
368
369         spin_lock(&imp->imp_lock);
370         if (imp->imp_sec_expire &&
371             imp->imp_sec_expire < cfs_time_current_sec()) {
372                 adapt = 1;
373                 imp->imp_sec_expire = 0;
374         }
375         spin_unlock(&imp->imp_lock);
376
377         if (!adapt)
378                 return 0;
379
380         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
381         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
382 }
383
384 /**
385  * Get and validate the client side ptlrpc security facilities from
386  * \a imp. There is a race condition on client reconnect when the import is
387  * being destroyed while there are outstanding client bound requests. In
389  * this case do not output any error messages if import security is not
389  * found.
390  *
391  * \param[in] imp obd import associated with client
392  * \param[out] sec client side ptlrpc security
393  *
394  * \retval 0 if security retrieved successfully
395  * \retval -ve errno if there was a problem
396  */
397 static int import_sec_validate_get(struct obd_import *imp,
398                                    struct ptlrpc_sec **sec)
399 {
400         int     rc;
401
402         if (unlikely(imp->imp_sec_expire)) {
403                 rc = import_sec_check_expire(imp);
404                 if (rc)
405                         return rc;
406         }
407
408         *sec = sptlrpc_import_sec_ref(imp);
409         /* Only output an error when the import is still active */
410         if (*sec == NULL) {
411                 if (list_empty(&imp->imp_zombie_chain))
412                         CERROR("import %p (%s) with no sec\n",
413                                 imp, ptlrpc_import_state_name(imp->imp_state));
414                 return -EACCES;
415         }
416
417         if (unlikely((*sec)->ps_dying)) {
418                 CERROR("attempt to use dying sec %p\n", *sec);
419                 sptlrpc_sec_put(*sec);
420                 return -EACCES;
421         }
422
423         return 0;
424 }
425
426 /**
427  * Given a \a req, find or allocate an appropriate context for it.
428  * \pre req->rq_cli_ctx == NULL.
429  *
430  * \retval 0 on success, and req->rq_cli_ctx is set.
431  * \retval -ve error number, and req->rq_cli_ctx == NULL.
432  */
433 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
434 {
435         struct obd_import *imp = req->rq_import;
436         struct ptlrpc_sec *sec;
437         int                rc;
438         ENTRY;
439
440         LASSERT(!req->rq_cli_ctx);
441         LASSERT(imp);
442
443         rc = import_sec_validate_get(imp, &sec);
444         if (rc)
445                 RETURN(rc);
446
447         req->rq_cli_ctx = get_my_ctx(sec);
448
449         sptlrpc_sec_put(sec);
450
451         if (!req->rq_cli_ctx) {
452                 CERROR("req %p: fail to get context\n", req);
453                 RETURN(-ECONNREFUSED);
454         }
455
456         RETURN(0);
457 }
458
459 /**
460  * Drop the context for \a req.
461  * \pre req->rq_cli_ctx != NULL.
462  * \post req->rq_cli_ctx == NULL.
463  *
464  * If \a sync == 0, this function should return quickly without sleep;
465  * otherwise it might trigger and wait for the whole process of sending
466  * a context-destroying RPC to the server.
467  */
468 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
469 {
470         ENTRY;
471
472         LASSERT(req);
473         LASSERT(req->rq_cli_ctx);
474
475         /* the request might be asked to release its context early while
476          * still on the context waiting list.
477          */
478         if (!list_empty(&req->rq_ctx_chain)) {
479                 spin_lock(&req->rq_cli_ctx->cc_lock);
480                 list_del_init(&req->rq_ctx_chain);
481                 spin_unlock(&req->rq_cli_ctx->cc_lock);
482         }
483
484         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
485         req->rq_cli_ctx = NULL;
486         EXIT;
487 }
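/*
 * Typical pairing (simplified sketch, not the full ptlrpc path): a request
 * obtains a context before its buffers are packed and drops it once the
 * RPC is finished.
 *
 *      rc = sptlrpc_req_get_ctx(req);          sets req->rq_cli_ctx on success
 *      if (rc)
 *              return rc;
 *      ...
 *      sptlrpc_req_put_ctx(req, 1);            clears req->rq_cli_ctx
 */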
488
489 static
490 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
491                            struct ptlrpc_cli_ctx *oldctx,
492                            struct ptlrpc_cli_ctx *newctx)
493 {
494         struct sptlrpc_flavor   old_flvr;
495         char                   *reqmsg = NULL; /* to workaround old gcc */
496         int                     reqmsg_size;
497         int                     rc = 0;
498
499         LASSERT(req->rq_reqmsg);
500         LASSERT(req->rq_reqlen);
501         LASSERT(req->rq_replen);
502
503         CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
504                "switch sec %p(%s) -> %p(%s)\n", req,
505                oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
506                newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
507                oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
508                newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
509
510         /* save flavor */
511         old_flvr = req->rq_flvr;
512
513         /* save request message */
514         reqmsg_size = req->rq_reqlen;
515         if (reqmsg_size != 0) {
516                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
517                 if (reqmsg == NULL)
518                         return -ENOMEM;
519                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
520         }
521
522         /* release old req/rep buf */
523         req->rq_cli_ctx = oldctx;
524         sptlrpc_cli_free_reqbuf(req);
525         sptlrpc_cli_free_repbuf(req);
526         req->rq_cli_ctx = newctx;
527
528         /* recalculate the flavor */
529         sptlrpc_req_set_flavor(req, 0);
530
531         /* alloc new request buffer;
532          * we don't need to alloc the reply buffer here, leave that to the
533          * rest of the ptlrpc procedure */
534         if (reqmsg_size != 0) {
535                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
536                 if (!rc) {
537                         LASSERT(req->rq_reqmsg);
538                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
539                 } else {
540                         CWARN("failed to alloc reqbuf: %d\n", rc);
541                         req->rq_flvr = old_flvr;
542                 }
543
544                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
545         }
546         return rc;
547 }
548
549 /**
550  * If current context of \a req is dead somehow, e.g. we just switched flavor
551  * thus marked original contexts dead, we'll find a new context for it. If
552  * no switch is needed, \a req will end up with the same context.
553  *
554  * \note a request must have a context, to keep other parts of code happy.
555  * In any case of failure during the switching, we must restore the old one.
556  */
557 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
558 {
559         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
560         struct ptlrpc_cli_ctx *newctx;
561         int                    rc;
562         ENTRY;
563
564         LASSERT(oldctx);
565
566         sptlrpc_cli_ctx_get(oldctx);
567         sptlrpc_req_put_ctx(req, 0);
568
569         rc = sptlrpc_req_get_ctx(req);
570         if (unlikely(rc)) {
571                 LASSERT(!req->rq_cli_ctx);
572
573                 /* restore old ctx */
574                 req->rq_cli_ctx = oldctx;
575                 RETURN(rc);
576         }
577
578         newctx = req->rq_cli_ctx;
579         LASSERT(newctx);
580
581         if (unlikely(newctx == oldctx &&
582                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
583                 /*
584                  * still got the old dead ctx, which usually means the system is too busy
585                  */
586                 CDEBUG(D_SEC,
587                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
588                        newctx, newctx->cc_flags);
589
590                 set_current_state(TASK_INTERRUPTIBLE);
591                 schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
592         } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
593                             == 0)) {
594                 /*
595                  * new ctx not up to date yet
596                  */
597                 CDEBUG(D_SEC,
598                        "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
599                        newctx, newctx->cc_flags);
600         } else {
601                 /*
602                  * it's possible newctx == oldctx if we're switching
603                  * subflavor with the same sec.
604                  */
605                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
606                 if (rc) {
607                         /* restore old ctx */
608                         sptlrpc_req_put_ctx(req, 0);
609                         req->rq_cli_ctx = oldctx;
610                         RETURN(rc);
611                 }
612
613                 LASSERT(req->rq_cli_ctx == newctx);
614         }
615
616         sptlrpc_cli_ctx_put(oldctx, 1);
617         RETURN(0);
618 }
619 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
620
621 static
622 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
623 {
624         if (cli_ctx_is_refreshed(ctx))
625                 return 1;
626         return 0;
627 }
628
629 static
630 int ctx_refresh_timeout(void *data)
631 {
632         struct ptlrpc_request *req = data;
633         int rc;
634
635         /* conn_cnt is needed in expire_one_request */
636         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
637
638         rc = ptlrpc_expire_one_request(req, 1);
639         /* if we started recovery, we should mark this ctx dead; otherwise,
640          * if lgssd died, nobody would retire this ctx and a following
641          * connect would still find the same ctx, thus causing a deadlock.
642          * there's an assumption that the expire time of the request is
643          * later than the context refresh expire time.
644          */
645         if (rc == 0)
646                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
647         return rc;
648 }
649
650 static
651 void ctx_refresh_interrupt(void *data)
652 {
653         struct ptlrpc_request *req = data;
654
655         spin_lock(&req->rq_lock);
656         req->rq_intr = 1;
657         spin_unlock(&req->rq_lock);
658 }
659
660 static
661 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
662 {
663         spin_lock(&ctx->cc_lock);
664         if (!list_empty(&req->rq_ctx_chain))
665                 list_del_init(&req->rq_ctx_chain);
666         spin_unlock(&ctx->cc_lock);
667 }
668
669 /**
670  * Refresh the context of \a req if it is not up to date.
671  * \param timeout
672  * - < 0: don't wait
673  * - = 0: wait until success or fatal error occur
674  * - > 0: timeout value (in seconds)
675  *
676  * The status of the context could be changed by other threads at any time.
677  * We allow this race, but once we return 0, the caller may assume it is up
678  * to date and keep using it until the owning RPC is done.
679  *
680  * \retval 0 only if the context is up to date.
681  * \retval -ve error number otherwise.
682  */
683 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
684 {
685         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
686         struct ptlrpc_sec      *sec;
687         struct l_wait_info      lwi;
688         int                     rc;
689         ENTRY;
690
691         LASSERT(ctx);
692
693         if (req->rq_ctx_init || req->rq_ctx_fini)
694                 RETURN(0);
695
696         /*
697          * during the process a request's context might even change type
698          * (e.g. from a gss ctx to a null ctx), so on each loop we need to
699          * re-check everything
700          */
701 again:
702         rc = import_sec_validate_get(req->rq_import, &sec);
703         if (rc)
704                 RETURN(rc);
705
706         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
707                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
708                       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
709                 req_off_ctx_list(req, ctx);
710                 sptlrpc_req_replace_dead_ctx(req);
711                 ctx = req->rq_cli_ctx;
712         }
713         sptlrpc_sec_put(sec);
714
715         if (cli_ctx_is_eternal(ctx))
716                 RETURN(0);
717
718         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
719                 LASSERT(ctx->cc_ops->refresh);
720                 ctx->cc_ops->refresh(ctx);
721         }
722         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
723
724         LASSERT(ctx->cc_ops->validate);
725         if (ctx->cc_ops->validate(ctx) == 0) {
726                 req_off_ctx_list(req, ctx);
727                 RETURN(0);
728         }
729
730         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
731                 spin_lock(&req->rq_lock);
732                 req->rq_err = 1;
733                 spin_unlock(&req->rq_lock);
734                 req_off_ctx_list(req, ctx);
735                 RETURN(-EPERM);
736         }
737
738         /*
739          * There's a subtle issue for resending RPCs, consider the following
740          * situation:
741          *  1. the request was sent to the server.
742          *  2. recovery was kicked off; after it finished, the request was
743          *     marked as resent.
744          *  3. the request is resent.
745          *  4. the old reply from the server is received; we accept and verify
746          *     the reply. this has to succeed, otherwise the error would be
747          *     seen by the application.
748          *  5. the new reply from the server is received and dropped by LNet.
749          *
750          * Note the xid of the old & new request is the same. We can't simply
751          * change the xid of the resent request because the server relies on
752          * it for reply reconstruction.
753          *
754          * Commonly the original context should still be up to date because
755          * we have a nice expiry time margin; the server will keep its context
756          * because we hold at least one ref on the old context, which prevents
757          * the context-destroying RPC from being sent. So the server can still
758          * accept the request and finish the RPC. But if that's not the case:
759          *  1. If the server side context has been trimmed, a NO_CONTEXT will
760          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
761          *     new context by force.
762          *  2. If the current context was never refreshed, then we are fine:
763          *     we never really sent a request with the old context before.
764          */
765         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
766             unlikely(req->rq_reqmsg) &&
767             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
768                 req_off_ctx_list(req, ctx);
769                 RETURN(0);
770         }
771
772         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
773                 req_off_ctx_list(req, ctx);
774                 /*
775                  * don't switch ctx if import was deactivated
776                  */
777                 if (req->rq_import->imp_deactive) {
778                         spin_lock(&req->rq_lock);
779                         req->rq_err = 1;
780                         spin_unlock(&req->rq_lock);
781                         RETURN(-EINTR);
782                 }
783
784                 rc = sptlrpc_req_replace_dead_ctx(req);
785                 if (rc) {
786                         LASSERT(ctx == req->rq_cli_ctx);
787                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
788                                req, ctx, rc);
789                         spin_lock(&req->rq_lock);
790                         req->rq_err = 1;
791                         spin_unlock(&req->rq_lock);
792                         RETURN(rc);
793                 }
794
795                 ctx = req->rq_cli_ctx;
796                 goto again;
797         }
798
799         /*
800          * Now we're sure this context is in the middle of an upcall, add
801          * ourselves to the waiting list
802          */
803         spin_lock(&ctx->cc_lock);
804         if (list_empty(&req->rq_ctx_chain))
805                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
806         spin_unlock(&ctx->cc_lock);
807
808         if (timeout < 0)
809                 RETURN(-EWOULDBLOCK);
810
811         /* Clear any flags that may be present from previous sends */
812         LASSERT(req->rq_receiving_reply == 0);
813         spin_lock(&req->rq_lock);
814         req->rq_err = 0;
815         req->rq_timedout = 0;
816         req->rq_resend = 0;
817         req->rq_restart = 0;
818         spin_unlock(&req->rq_lock);
819
820         lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
821                                ctx_refresh_timeout,
822                                ctx_refresh_interrupt, req);
823         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
824
825         /*
826          * following cases could lead us here:
827          * - successfully refreshed;
828          * - interrupted;
829          * - timed out, and we don't want to recover from the failure;
830          * - timed out, and woken up when recovery finished;
831          * - someone else marked this ctx dead by force;
832          * - someone invalidated the req and called ptlrpc_client_wake_req(),
833          *   e.g. ptlrpc_abort_inflight();
834          */
835         if (!cli_ctx_is_refreshed(ctx)) {
836                 /* timed out or interrupted */
837                 req_off_ctx_list(req, ctx);
838
839                 LASSERT(rc != 0);
840                 RETURN(rc);
841         }
842
843         goto again;
844 }
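/*
 * Timeout semantics sketch for sptlrpc_req_refresh_ctx(), following the
 * description above (hypothetical call sites):
 *
 *      rc = sptlrpc_req_refresh_ctx(req, -1);  do not wait, -EWOULDBLOCK
 *                                              if the ctx is not ready
 *      rc = sptlrpc_req_refresh_ctx(req, 0);   wait until refreshed or a
 *                                              fatal error occurs
 *      rc = sptlrpc_req_refresh_ctx(req, 30);  wait at most 30 seconds
 */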
845
846 /**
847  * Initialize flavor settings for \a req, according to \a opcode.
848  *
849  * \note this could be called in two situations:
850  * - new request from ptlrpc_pre_req(), with proper @opcode
851  * - old request which changed ctx in the middle, with @opcode == 0
852  */
853 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
854 {
855         struct ptlrpc_sec *sec;
856
857         LASSERT(req->rq_import);
858         LASSERT(req->rq_cli_ctx);
859         LASSERT(req->rq_cli_ctx->cc_sec);
860         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
861
862         /* special security flags according to opcode */
863         switch (opcode) {
864         case OST_READ:
865         case MDS_READPAGE:
866         case MGS_CONFIG_READ:
867         case OBD_IDX_READ:
868                 req->rq_bulk_read = 1;
869                 break;
870         case OST_WRITE:
871         case MDS_WRITEPAGE:
872                 req->rq_bulk_write = 1;
873                 break;
874         case SEC_CTX_INIT:
875                 req->rq_ctx_init = 1;
876                 break;
877         case SEC_CTX_FINI:
878                 req->rq_ctx_fini = 1;
879                 break;
880         case 0:
881                 /* init/fini rpc won't be resent, so can't be here */
882                 LASSERT(req->rq_ctx_init == 0);
883                 LASSERT(req->rq_ctx_fini == 0);
884
885                 /* cleanup flags, which should be recalculated */
886                 req->rq_pack_udesc = 0;
887                 req->rq_pack_bulk = 0;
888                 break;
889         }
890
891         sec = req->rq_cli_ctx->cc_sec;
892
893         spin_lock(&sec->ps_lock);
894         req->rq_flvr = sec->ps_flvr;
895         spin_unlock(&sec->ps_lock);
896
897         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
898          * destruction rpc */
899         if (unlikely(req->rq_ctx_init))
900                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
901         else if (unlikely(req->rq_ctx_fini))
902                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
903
904         /* user descriptor flag, null security can't do it anyway */
905         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
906             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
907                 req->rq_pack_udesc = 1;
908
909         /* bulk security flag */
910         if ((req->rq_bulk_read || req->rq_bulk_write) &&
911             sptlrpc_flavor_has_bulk(&req->rq_flvr))
912                 req->rq_pack_bulk = 1;
913 }
914
915 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
916 {
917         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
918                 return;
919
920         LASSERT(req->rq_clrbuf);
921         if (req->rq_pool || !req->rq_reqbuf)
922                 return;
923
924         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
925         req->rq_reqbuf = NULL;
926         req->rq_reqbuf_len = 0;
927 }
928
929 /**
930  * Given an import \a imp, check whether current user has a valid context
931  * or not. We may create a new context and try to refresh it, retrying
932  * repeatedly in case of non-fatal errors. Return 0 means success.
933  */
934 int sptlrpc_import_check_ctx(struct obd_import *imp)
935 {
936         struct ptlrpc_sec     *sec;
937         struct ptlrpc_cli_ctx *ctx;
938         struct ptlrpc_request *req = NULL;
939         int rc;
940         ENTRY;
941
942         might_sleep();
943
944         sec = sptlrpc_import_sec_ref(imp);
945         ctx = get_my_ctx(sec);
946         sptlrpc_sec_put(sec);
947
948         if (!ctx)
949                 RETURN(-ENOMEM);
950
951         if (cli_ctx_is_eternal(ctx) ||
952             ctx->cc_ops->validate(ctx) == 0) {
953                 sptlrpc_cli_ctx_put(ctx, 1);
954                 RETURN(0);
955         }
956
957         if (cli_ctx_is_error(ctx)) {
958                 sptlrpc_cli_ctx_put(ctx, 1);
959                 RETURN(-EACCES);
960         }
961
962         req = ptlrpc_request_cache_alloc(GFP_NOFS);
963         if (!req)
964                 RETURN(-ENOMEM);
965
966         ptlrpc_cli_req_init(req);
967         atomic_set(&req->rq_refcount, 10000);
968
969         req->rq_import = imp;
970         req->rq_flvr = sec->ps_flvr;
971         req->rq_cli_ctx = ctx;
972
973         rc = sptlrpc_req_refresh_ctx(req, 0);
974         LASSERT(list_empty(&req->rq_ctx_chain));
975         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
976         ptlrpc_request_cache_free(req);
977
978         RETURN(rc);
979 }
980
981 /**
982  * Used by ptlrpc client, to perform the pre-defined security transformation
983  * upon the request message of \a req. After this function is called,
984  * req->rq_reqmsg is still accessible as clear text.
985  */
986 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
987 {
988         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
989         int rc = 0;
990         ENTRY;
991
992         LASSERT(ctx);
993         LASSERT(ctx->cc_sec);
994         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
995
996         /* we wrap bulk request here because now we can be sure
997          * the context is uptodate.
998          */
999         if (req->rq_bulk) {
1000                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
1001                 if (rc)
1002                         RETURN(rc);
1003         }
1004
1005         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1006         case SPTLRPC_SVC_NULL:
1007         case SPTLRPC_SVC_AUTH:
1008         case SPTLRPC_SVC_INTG:
1009                 LASSERT(ctx->cc_ops->sign);
1010                 rc = ctx->cc_ops->sign(ctx, req);
1011                 break;
1012         case SPTLRPC_SVC_PRIV:
1013                 LASSERT(ctx->cc_ops->seal);
1014                 rc = ctx->cc_ops->seal(ctx, req);
1015                 break;
1016         default:
1017                 LBUG();
1018         }
1019
1020         if (rc == 0) {
1021                 LASSERT(req->rq_reqdata_len);
1022                 LASSERT(req->rq_reqdata_len % 8 == 0);
1023                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1024         }
1025
1026         RETURN(rc);
1027 }
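/*
 * Note on the service levels handled above: SPTLRPC_SVC_NULL, _AUTH and
 * _INTG flavors are wrapped via ctx->cc_ops->sign(), SPTLRPC_SVC_PRIV
 * flavors via ctx->cc_ops->seal(); in every case req->rq_reqmsg remains
 * readable as clear text and the resulting wire length in
 * req->rq_reqdata_len is a multiple of 8, bounded by req->rq_reqbuf_len.
 */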
1028
1029 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1030 {
1031         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1032         int                    rc;
1033         ENTRY;
1034
1035         LASSERT(ctx);
1036         LASSERT(ctx->cc_sec);
1037         LASSERT(req->rq_repbuf);
1038         LASSERT(req->rq_repdata);
1039         LASSERT(req->rq_repmsg == NULL);
1040
1041         req->rq_rep_swab_mask = 0;
1042
1043         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1044         switch (rc) {
1045         case 1:
1046                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1047         case 0:
1048                 break;
1049         default:
1050                 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
1051                 RETURN(-EPROTO);
1052         }
1053
1054         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1055                 CERROR("replied data length %d too small\n",
1056                        req->rq_repdata_len);
1057                 RETURN(-EPROTO);
1058         }
1059
1060         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1061             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1062                 CERROR("reply policy %u doesn't match request policy %u\n",
1063                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1064                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1065                 RETURN(-EPROTO);
1066         }
1067
1068         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1069         case SPTLRPC_SVC_NULL:
1070         case SPTLRPC_SVC_AUTH:
1071         case SPTLRPC_SVC_INTG:
1072                 LASSERT(ctx->cc_ops->verify);
1073                 rc = ctx->cc_ops->verify(ctx, req);
1074                 break;
1075         case SPTLRPC_SVC_PRIV:
1076                 LASSERT(ctx->cc_ops->unseal);
1077                 rc = ctx->cc_ops->unseal(ctx, req);
1078                 break;
1079         default:
1080                 LBUG();
1081         }
1082         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1083
1084         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1085             !req->rq_ctx_init)
1086                 req->rq_rep_swab_mask = 0;
1087         RETURN(rc);
1088 }
1089
1090 /**
1091  * Used by ptlrpc client, to perform security transformation upon the reply
1092  * message of \a req. After return successfully, req->rq_repmsg points to
1093  * the reply message in clear text.
1094  *
1095  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1096  * going to change.
1097  */
1098 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1099 {
1100         LASSERT(req->rq_repbuf);
1101         LASSERT(req->rq_repdata == NULL);
1102         LASSERT(req->rq_repmsg == NULL);
1103         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1104
1105         if (req->rq_reply_off == 0 &&
1106             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1107                 CERROR("real reply with offset 0\n");
1108                 return -EPROTO;
1109         }
1110
1111         if (req->rq_reply_off % 8 != 0) {
1112                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1113                 return -EPROTO;
1114         }
1115
1116         req->rq_repdata = (struct lustre_msg *)
1117                                 (req->rq_repbuf + req->rq_reply_off);
1118         req->rq_repdata_len = req->rq_nob_received;
1119
1120         return do_cli_unwrap_reply(req);
1121 }
1122
1123 /**
1124  * Used by ptlrpc client, to perform security transformation upon the early
1125  * reply message of \a req. We expect the rq_reply_off is 0, and
1126  * rq_nob_received is the early reply size.
1127  * 
1128  * Because the receive buffer might still be posted, the reply data might be
1129  * changed at any time, whether we're holding rq_lock or not. For this reason
1130  * we allocate a separate ptlrpc_request and reply buffer for early reply
1131  * processing.
1132  *
1133  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1134  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1135  * \a *req_ret to release it.
1136  * \retval -ev error number, and \a req_ret will not be set.
1137  */
1138 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1139                                    struct ptlrpc_request **req_ret)
1140 {
1141         struct ptlrpc_request  *early_req;
1142         char                   *early_buf;
1143         int                     early_bufsz, early_size;
1144         int                     rc;
1145         ENTRY;
1146
1147         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1148         if (early_req == NULL)
1149                 RETURN(-ENOMEM);
1150
1151         ptlrpc_cli_req_init(early_req);
1152
1153         early_size = req->rq_nob_received;
1154         early_bufsz = size_roundup_power2(early_size);
1155         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1156         if (early_buf == NULL)
1157                 GOTO(err_req, rc = -ENOMEM);
1158
1159         /* sanity checks and copy data out, do it inside the spinlock */
1160         spin_lock(&req->rq_lock);
1161
1162         if (req->rq_replied) {
1163                 spin_unlock(&req->rq_lock);
1164                 GOTO(err_buf, rc = -EALREADY);
1165         }
1166
1167         LASSERT(req->rq_repbuf);
1168         LASSERT(req->rq_repdata == NULL);
1169         LASSERT(req->rq_repmsg == NULL);
1170
1171         if (req->rq_reply_off != 0) {
1172                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1173                 spin_unlock(&req->rq_lock);
1174                 GOTO(err_buf, rc = -EPROTO);
1175         }
1176
1177         if (req->rq_nob_received != early_size) {
1178                 /* even if another early reply arrived, the size should be the same */
1179                 CERROR("data size has changed from %u to %u\n",
1180                        early_size, req->rq_nob_received);
1181                 spin_unlock(&req->rq_lock);
1182                 GOTO(err_buf, rc = -EINVAL);
1183         }
1184
1185         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1186                 CERROR("early reply length %d too small\n",
1187                        req->rq_nob_received);
1188                 spin_unlock(&req->rq_lock);
1189                 GOTO(err_buf, rc = -EALREADY);
1190         }
1191
1192         memcpy(early_buf, req->rq_repbuf, early_size);
1193         spin_unlock(&req->rq_lock);
1194
1195         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1196         early_req->rq_flvr = req->rq_flvr;
1197         early_req->rq_repbuf = early_buf;
1198         early_req->rq_repbuf_len = early_bufsz;
1199         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1200         early_req->rq_repdata_len = early_size;
1201         early_req->rq_early = 1;
1202         early_req->rq_reqmsg = req->rq_reqmsg;
1203
1204         rc = do_cli_unwrap_reply(early_req);
1205         if (rc) {
1206                 DEBUG_REQ(D_ADAPTTO, early_req,
1207                           "error %d unwrap early reply", rc);
1208                 GOTO(err_ctx, rc);
1209         }
1210
1211         LASSERT(early_req->rq_repmsg);
1212         *req_ret = early_req;
1213         RETURN(0);
1214
1215 err_ctx:
1216         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1217 err_buf:
1218         OBD_FREE_LARGE(early_buf, early_bufsz);
1219 err_req:
1220         ptlrpc_request_cache_free(early_req);
1221         RETURN(rc);
1222 }
1223
1224 /**
1225  * Used by ptlrpc client, to release a processed early reply \a early_req.
1226  *
1227  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1228  */
1229 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1230 {
1231         LASSERT(early_req->rq_repbuf);
1232         LASSERT(early_req->rq_repdata);
1233         LASSERT(early_req->rq_repmsg);
1234
1235         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1236         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1237         ptlrpc_request_cache_free(early_req);
1238 }
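/*
 * Early-reply handling sketch (hypothetical caller): the duplicated request
 * returned by sptlrpc_cli_unwrap_early_reply() must always be released with
 * sptlrpc_cli_finish_early_reply(), which drops the extra context reference
 * and the private reply buffer.
 *
 *      struct ptlrpc_request *early_req;
 *
 *      rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *      if (rc == 0) {
 *              ... read early_req->rq_repmsg, e.g. updated timeout data ...
 *              sptlrpc_cli_finish_early_reply(early_req);
 *      }
 */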
1239
1240 /**************************************************
1241  * sec ID                                         *
1242  **************************************************/
1243
1244 /*
1245  * "fixed" sec (e.g. null) use sec_id < 0
1246  */
1247 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1248
1249 int sptlrpc_get_next_secid(void)
1250 {
1251         return atomic_inc_return(&sptlrpc_sec_id);
1252 }
1253 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1254
1255 /**************************************************
1256  * client side high-level security APIs           *
1257  **************************************************/
1258
1259 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1260                                    int grace, int force)
1261 {
1262         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1263
1264         LASSERT(policy->sp_cops);
1265         LASSERT(policy->sp_cops->flush_ctx_cache);
1266
1267         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1268 }
1269
1270 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1271 {
1272         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1273
1274         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1275         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1276         LASSERT(policy->sp_cops->destroy_sec);
1277
1278         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1279
1280         policy->sp_cops->destroy_sec(sec);
1281         sptlrpc_policy_put(policy);
1282 }
1283
1284 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1285 {
1286         sec_cop_destroy_sec(sec);
1287 }
1288 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1289
1290 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1291 {
1292         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1293
1294         if (sec->ps_policy->sp_cops->kill_sec) {
1295                 sec->ps_policy->sp_cops->kill_sec(sec);
1296
1297                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1298         }
1299 }
1300
1301 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1302 {
1303         if (sec)
1304                 atomic_inc(&sec->ps_refcount);
1305
1306         return sec;
1307 }
1308 EXPORT_SYMBOL(sptlrpc_sec_get);
1309
1310 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1311 {
1312         if (sec) {
1313                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1314
1315                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1316                         sptlrpc_gc_del_sec(sec);
1317                         sec_cop_destroy_sec(sec);
1318                 }
1319         }
1320 }
1321 EXPORT_SYMBOL(sptlrpc_sec_put);
1322
1323 /*
1324  * the policy module is responsible for taking a reference on the import
1325  */
1326 static
1327 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1328                                        struct ptlrpc_svc_ctx *svc_ctx,
1329                                        struct sptlrpc_flavor *sf,
1330                                        enum lustre_sec_part sp)
1331 {
1332         struct ptlrpc_sec_policy *policy;
1333         struct ptlrpc_sec        *sec;
1334         char                      str[32];
1335         ENTRY;
1336
1337         if (svc_ctx) {
1338                 LASSERT(imp->imp_dlm_fake == 1);
1339
1340                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1341                        imp->imp_obd->obd_type->typ_name,
1342                        imp->imp_obd->obd_name,
1343                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1344
1345                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1346                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1347         } else {
1348                 LASSERT(imp->imp_dlm_fake == 0);
1349
1350                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1351                        imp->imp_obd->obd_type->typ_name,
1352                        imp->imp_obd->obd_name,
1353                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1354
1355                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1356                 if (!policy) {
1357                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1358                         RETURN(NULL);
1359                 }
1360         }
1361
1362         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1363         if (sec) {
1364                 atomic_inc(&sec->ps_refcount);
1365
1366                 sec->ps_part = sp;
1367
1368                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1369                         sptlrpc_gc_add_sec(sec);
1370         } else {
1371                 sptlrpc_policy_put(policy);
1372         }
1373
1374         RETURN(sec);
1375 }
1376
1377 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1378 {
1379         struct ptlrpc_sec *sec;
1380
1381         spin_lock(&imp->imp_lock);
1382         sec = sptlrpc_sec_get(imp->imp_sec);
1383         spin_unlock(&imp->imp_lock);
1384
1385         return sec;
1386 }
1387 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1388
1389 static void sptlrpc_import_sec_install(struct obd_import *imp,
1390                                        struct ptlrpc_sec *sec)
1391 {
1392         struct ptlrpc_sec *old_sec;
1393
1394         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1395
1396         spin_lock(&imp->imp_lock);
1397         old_sec = imp->imp_sec;
1398         imp->imp_sec = sec;
1399         spin_unlock(&imp->imp_lock);
1400
1401         if (old_sec) {
1402                 sptlrpc_sec_kill(old_sec);
1403
1404                 /* balance the ref taken by this import */
1405                 sptlrpc_sec_put(old_sec);
1406         }
1407 }
1408
1409 static inline
1410 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1411 {
1412         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1413 }
1414
1415 static inline
1416 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1417 {
1418         *dst = *src;
1419 }
1420
1421 /**
1422  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1423  * configuration. When called, imp->imp_sec may or may not be NULL.
1424  *
1425  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1426  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1427  */
1428 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1429                              struct ptlrpc_svc_ctx *svc_ctx,
1430                              struct sptlrpc_flavor *flvr)
1431 {
1432         struct ptlrpc_connection   *conn;
1433         struct sptlrpc_flavor       sf;
1434         struct ptlrpc_sec          *sec, *newsec;
1435         enum lustre_sec_part        sp;
1436         char                        str[24];
1437         int                         rc = 0;
1438         ENTRY;
1439
1440         might_sleep();
1441
1442         if (imp == NULL)
1443                 RETURN(0);
1444
1445         conn = imp->imp_connection;
1446
1447         if (svc_ctx == NULL) {
1448                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1449                 /*
1450                  * normal import, determine the flavor from the rule set,
1451                  * except for the mgc, whose flavor is predetermined.
1452                  */
1453                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1454                         sf = cliobd->cl_flvr_mgc;
1455                 else 
1456                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1457                                                    cliobd->cl_sp_to,
1458                                                    &cliobd->cl_target_uuid,
1459                                                    conn->c_self, &sf);
1460
1461                 sp = imp->imp_obd->u.cli.cl_sp_me;
1462         } else {
1463                 /* reverse import, determine flavor from the incoming request */
1464                 sf = *flvr;
1465
1466                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1467                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1468                                       PTLRPC_SEC_FL_ROOTONLY;
1469
1470                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1471         }
1472
1473         sec = sptlrpc_import_sec_ref(imp);
1474         if (sec) {
1475                 char    str2[24];
1476
1477                 if (flavor_equal(&sf, &sec->ps_flvr))
1478                         GOTO(out, rc);
1479
1480                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1481                        imp->imp_obd->obd_name,
1482                        obd_uuid2str(&conn->c_remote_uuid),
1483                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1484                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1485         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1486                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1487                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1488                        imp->imp_obd->obd_name,
1489                        obd_uuid2str(&conn->c_remote_uuid),
1490                        LNET_NIDNET(conn->c_self),
1491                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1492         }
1493
1494         mutex_lock(&imp->imp_sec_mutex);
1495
1496         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1497         if (newsec) {
1498                 sptlrpc_import_sec_install(imp, newsec);
1499         } else {
1500                 CERROR("import %s->%s: failed to create new sec\n",
1501                        imp->imp_obd->obd_name,
1502                        obd_uuid2str(&conn->c_remote_uuid));
1503                 rc = -EPERM;
1504         }
1505
1506         mutex_unlock(&imp->imp_sec_mutex);
1507 out:
1508         sptlrpc_sec_put(sec);
1509         RETURN(rc);
1510 }
1511
1512 void sptlrpc_import_sec_put(struct obd_import *imp)
1513 {
1514         if (imp->imp_sec) {
1515                 sptlrpc_sec_kill(imp->imp_sec);
1516
1517                 sptlrpc_sec_put(imp->imp_sec);
1518                 imp->imp_sec = NULL;
1519         }
1520 }
1521
1522 static void import_flush_ctx_common(struct obd_import *imp,
1523                                     uid_t uid, int grace, int force)
1524 {
1525         struct ptlrpc_sec *sec;
1526
1527         if (imp == NULL)
1528                 return;
1529
1530         sec = sptlrpc_import_sec_ref(imp);
1531         if (sec == NULL)
1532                 return;
1533
1534         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1535         sptlrpc_sec_put(sec);
1536 }
1537
1538 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1539 {
1540         /* it's important to use grace mode, see the explanation in
1541          * sptlrpc_req_refresh_ctx() */
1542         import_flush_ctx_common(imp, 0, 1, 1);
1543 }
1544
1545 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1546 {
1547         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1548                                 1, 1);
1549 }
1550 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1551
1552 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1553 {
1554         import_flush_ctx_common(imp, -1, 1, 1);
1555 }
1556 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
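/*
 * The three flush wrappers above differ only in which uid is passed to
 * sec_cop_flush_ctx_cache() (all use grace and force mode):
 *
 *      sptlrpc_import_flush_root_ctx(imp);     uid 0 only
 *      sptlrpc_import_flush_my_ctx(imp);       the calling user's uid
 *      sptlrpc_import_flush_all_ctx(imp);      uid -1, i.e. every user
 */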
1557
1558 /**
1559  * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1560  * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1561  */
1562 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1563 {
1564         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1565         struct ptlrpc_sec_policy *policy;
1566         int rc;
1567
1568         LASSERT(ctx);
1569         LASSERT(ctx->cc_sec);
1570         LASSERT(ctx->cc_sec->ps_policy);
1571         LASSERT(req->rq_reqmsg == NULL);
1572         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1573
1574         policy = ctx->cc_sec->ps_policy;
1575         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1576         if (!rc) {
1577                 LASSERT(req->rq_reqmsg);
1578                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1579
1580                 /* zeroing preallocated buffer */
1581                 if (req->rq_pool)
1582                         memset(req->rq_reqmsg, 0, msgsize);
1583         }
1584
1585         return rc;
1586 }
1587
1588 /**
1589  * Used by ptlrpc client to free request buffer of \a req. After this
1590  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1591  */
1592 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1593 {
1594         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1595         struct ptlrpc_sec_policy *policy;
1596
1597         LASSERT(ctx);
1598         LASSERT(ctx->cc_sec);
1599         LASSERT(ctx->cc_sec->ps_policy);
1600         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1601
1602         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1603                 return;
1604
1605         policy = ctx->cc_sec->ps_policy;
1606         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1607         req->rq_reqmsg = NULL;
1608 }
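
/*
 * A minimal sketch of how the two helpers above pair up on the client side
 * (hypothetical caller; in the real code paths these calls are driven by the
 * ptlrpc request packing and finalization logic):
 *
 *	rc = sptlrpc_cli_alloc_reqbuf(req, msgsize);
 *	if (rc == 0) {
 *		... fill in req->rq_reqmsg, at most msgsize bytes ...
 *		... send the request, handle the reply ...
 *		sptlrpc_cli_free_reqbuf(req);
 *	}
 */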
1609
1610 /*
1611  * NOTE caller must guarantee the buffer size is enough for the enlargement
1612  */
1613 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1614                                   int segment, int newsize)
1615 {
1616         void   *src, *dst;
1617         int     oldsize, oldmsg_size, movesize;
1618
1619         LASSERT(segment < msg->lm_bufcount);
1620         LASSERT(msg->lm_buflens[segment] <= newsize);
1621
1622         if (msg->lm_buflens[segment] == newsize)
1623                 return;
1624
1625         /* nothing to do if we are enlarging the last segment */
1626         if (segment == msg->lm_bufcount - 1) {
1627                 msg->lm_buflens[segment] = newsize;
1628                 return;
1629         }
1630
1631         oldsize = msg->lm_buflens[segment];
1632
1633         src = lustre_msg_buf(msg, segment + 1, 0);
1634         msg->lm_buflens[segment] = newsize;
1635         dst = lustre_msg_buf(msg, segment + 1, 0);
1636         msg->lm_buflens[segment] = oldsize;
1637
1638         /* move from segment + 1 to end segment */
1639         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1640         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1641         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1642         LASSERT(movesize >= 0);
1643
1644         if (movesize)
1645                 memmove(dst, src, movesize);
1646
1647                 /* note we don't clear the areas where old data live, not secret */
1648
1649         /* finally set new segment size */
1650         msg->lm_buflens[segment] = newsize;
1651 }
1652 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
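
/*
 * Worked example with hypothetical sizes: for a message with lm_bufcount == 3
 * and buflens {128, 64, 32}, enlarging segment 1 to 96 shifts segment 2
 * towards the end of the buffer and yields buflens {128, 96, 32}; the data
 * already stored in segments 1 and 2 is preserved, while the newly added
 * bytes at the tail of segment 1 are left with undefined contents.
 */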
1653
1654 /**
1655  * Used by ptlrpc client to enlarge the \a segment of the request message
1656  * pointed to by req->rq_reqmsg to size \a newsize; all previously filled-in
1657  * data will be preserved after the enlargement. This must be called after the
1658  * original request buffer has been allocated.
1659  *
1660  * \note after this is called, rq_reqmsg and rq_reqlen might have been changed,
1661  * so the caller should refresh its local pointers if needed.
1662  */
1663 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1664                                int segment, int newsize)
1665 {
1666         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1667         struct ptlrpc_sec_cops   *cops;
1668         struct lustre_msg        *msg = req->rq_reqmsg;
1669
1670         LASSERT(ctx);
1671         LASSERT(msg);
1672         LASSERT(msg->lm_bufcount > segment);
1673         LASSERT(msg->lm_buflens[segment] <= newsize);
1674
1675         if (msg->lm_buflens[segment] == newsize)
1676                 return 0;
1677
1678         cops = ctx->cc_sec->ps_policy->sp_cops;
1679         LASSERT(cops->enlarge_reqbuf);
1680         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1681 }
1682 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
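
/*
 * Hypothetical caller sketch for the \note above: keep only segment indices
 * across the call and re-fetch any pointers from the (possibly relocated)
 * request message afterwards:
 *
 *	rc = sptlrpc_cli_enlarge_reqbuf(req, segment, newsize);
 *	if (rc == 0)
 *		buf = lustre_msg_buf(req->rq_reqmsg, segment, newsize);
 */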
1683
1684 /**
1685  * Used by ptlrpc client to allocate reply buffer of \a req.
1686  *
1687  * \note After this, req->rq_repmsg is still not accessible.
1688  */
1689 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1690 {
1691         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1692         struct ptlrpc_sec_policy *policy;
1693         ENTRY;
1694
1695         LASSERT(ctx);
1696         LASSERT(ctx->cc_sec);
1697         LASSERT(ctx->cc_sec->ps_policy);
1698
1699         if (req->rq_repbuf)
1700                 RETURN(0);
1701
1702         policy = ctx->cc_sec->ps_policy;
1703         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1704 }
1705
1706 /**
1707  * Used by ptlrpc client to free reply buffer of \a req. After this
1708  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1709  */
1710 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1711 {
1712         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1713         struct ptlrpc_sec_policy *policy;
1714         ENTRY;
1715
1716         LASSERT(ctx);
1717         LASSERT(ctx->cc_sec);
1718         LASSERT(ctx->cc_sec->ps_policy);
1719         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1720
1721         if (req->rq_repbuf == NULL)
1722                 return;
1723         LASSERT(req->rq_repbuf_len);
1724
1725         policy = ctx->cc_sec->ps_policy;
1726         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1727         req->rq_repmsg = NULL;
1728         EXIT;
1729 }
1730
1731 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1732                                 struct ptlrpc_cli_ctx *ctx)
1733 {
1734         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1735
1736         if (!policy->sp_cops->install_rctx)
1737                 return 0;
1738         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1739 }
1740
1741 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1742                                 struct ptlrpc_svc_ctx *ctx)
1743 {
1744         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1745
1746         if (!policy->sp_sops->install_rctx)
1747                 return 0;
1748         return policy->sp_sops->install_rctx(imp, ctx);
1749 }
1750
1751 /****************************************
1752  * server side security                 *
1753  ****************************************/
1754
1755 static int flavor_allowed(struct sptlrpc_flavor *exp,
1756                           struct ptlrpc_request *req)
1757 {
1758         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1759
1760         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1761                 return 1;
1762
1763         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1764             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1765             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1766             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1767                 return 1;
1768
1769         return 0;
1770 }
1771
1772 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
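
/*
 * Flavor bookkeeping on an export, as used by sptlrpc_target_export_check()
 * and sptlrpc_target_update_exp_flavor() below: exp_flvr is the current
 * flavor, while exp_flvr_old[0]/[1] hold the most recent previous flavors,
 * each accepted until its exp_flvr_expire[] time passes.  A rule update
 * parks the new flavor in exp_flvr_old[1] and sets exp_flvr_changed; the
 * first request carrying that flavor promotes it to exp_flvr and pushes the
 * older flavors down, giving the displaced current flavor an expiry of
 * EXP_FLVR_UPDATE_EXPIRE seconds.
 */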
1773
1774 /**
1775  * Given an export \a exp, check whether the flavor of incoming \a req
1776  * is allowed by the export \a exp. The main logic deals with flavor changes
1777  * caused by changing configurations. Returns 0 on success.
1778  */
1779 int sptlrpc_target_export_check(struct obd_export *exp,
1780                                 struct ptlrpc_request *req)
1781 {
1782         struct sptlrpc_flavor   flavor;
1783
1784         if (exp == NULL)
1785                 return 0;
1786
1787         /* client side export has no imp_reverse, skip
1788          * FIXME maybe we should check the flavor here as well? */
1789         if (exp->exp_imp_reverse == NULL)
1790                 return 0;
1791
1792         /* don't care about ctx fini rpc */
1793         if (req->rq_ctx_fini)
1794                 return 0;
1795
1796         spin_lock(&exp->exp_lock);
1797
1798         /* if the flavor just changed (exp->exp_flvr_changed != 0), wait for
1799          * the first req with the new flavor, then treat it as the current
1800          * flavor and adapt the reverse sec accordingly.
1801          * note the first rpc with the new flavor might not carry a root ctx;
1802          * in that case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1803         if (unlikely(exp->exp_flvr_changed) &&
1804             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1805                 /* make the new flavor the "current" one, and the old ones
1806                  * about-to-expire */
1807                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1808                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1809                 flavor = exp->exp_flvr_old[1];
1810                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1811                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1812                 exp->exp_flvr_old[0] = exp->exp_flvr;
1813                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1814                                           EXP_FLVR_UPDATE_EXPIRE;
1815                 exp->exp_flvr = flavor;
1816
1817                 /* flavor change finished */
1818                 exp->exp_flvr_changed = 0;
1819                 LASSERT(exp->exp_flvr_adapt == 1);
1820
1821                 /* if it's gss, we are only interested in root ctx init */
1822                 if (req->rq_auth_gss &&
1823                     !(req->rq_ctx_init &&
1824                       (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1825                        req->rq_auth_usr_ost))) {
1826                         spin_unlock(&exp->exp_lock);
1827                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1828                                req->rq_auth_gss, req->rq_ctx_init,
1829                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1830                                req->rq_auth_usr_ost);
1831                         return 0;
1832                 }
1833
1834                 exp->exp_flvr_adapt = 0;
1835                 spin_unlock(&exp->exp_lock);
1836
1837                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1838                                                 req->rq_svc_ctx, &flavor);
1839         }
1840
1841         /* if it equals the current flavor, we accept it, but still need to
1842          * deal with the reverse sec/ctx */
1843         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1844                 /* most cases should return here; we are only interested in
1845                  * gss root ctx init */
1846                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1847                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1848                      !req->rq_auth_usr_ost)) {
1849                         spin_unlock(&exp->exp_lock);
1850                         return 0;
1851                 }
1852
1853                 /* if the flavor just changed, we should not proceed; just
1854                  * leave it, the current flavor will be discovered and replaced
1855                  * shortly, and let _this_ rpc pass through */
1856                 if (exp->exp_flvr_changed) {
1857                         LASSERT(exp->exp_flvr_adapt);
1858                         spin_unlock(&exp->exp_lock);
1859                         return 0;
1860                 }
1861
1862                 if (exp->exp_flvr_adapt) {
1863                         exp->exp_flvr_adapt = 0;
1864                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1865                                exp, exp->exp_flvr.sf_rpc,
1866                                exp->exp_flvr_old[0].sf_rpc,
1867                                exp->exp_flvr_old[1].sf_rpc);
1868                         flavor = exp->exp_flvr;
1869                         spin_unlock(&exp->exp_lock);
1870
1871                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1872                                                         req->rq_svc_ctx,
1873                                                         &flavor);
1874                 } else {
1875                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1876                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1877                                exp->exp_flvr_old[0].sf_rpc,
1878                                exp->exp_flvr_old[1].sf_rpc);
1879                         spin_unlock(&exp->exp_lock);
1880
1881                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1882                                                            req->rq_svc_ctx);
1883                 }
1884         }
1885
1886         if (exp->exp_flvr_expire[0]) {
1887                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1888                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1889                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1890                                        "middle one ("CFS_DURATION_T")\n", exp,
1891                                        exp->exp_flvr.sf_rpc,
1892                                        exp->exp_flvr_old[0].sf_rpc,
1893                                        exp->exp_flvr_old[1].sf_rpc,
1894                                        exp->exp_flvr_expire[0] -
1895                                                 cfs_time_current_sec());
1896                                 spin_unlock(&exp->exp_lock);
1897                                 return 0;
1898                         }
1899                 } else {
1900                         CDEBUG(D_SEC, "mark middle expired\n");
1901                         exp->exp_flvr_expire[0] = 0;
1902                 }
1903                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1904                        exp->exp_flvr.sf_rpc,
1905                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1906                        req->rq_flvr.sf_rpc);
1907         }
1908
1909         /* now that it doesn't match the current flavor, the only chance to
1910          * accept it is to match an old flavor which has not expired. */
1911         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1912                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1913                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1914                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1915                                        "oldest one ("CFS_DURATION_T")\n", exp,
1916                                        exp->exp_flvr.sf_rpc,
1917                                        exp->exp_flvr_old[0].sf_rpc,
1918                                        exp->exp_flvr_old[1].sf_rpc,
1919                                        exp->exp_flvr_expire[1] -
1920                                                 cfs_time_current_sec());
1921                                 spin_unlock(&exp->exp_lock);
1922                                 return 0;
1923                         }
1924                 } else {
1925                         CDEBUG(D_SEC, "mark oldest expired\n");
1926                         exp->exp_flvr_expire[1] = 0;
1927                 }
1928                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1929                        exp, exp->exp_flvr.sf_rpc,
1930                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1931                        req->rq_flvr.sf_rpc);
1932         } else {
1933                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1934                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1935                        exp->exp_flvr_old[1].sf_rpc);
1936         }
1937
1938         spin_unlock(&exp->exp_lock);
1939
1940         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
1941               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1942               exp, exp->exp_obd->obd_name,
1943               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1944               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1945               req->rq_flvr.sf_rpc,
1946               exp->exp_flvr.sf_rpc,
1947               exp->exp_flvr_old[0].sf_rpc,
1948               exp->exp_flvr_expire[0] ?
1949               (unsigned long) (exp->exp_flvr_expire[0] -
1950                                cfs_time_current_sec()) : 0,
1951               exp->exp_flvr_old[1].sf_rpc,
1952               exp->exp_flvr_expire[1] ?
1953               (unsigned long) (exp->exp_flvr_expire[1] -
1954                                cfs_time_current_sec()) : 0);
1955         return -EACCES;
1956 }
1957 EXPORT_SYMBOL(sptlrpc_target_export_check);
1958
1959 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1960                                       struct sptlrpc_rule_set *rset)
1961 {
1962         struct obd_export       *exp;
1963         struct sptlrpc_flavor    new_flvr;
1964
1965         LASSERT(obd);
1966
1967         spin_lock(&obd->obd_dev_lock);
1968
1969         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1970                 if (exp->exp_connection == NULL)
1971                         continue;
1972
1973                 /* note if this export's flavor had just been updated
1974                  * (exp_flvr_changed == 1), this will override the
1975                  * previous one. */
1976                 spin_lock(&exp->exp_lock);
1977                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1978                                              exp->exp_connection->c_peer.nid,
1979                                              &new_flvr);
1980                 if (exp->exp_flvr_changed ||
1981                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1982                         exp->exp_flvr_old[1] = new_flvr;
1983                         exp->exp_flvr_expire[1] = 0;
1984                         exp->exp_flvr_changed = 1;
1985                         exp->exp_flvr_adapt = 1;
1986
1987                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1988                                exp, sptlrpc_part2name(exp->exp_sp_peer),
1989                                exp->exp_flvr.sf_rpc,
1990                                exp->exp_flvr_old[1].sf_rpc);
1991                 }
1992                 spin_unlock(&exp->exp_lock);
1993         }
1994
1995         spin_unlock(&obd->obd_dev_lock);
1996 }
1997 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1998
1999 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2000 {
2001         /* peer's claim is unreliable unless gss is being used */
2002         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2003                 return svc_rc;
2004
2005         switch (req->rq_sp_from) {
2006         case LUSTRE_SP_CLI:
2007                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2008                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2009                         svc_rc = SECSVC_DROP;
2010                 }
2011                 break;
2012         case LUSTRE_SP_MDT:
2013                 if (!req->rq_auth_usr_mdt) {
2014                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2015                         svc_rc = SECSVC_DROP;
2016                 }
2017                 break;
2018         case LUSTRE_SP_OST:
2019                 if (!req->rq_auth_usr_ost) {
2020                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2021                         svc_rc = SECSVC_DROP;
2022                 }
2023                 break;
2024         case LUSTRE_SP_MGS:
2025         case LUSTRE_SP_MGC:
2026                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2027                     !req->rq_auth_usr_ost) {
2028                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2029                         svc_rc = SECSVC_DROP;
2030                 }
2031                 break;
2032         case LUSTRE_SP_ANY:
2033         default:
2034                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2035                 svc_rc = SECSVC_DROP;
2036         }
2037
2038         return svc_rc;
2039 }
2040
2041 /**
2042  * Used by ptlrpc server, to perform transformation upon request message of
2043  * incoming \a req. This must be the first thing to do with an incoming
2044  * request in the ptlrpc layer.
2045  *
2046  * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
2047  * in clear text, with size req->rq_reqlen; also req->rq_svc_ctx is set.
2048  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2049  * reply message has been prepared.
2050  * \retval SECSVC_DROP failed, this request should be dropped.
2051  */
2052 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2053 {
2054         struct ptlrpc_sec_policy *policy;
2055         struct lustre_msg        *msg = req->rq_reqbuf;
2056         int                       rc;
2057         ENTRY;
2058
2059         LASSERT(msg);
2060         LASSERT(req->rq_reqmsg == NULL);
2061         LASSERT(req->rq_repmsg == NULL);
2062         LASSERT(req->rq_svc_ctx == NULL);
2063
2064         req->rq_req_swab_mask = 0;
2065
2066         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2067         switch (rc) {
2068         case 1:
2069                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2070         case 0:
2071                 break;
2072         default:
2073                 CERROR("error unpacking request from %s x"LPU64"\n",
2074                        libcfs_id2str(req->rq_peer), req->rq_xid);
2075                 RETURN(SECSVC_DROP);
2076         }
2077
2078         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2079         req->rq_sp_from = LUSTRE_SP_ANY;
2080         req->rq_auth_uid = -1;          /* set to INVALID_UID */
2081         req->rq_auth_mapped_uid = -1;
2082
2083         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2084         if (!policy) {
2085                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2086                 RETURN(SECSVC_DROP);
2087         }
2088
2089         LASSERT(policy->sp_sops->accept);
2090         rc = policy->sp_sops->accept(req);
2091         sptlrpc_policy_put(policy);
2092         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2093         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2094
2095         /*
2096          * if it's not the null flavor (which means an embedded, packed msg),
2097          * reset the swab mask for the coming inner msg unpacking.
2098          */
2099         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2100                 req->rq_req_swab_mask = 0;
2101
2102         /* sanity check for the request source */
2103         rc = sptlrpc_svc_check_from(req, rc);
2104         RETURN(rc);
2105 }
2106
2107 /**
2108  * Used by ptlrpc server, to allocate reply buffer for \a req. On success,
2109  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2110  * a buffer of \a msglen size.
2111  */
2112 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2113 {
2114         struct ptlrpc_sec_policy *policy;
2115         struct ptlrpc_reply_state *rs;
2116         int rc;
2117         ENTRY;
2118
2119         LASSERT(req->rq_svc_ctx);
2120         LASSERT(req->rq_svc_ctx->sc_policy);
2121
2122         policy = req->rq_svc_ctx->sc_policy;
2123         LASSERT(policy->sp_sops->alloc_rs);
2124
2125         rc = policy->sp_sops->alloc_rs(req, msglen);
2126         if (unlikely(rc == -ENOMEM)) {
2127                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2128                 if (svcpt->scp_service->srv_max_reply_size <
2129                    msglen + sizeof(struct ptlrpc_reply_state)) {
2130                         /* Just return failure if the size is too big */
2131                         CERROR("size of message is too big (%zd), %d allowed\n",
2132                                 msglen + sizeof(struct ptlrpc_reply_state),
2133                                 svcpt->scp_service->srv_max_reply_size);
2134                         RETURN(-ENOMEM);
2135                 }
2136
2137                 /* failed alloc, try emergency pool */
2138                 rs = lustre_get_emerg_rs(svcpt);
2139                 if (rs == NULL)
2140                         RETURN(-ENOMEM);
2141
2142                 req->rq_reply_state = rs;
2143                 rc = policy->sp_sops->alloc_rs(req, msglen);
2144                 if (rc) {
2145                         lustre_put_emerg_rs(rs);
2146                         req->rq_reply_state = NULL;
2147                 }
2148         }
2149
2150         LASSERT(rc != 0 ||
2151                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2152
2153         RETURN(rc);
2154 }
2155
2156 /**
2157  * Used by ptlrpc server, to perform transformation upon reply message.
2158  *
2159  * \post req->rq_reply_off is set to appropriate server-controlled reply offset.
2160  * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2161  */
2162 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2163 {
2164         struct ptlrpc_sec_policy *policy;
2165         int rc;
2166         ENTRY;
2167
2168         LASSERT(req->rq_svc_ctx);
2169         LASSERT(req->rq_svc_ctx->sc_policy);
2170
2171         policy = req->rq_svc_ctx->sc_policy;
2172         LASSERT(policy->sp_sops->authorize);
2173
2174         rc = policy->sp_sops->authorize(req);
2175         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2176
2177         RETURN(rc);
2178 }
2179
2180 /**
2181  * Used by ptlrpc server, to free reply_state.
2182  */
2183 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2184 {
2185         struct ptlrpc_sec_policy *policy;
2186         unsigned int prealloc;
2187         ENTRY;
2188
2189         LASSERT(rs->rs_svc_ctx);
2190         LASSERT(rs->rs_svc_ctx->sc_policy);
2191
2192         policy = rs->rs_svc_ctx->sc_policy;
2193         LASSERT(policy->sp_sops->free_rs);
2194
2195         prealloc = rs->rs_prealloc;
2196         policy->sp_sops->free_rs(rs);
2197
2198         if (prealloc)
2199                 lustre_put_emerg_rs(rs);
2200         EXIT;
2201 }
2202
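/*
 * A minimal sketch of the server-side call order implied by the four entry
 * points above (hypothetical service loop, error handling omitted):
 *
 *	rc = sptlrpc_svc_unwrap_request(req);     (SECSVC_OK/COMPLETE/DROP)
 *	...
 *	rc = sptlrpc_svc_alloc_rs(req, msglen);   (sets req->rq_reply_state)
 *	... fill in the reply message ...
 *	rc = sptlrpc_svc_wrap_reply(req);         (transform before sending)
 *	...
 *	sptlrpc_svc_free_rs(rs);                  (when the reply is done)
 */
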
2203 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2204 {
2205         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2206
2207         if (ctx != NULL)
2208                 atomic_inc(&ctx->sc_refcount);
2209 }
2210
2211 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2212 {
2213         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2214
2215         if (ctx == NULL)
2216                 return;
2217
2218         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2219         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2220                 if (ctx->sc_policy->sp_sops->free_ctx)
2221                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2222         }
2223         req->rq_svc_ctx = NULL;
2224 }
2225
2226 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2227 {
2228         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2229
2230         if (ctx == NULL)
2231                 return;
2232
2233         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2234         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2235                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2236 }
2237 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2238
2239 /****************************************
2240  * bulk security                        *
2241  ****************************************/
2242
2243 /**
2244  * Perform transformation upon bulk data pointed to by \a desc. This is called
2245  * before transforming the request message.
2246  */
2247 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2248                           struct ptlrpc_bulk_desc *desc)
2249 {
2250         struct ptlrpc_cli_ctx *ctx;
2251
2252         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2253
2254         if (!req->rq_pack_bulk)
2255                 return 0;
2256
2257         ctx = req->rq_cli_ctx;
2258         if (ctx->cc_ops->wrap_bulk)
2259                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2260         return 0;
2261 }
2262 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2263
2264 /**
2265  * This is called after unwrapping the reply message.
2266  * Return the nob of actual plain text received, or an error code.
2267  */
2268 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2269                                  struct ptlrpc_bulk_desc *desc,
2270                                  int nob)
2271 {
2272         struct ptlrpc_cli_ctx  *ctx;
2273         int                     rc;
2274
2275         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2276
2277         if (!req->rq_pack_bulk)
2278                 return desc->bd_nob_transferred;
2279
2280         ctx = req->rq_cli_ctx;
2281         if (ctx->cc_ops->unwrap_bulk) {
2282                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2283                 if (rc < 0)
2284                         return rc;
2285         }
2286         return desc->bd_nob_transferred;
2287 }
2288 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2289
2290 /**
2291  * This is called after unwrapping the reply message.
2292  * Return 0 on success, or an error code.
2293  */
2294 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2295                                   struct ptlrpc_bulk_desc *desc)
2296 {
2297         struct ptlrpc_cli_ctx  *ctx;
2298         int                     rc;
2299
2300         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2301
2302         if (!req->rq_pack_bulk)
2303                 return 0;
2304
2305         ctx = req->rq_cli_ctx;
2306         if (ctx->cc_ops->unwrap_bulk) {
2307                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2308                 if (rc < 0)
2309                         return rc;
2310         }
2311
2312         /*
2313          * if everything is going right, nob should equal nob_transferred.
2314          * in case of privacy mode, nob_transferred needs to be adjusted.
2315          */
2316         if (desc->bd_nob != desc->bd_nob_transferred) {
2317                 CERROR("nob %d doesn't match transferred nob %d\n",
2318                        desc->bd_nob, desc->bd_nob_transferred);
2319                 return -EPROTO;
2320         }
2321
2322         return 0;
2323 }
2324 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2325
2326 #ifdef HAVE_SERVER_SUPPORT
2327 /**
2328  * Perform transformation upon outgoing bulk read.
2329  */
2330 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2331                           struct ptlrpc_bulk_desc *desc)
2332 {
2333         struct ptlrpc_svc_ctx *ctx;
2334
2335         LASSERT(req->rq_bulk_read);
2336
2337         if (!req->rq_pack_bulk)
2338                 return 0;
2339
2340         ctx = req->rq_svc_ctx;
2341         if (ctx->sc_policy->sp_sops->wrap_bulk)
2342                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2343
2344         return 0;
2345 }
2346 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2347
2348 /**
2349  * Perform transformation upon incoming bulk write.
2350  */
2351 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2352                             struct ptlrpc_bulk_desc *desc)
2353 {
2354         struct ptlrpc_svc_ctx *ctx;
2355         int                    rc;
2356
2357         LASSERT(req->rq_bulk_write);
2358
2359         /*
2360          * if it's in privacy mode, transferred should be >= expected;
2361          * otherwise transferred should be == expected.
2362          */
2363         if (desc->bd_nob_transferred < desc->bd_nob ||
2364             (desc->bd_nob_transferred > desc->bd_nob &&
2365              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2366              SPTLRPC_BULK_SVC_PRIV)) {
2367                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2368                           desc->bd_nob_transferred, desc->bd_nob);
2369                 return -ETIMEDOUT;
2370         }
2371
2372         if (!req->rq_pack_bulk)
2373                 return 0;
2374
2375         ctx = req->rq_svc_ctx;
2376         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2377                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2378                 if (rc)
2379                         CERROR("error unwrap bulk: %d\n", rc);
2380         }
2381
2382         /* return 0 to allow the reply to be sent */
2383         return 0;
2384 }
2385 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2386
2387 /**
2388  * Prepare buffers for incoming bulk write.
2389  */
2390 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2391                           struct ptlrpc_bulk_desc *desc)
2392 {
2393         struct ptlrpc_svc_ctx *ctx;
2394
2395         LASSERT(req->rq_bulk_write);
2396
2397         if (!req->rq_pack_bulk)
2398                 return 0;
2399
2400         ctx = req->rq_svc_ctx;
2401         if (ctx->sc_policy->sp_sops->prep_bulk)
2402                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2403
2404         return 0;
2405 }
2406 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2407
2408 #endif /* HAVE_SERVER_SUPPORT */
2409
2410 /****************************************
2411  * user descriptor helpers              *
2412  ****************************************/
2413
2414 int sptlrpc_current_user_desc_size(void)
2415 {
2416         int ngroups;
2417
2418         ngroups = current_ngroups;
2419
2420         if (ngroups > LUSTRE_MAX_GROUPS)
2421                 ngroups = LUSTRE_MAX_GROUPS;
2422         return sptlrpc_user_desc_size(ngroups);
2423 }
2424 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2425
2426 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2427 {
2428         struct ptlrpc_user_desc *pud;
2429
2430         pud = lustre_msg_buf(msg, offset, 0);
2431
2432         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2433         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2434         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2435         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2436         pud->pud_cap = cfs_curproc_cap_pack();
2437         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2438
2439         task_lock(current);
2440         if (pud->pud_ngroups > current_ngroups)
2441                 pud->pud_ngroups = current_ngroups;
2442         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2443                pud->pud_ngroups * sizeof(__u32));
2444         task_unlock(current);
2445
2446         return 0;
2447 }
2448 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
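
/*
 * Hypothetical packing sketch: the caller reserves a segment sized by
 * sptlrpc_current_user_desc_size() when laying out the request message,
 * then fills it in with sptlrpc_pack_user_desc():
 *
 *	buflens[offset] = sptlrpc_current_user_desc_size();
 *	... allocate and pack the lustre_msg with these buflens ...
 *	rc = sptlrpc_pack_user_desc(msg, offset);
 */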
2449
2450 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2451 {
2452         struct ptlrpc_user_desc *pud;
2453         int                      i;
2454
2455         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2456         if (!pud)
2457                 return -EINVAL;
2458
2459         if (swabbed) {
2460                 __swab32s(&pud->pud_uid);
2461                 __swab32s(&pud->pud_gid);
2462                 __swab32s(&pud->pud_fsuid);
2463                 __swab32s(&pud->pud_fsgid);
2464                 __swab32s(&pud->pud_cap);
2465                 __swab32s(&pud->pud_ngroups);
2466         }
2467
2468         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2469                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2470                 return -EINVAL;
2471         }
2472
2473         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2474             msg->lm_buflens[offset]) {
2475                 CERROR("%u groups are claimed but bufsize only %u\n",
2476                        pud->pud_ngroups, msg->lm_buflens[offset]);
2477                 return -EINVAL;
2478         }
2479
2480         if (swabbed) {
2481                 for (i = 0; i < pud->pud_ngroups; i++)
2482                         __swab32s(&pud->pud_groups[i]);
2483         }
2484
2485         return 0;
2486 }
2487 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2488
2489 /****************************************
2490  * misc helpers                         *
2491  ****************************************/
2492
2493 const char * sec2target_str(struct ptlrpc_sec *sec)
2494 {
2495         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2496                 return "*";
2497         if (sec_is_reverse(sec))
2498                 return "c";
2499         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2500 }
2501 EXPORT_SYMBOL(sec2target_str);
2502
2503 /*
2504  * return true if the bulk data is protected
2505  */
2506 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2507 {
2508         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2509         case SPTLRPC_BULK_SVC_INTG:
2510         case SPTLRPC_BULK_SVC_PRIV:
2511                 return 1;
2512         default:
2513                 return 0;
2514         }
2515 }
2516 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2517
2518 /****************************************
2519  * crypto API helper/alloc blkcipher    *
2520  ****************************************/
2521
2522 /****************************************
2523  * initialize/finalize                  *
2524  ****************************************/
2525
2526 int sptlrpc_init(void)
2527 {
2528         int rc;
2529
2530         rwlock_init(&policy_lock);
2531
2532         rc = sptlrpc_gc_init();
2533         if (rc)
2534                 goto out;
2535
2536         rc = sptlrpc_conf_init();
2537         if (rc)
2538                 goto out_gc;
2539
2540         rc = sptlrpc_enc_pool_init();
2541         if (rc)
2542                 goto out_conf;
2543
2544         rc = sptlrpc_null_init();
2545         if (rc)
2546                 goto out_pool;
2547
2548         rc = sptlrpc_plain_init();
2549         if (rc)
2550                 goto out_null;
2551
2552         rc = sptlrpc_lproc_init();
2553         if (rc)
2554                 goto out_plain;
2555
2556         return 0;
2557
2558 out_plain:
2559         sptlrpc_plain_fini();
2560 out_null:
2561         sptlrpc_null_fini();
2562 out_pool:
2563         sptlrpc_enc_pool_fini();
2564 out_conf:
2565         sptlrpc_conf_fini();
2566 out_gc:
2567         sptlrpc_gc_fini();
2568 out:
2569         return rc;
2570 }
2571
2572 void sptlrpc_fini(void)
2573 {
2574         sptlrpc_lproc_fini();
2575         sptlrpc_plain_fini();
2576         sptlrpc_null_fini();
2577         sptlrpc_enc_pool_fini();
2578         sptlrpc_conf_fini();
2579         sptlrpc_gc_fini();
2580 }