LU-6635 lfsck: block replacing the OST-object for test
[fs/lustre-release.git] / lustre / ptlrpc / sec.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2014, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ptlrpc/sec.c
33  *
34  * Author: Eric Mei <ericm@clusterfs.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_SEC
38
39 #include <linux/user_namespace.h>
40 #ifdef HAVE_UIDGID_HEADER
41 # include <linux/uidgid.h>
42 #endif
43 #include <linux/crypto.h>
44 #include <linux/key.h>
45
46 #include <libcfs/libcfs.h>
47 #include <obd.h>
48 #include <obd_class.h>
49 #include <obd_support.h>
50 #include <lustre_net.h>
51 #include <lustre_import.h>
52 #include <lustre_dlm.h>
53 #include <lustre_sec.h>
54
55 #include "ptlrpc_internal.h"
56
57 /***********************************************
58  * policy registers                            *
59  ***********************************************/
60
61 static rwlock_t policy_lock;
62 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
63         NULL,
64 };
65
66 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
67 {
68         __u16 number = policy->sp_policy;
69
70         LASSERT(policy->sp_name);
71         LASSERT(policy->sp_cops);
72         LASSERT(policy->sp_sops);
73
74         if (number >= SPTLRPC_POLICY_MAX)
75                 return -EINVAL;
76
77         write_lock(&policy_lock);
78         if (unlikely(policies[number])) {
79                 write_unlock(&policy_lock);
80                 return -EALREADY;
81         }
82         policies[number] = policy;
83         write_unlock(&policy_lock);
84
85         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
86         return 0;
87 }
88 EXPORT_SYMBOL(sptlrpc_register_policy);
89
90 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
91 {
92         __u16 number = policy->sp_policy;
93
94         LASSERT(number < SPTLRPC_POLICY_MAX);
95
96         write_lock(&policy_lock);
97         if (unlikely(policies[number] == NULL)) {
98                 write_unlock(&policy_lock);
99                 CERROR("%s: already unregistered\n", policy->sp_name);
100                 return -EINVAL;
101         }
102
103         LASSERT(policies[number] == policy);
104         policies[number] = NULL;
105         write_unlock(&policy_lock);
106
107         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
108         return 0;
109 }
110 EXPORT_SYMBOL(sptlrpc_unregister_policy);
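/*
 * Illustrative sketch, not part of the original file: how a policy module
 * might register itself on load and unregister on unload.  The names
 * "example_policy", "example_cops", "example_sops", "SPTLRPC_POLICY_EXAMPLE"
 * and the init/exit functions are hypothetical; a real policy fills in a
 * complete struct ptlrpc_sec_policy and uses a real SPTLRPC_POLICY_* number.
 *
 *	static struct ptlrpc_sec_policy example_policy = {
 *		.sp_name   = "example",
 *		.sp_owner  = THIS_MODULE,
 *		.sp_policy = SPTLRPC_POLICY_EXAMPLE, // hypothetical, must be < SPTLRPC_POLICY_MAX
 *		.sp_cops   = &example_cops,          // client-side operations
 *		.sp_sops   = &example_sops,          // server-side operations
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		// -EINVAL for a bad policy number, -EALREADY if the slot is taken
 *		return sptlrpc_register_policy(&example_policy);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		sptlrpc_unregister_policy(&example_policy);
 *	}
 */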
111
112 static
113 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
114 {
115         static DEFINE_MUTEX(load_mutex);
116         static atomic_t           loaded = ATOMIC_INIT(0);
117         struct ptlrpc_sec_policy *policy;
118         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
119         __u16                     flag = 0;
120
121         if (number >= SPTLRPC_POLICY_MAX)
122                 return NULL;
123
124         while (1) {
125                 read_lock(&policy_lock);
126                 policy = policies[number];
127                 if (policy && !try_module_get(policy->sp_owner))
128                         policy = NULL;
129                 if (policy == NULL)
130                         flag = atomic_read(&loaded);
131                 read_unlock(&policy_lock);
132
133                 if (policy != NULL || flag != 0 ||
134                     number != SPTLRPC_POLICY_GSS)
135                         break;
136
137                 /* try to load gss module, once */
138                 mutex_lock(&load_mutex);
139                 if (atomic_read(&loaded) == 0) {
140                         if (request_module("ptlrpc_gss") == 0)
141                                 CDEBUG(D_SEC,
142                                        "module ptlrpc_gss loaded on demand\n");
143                         else
144                                 CERROR("Unable to load module ptlrpc_gss\n");
145
146                         atomic_set(&loaded, 1);
147                 }
148                 mutex_unlock(&load_mutex);
149         }
150
151         return policy;
152 }
153
154 __u32 sptlrpc_name2flavor_base(const char *name)
155 {
156         if (!strcmp(name, "null"))
157                 return SPTLRPC_FLVR_NULL;
158         if (!strcmp(name, "plain"))
159                 return SPTLRPC_FLVR_PLAIN;
160         if (!strcmp(name, "gssnull"))
161                 return SPTLRPC_FLVR_GSSNULL;
162         if (!strcmp(name, "krb5n"))
163                 return SPTLRPC_FLVR_KRB5N;
164         if (!strcmp(name, "krb5a"))
165                 return SPTLRPC_FLVR_KRB5A;
166         if (!strcmp(name, "krb5i"))
167                 return SPTLRPC_FLVR_KRB5I;
168         if (!strcmp(name, "krb5p"))
169                 return SPTLRPC_FLVR_KRB5P;
170         if (!strcmp(name, "skn"))
171                 return SPTLRPC_FLVR_SKN;
172         if (!strcmp(name, "ska"))
173                 return SPTLRPC_FLVR_SKA;
174         if (!strcmp(name, "ski"))
175                 return SPTLRPC_FLVR_SKI;
176         if (!strcmp(name, "skpi"))
177                 return SPTLRPC_FLVR_SKPI;
178
179         return SPTLRPC_FLVR_INVALID;
180 }
181 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
182
183 const char *sptlrpc_flavor2name_base(__u32 flvr)
184 {
185         __u32   base = SPTLRPC_FLVR_BASE(flvr);
186
187         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
188                 return "null";
189         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
190                 return "plain";
191         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
192                 return "gssnull";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
194                 return "krb5n";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
196                 return "krb5a";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
198                 return "krb5i";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
200                 return "krb5p";
201         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
202                 return "skn";
203         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
204                 return "ska";
205         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
206                 return "ski";
207         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
208                 return "skpi";
209
210         CERROR("invalid wire flavor 0x%x\n", flvr);
211         return "invalid";
212 }
213 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
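/*
 * Illustrative sketch, not part of the original file: the two helpers above
 * are inverses on the base flavor, e.g.
 *
 *	__u32 flvr = sptlrpc_name2flavor_base("krb5i");
 *
 *	if (flvr != SPTLRPC_FLVR_INVALID)
 *		CDEBUG(D_SEC, "krb5i maps to 0x%x (%s)\n",
 *		       flvr, sptlrpc_flavor2name_base(flvr));
 */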
214
215 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
216                                char *buf, int bufsize)
217 {
218         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
219                 snprintf(buf, bufsize, "hash:%s",
220                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
221         else
222                 snprintf(buf, bufsize, "%s",
223                          sptlrpc_flavor2name_base(sf->sf_rpc));
224
225         buf[bufsize - 1] = '\0';
226         return buf;
227 }
228 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
229
230 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
231 {
232         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
233
234         /*
235          * currently we don't support customized bulk specification for
236          * flavors other than plain
237          */
238         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
239                 char bspec[16];
240
241                 bspec[0] = '-';
242                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
243                 strncat(buf, bspec, bufsize);
244         }
245
246         buf[bufsize - 1] = '\0';
247         return buf;
248 }
249 EXPORT_SYMBOL(sptlrpc_flavor2name);
250
251 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
252 {
253         buf[0] = '\0';
254
255         if (flags & PTLRPC_SEC_FL_REVERSE)
256                 strlcat(buf, "reverse,", bufsize);
257         if (flags & PTLRPC_SEC_FL_ROOTONLY)
258                 strlcat(buf, "rootonly,", bufsize);
259         if (flags & PTLRPC_SEC_FL_UDESC)
260                 strlcat(buf, "udesc,", bufsize);
261         if (flags & PTLRPC_SEC_FL_BULK)
262                 strlcat(buf, "bulk,", bufsize);
263         if (buf[0] == '\0')
264                 strlcat(buf, "-,", bufsize);
265
266         return buf;
267 }
268 EXPORT_SYMBOL(sptlrpc_secflags2str);
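/*
 * Illustrative usage sketch, not part of the original file: formatting the
 * flags of an existing sec for a debug message; "sec" is assumed to be a
 * valid struct ptlrpc_sec pointer.
 *
 *	char flags_str[64];
 *
 *	CDEBUG(D_SEC, "sec %p flags: %s\n", sec,
 *	       sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
 *				    flags_str, sizeof(flags_str)));
 */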
269
270 /**************************************************
271  * client context APIs                            *
272  **************************************************/
273
274 static
275 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
276 {
277         struct vfs_cred vcred;
278         int create = 1, remove_dead = 1;
279
280         LASSERT(sec);
281         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
282
283         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
284                                      PTLRPC_SEC_FL_ROOTONLY)) {
285                 vcred.vc_uid = 0;
286                 vcred.vc_gid = 0;
287                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
288                         create = 0;
289                         remove_dead = 0;
290                 }
291         } else {
292                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
293                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
294         }
295
296         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
297                                                    remove_dead);
298 }
299
300 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
301 {
302         atomic_inc(&ctx->cc_refcount);
303         return ctx;
304 }
305 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
306
307 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
308 {
309         struct ptlrpc_sec *sec = ctx->cc_sec;
310
311         LASSERT(sec);
312         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
313
314         if (!atomic_dec_and_test(&ctx->cc_refcount))
315                 return;
316
317         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
318 }
319 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
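/*
 * Illustrative sketch, not part of the original file: the usual get/put
 * pattern for a client context.  A caller that keeps a context pointer
 * beyond the scope where it obtained it must hold its own reference:
 *
 *	struct ptlrpc_cli_ctx *ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
 *
 *	... use ctx ...
 *
 *	sptlrpc_cli_ctx_put(ctx, 1);	// sync == 1: the release path may sleep
 *					// (e.g. send a context-destroying rpc)
 */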
320
321 /**
322  * Expire the client context immediately.
323  *
324  * \pre Caller must hold at least 1 reference on the \a ctx.
325  */
326 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
327 {
328         LASSERT(ctx->cc_ops->die);
329         ctx->cc_ops->die(ctx, 0);
330 }
331 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
332
333 /**
334  * Wake up the threads that are waiting for this client context. Called
335  * after some status change has happened on \a ctx.
336  */
337 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
338 {
339         struct ptlrpc_request *req, *next;
340
341         spin_lock(&ctx->cc_lock);
342         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
343                                      rq_ctx_chain) {
344                 list_del_init(&req->rq_ctx_chain);
345                 ptlrpc_client_wake_req(req);
346         }
347         spin_unlock(&ctx->cc_lock);
348 }
349 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
350
351 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
352 {
353         LASSERT(ctx->cc_ops);
354
355         if (ctx->cc_ops->display == NULL)
356                 return 0;
357
358         return ctx->cc_ops->display(ctx, buf, bufsize);
359 }
360
361 static int import_sec_check_expire(struct obd_import *imp)
362 {
363         int     adapt = 0;
364
365         spin_lock(&imp->imp_lock);
366         if (imp->imp_sec_expire &&
367             imp->imp_sec_expire < cfs_time_current_sec()) {
368                 adapt = 1;
369                 imp->imp_sec_expire = 0;
370         }
371         spin_unlock(&imp->imp_lock);
372
373         if (!adapt)
374                 return 0;
375
376         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
377         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
378 }
379
380 /**
381  * Get and validate the client side ptlrpc security facilities from
382  * \a imp. There is a race condition on client reconnect when the import is
383  * being destroyed while there are outstanding client bound requests. In
384  * this case do not output any error messages if import security is not
385  * found.
386  *
387  * \param[in] imp obd import associated with client
388  * \param[out] sec client side ptlrpc security
389  *
390  * \retval 0 if security retrieved successfully
391  * \retval -ve errno if there was a problem
392  */
393 static int import_sec_validate_get(struct obd_import *imp,
394                                    struct ptlrpc_sec **sec)
395 {
396         int     rc;
397
398         if (unlikely(imp->imp_sec_expire)) {
399                 rc = import_sec_check_expire(imp);
400                 if (rc)
401                         return rc;
402         }
403
404         *sec = sptlrpc_import_sec_ref(imp);
405         /* Only output an error when the import is still active */
406         if (*sec == NULL) {
407                 if (list_empty(&imp->imp_zombie_chain))
408                         CERROR("import %p (%s) with no sec\n",
409                                 imp, ptlrpc_import_state_name(imp->imp_state));
410                 return -EACCES;
411         }
412
413         if (unlikely((*sec)->ps_dying)) {
414                 CERROR("attempt to use dying sec %p\n", sec);
415                 sptlrpc_sec_put(*sec);
416                 return -EACCES;
417         }
418
419         return 0;
420 }
421
422 /**
423  * Given a \a req, find or allocate an appropriate context for it.
424  * \pre req->rq_cli_ctx == NULL.
425  *
426  * \retval 0 success, and req->rq_cli_ctx is set.
427  * \retval -ve error number, and req->rq_cli_ctx == NULL.
428  */
429 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
430 {
431         struct obd_import *imp = req->rq_import;
432         struct ptlrpc_sec *sec;
433         int                rc;
434         ENTRY;
435
436         LASSERT(!req->rq_cli_ctx);
437         LASSERT(imp);
438
439         rc = import_sec_validate_get(imp, &sec);
440         if (rc)
441                 RETURN(rc);
442
443         req->rq_cli_ctx = get_my_ctx(sec);
444
445         sptlrpc_sec_put(sec);
446
447         if (!req->rq_cli_ctx) {
448                 CERROR("req %p: fail to get context\n", req);
449                 RETURN(-ECONNREFUSED);
450         }
451
452         RETURN(0);
453 }
454
455 /**
456  * Drop the context for \a req.
457  * \pre req->rq_cli_ctx != NULL.
458  * \post req->rq_cli_ctx == NULL.
459  *
460  * If \a sync == 0, this function should return quickly without sleep;
461  * otherwise it might trigger, and wait for, the whole process of sending
462  * a context-destroying rpc to the server.
463  */
464 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
465 {
466         ENTRY;
467
468         LASSERT(req);
469         LASSERT(req->rq_cli_ctx);
470
471         /* the request might be asked to release its context early while
472          * it is still on the context's waiting list.
473          */
474         if (!list_empty(&req->rq_ctx_chain)) {
475                 spin_lock(&req->rq_cli_ctx->cc_lock);
476                 list_del_init(&req->rq_ctx_chain);
477                 spin_unlock(&req->rq_cli_ctx->cc_lock);
478         }
479
480         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
481         req->rq_cli_ctx = NULL;
482         EXIT;
483 }
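/*
 * Illustrative sketch, not part of the original file: the get/put pair
 * above brackets the lifetime of a request's context, roughly
 *
 *	rc = sptlrpc_req_get_ctx(req);		// req->rq_cli_ctx set on success
 *	if (rc == 0) {
 *		... build and send the request ...
 *		sptlrpc_req_put_ctx(req, 0);	// async drop, no sleeping
 *	}
 */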
484
485 static
486 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
487                            struct ptlrpc_cli_ctx *oldctx,
488                            struct ptlrpc_cli_ctx *newctx)
489 {
490         struct sptlrpc_flavor   old_flvr;
491         char                   *reqmsg = NULL; /* to workaround old gcc */
492         int                     reqmsg_size;
493         int                     rc = 0;
494
495         LASSERT(req->rq_reqmsg);
496         LASSERT(req->rq_reqlen);
497         LASSERT(req->rq_replen);
498
499         CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
500                "switch sec %p(%s) -> %p(%s)\n", req,
501                oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
502                newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
503                oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
504                newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
505
506         /* save flavor */
507         old_flvr = req->rq_flvr;
508
509         /* save request message */
510         reqmsg_size = req->rq_reqlen;
511         if (reqmsg_size != 0) {
512                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
513                 if (reqmsg == NULL)
514                         return -ENOMEM;
515                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
516         }
517
518         /* release old req/rep buf */
519         req->rq_cli_ctx = oldctx;
520         sptlrpc_cli_free_reqbuf(req);
521         sptlrpc_cli_free_repbuf(req);
522         req->rq_cli_ctx = newctx;
523
524         /* recalculate the flavor */
525         sptlrpc_req_set_flavor(req, 0);
526
527         /* alloc a new request buffer;
528          * we don't need to alloc the reply buffer here, leave it to the
529          * rest of the ptlrpc processing */
530         if (reqmsg_size != 0) {
531                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
532                 if (!rc) {
533                         LASSERT(req->rq_reqmsg);
534                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
535                 } else {
536                         CWARN("failed to alloc reqbuf: %d\n", rc);
537                         req->rq_flvr = old_flvr;
538                 }
539
540                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
541         }
542         return rc;
543 }
544
545 /**
546  * If current context of \a req is dead somehow, e.g. we just switched flavor
547  * thus marked original contexts dead, we'll find a new context for it. if
548  * no switch is needed, \a req will end up with the same context.
549  *
550  * \note a request must have a context, to keep other parts of code happy.
551  * In any case of failure during the switching, we must restore the old one.
552  */
553 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
554 {
555         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
556         struct ptlrpc_cli_ctx *newctx;
557         int                    rc;
558         ENTRY;
559
560         LASSERT(oldctx);
561
562         sptlrpc_cli_ctx_get(oldctx);
563         sptlrpc_req_put_ctx(req, 0);
564
565         rc = sptlrpc_req_get_ctx(req);
566         if (unlikely(rc)) {
567                 LASSERT(!req->rq_cli_ctx);
568
569                 /* restore old ctx */
570                 req->rq_cli_ctx = oldctx;
571                 RETURN(rc);
572         }
573
574         newctx = req->rq_cli_ctx;
575         LASSERT(newctx);
576
577         if (unlikely(newctx == oldctx &&
578                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
579                 /*
580                  * still get the old dead ctx, usually means system too busy
581                  */
582                 CDEBUG(D_SEC,
583                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
584                        newctx, newctx->cc_flags);
585
586                 set_current_state(TASK_INTERRUPTIBLE);
587                 schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
588         } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
589                             == 0)) {
590                 /*
591                  * new ctx not up to date yet
592                  */
593                 CDEBUG(D_SEC,
594                        "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
595                        newctx, newctx->cc_flags);
596         } else {
597                 /*
598                  * it's possible newctx == oldctx if we're switching
599                  * subflavor with the same sec.
600                  */
601                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
602                 if (rc) {
603                         /* restore old ctx */
604                         sptlrpc_req_put_ctx(req, 0);
605                         req->rq_cli_ctx = oldctx;
606                         RETURN(rc);
607                 }
608
609                 LASSERT(req->rq_cli_ctx == newctx);
610         }
611
612         sptlrpc_cli_ctx_put(oldctx, 1);
613         RETURN(0);
614 }
615 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
616
617 static
618 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
619 {
620         if (cli_ctx_is_refreshed(ctx))
621                 return 1;
622         return 0;
623 }
624
625 static
626 int ctx_refresh_timeout(void *data)
627 {
628         struct ptlrpc_request *req = data;
629         int rc;
630
631         /* conn_cnt is needed in expire_one_request */
632         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
633
634         rc = ptlrpc_expire_one_request(req, 1);
635         /* if we started recovery, we should mark this ctx dead; otherwise,
636          * if lgssd died, nobody would retire this ctx and subsequent connect
637          * attempts would still find the same ctx, causing a deadlock.
638          * There's an assumption that the expire time of the request should
639          * be later than the context refresh expire time.
640          */
641         if (rc == 0)
642                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
643         return rc;
644 }
645
646 static
647 void ctx_refresh_interrupt(void *data)
648 {
649         struct ptlrpc_request *req = data;
650
651         spin_lock(&req->rq_lock);
652         req->rq_intr = 1;
653         spin_unlock(&req->rq_lock);
654 }
655
656 static
657 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
658 {
659         spin_lock(&ctx->cc_lock);
660         if (!list_empty(&req->rq_ctx_chain))
661                 list_del_init(&req->rq_ctx_chain);
662         spin_unlock(&ctx->cc_lock);
663 }
664
665 /**
666  * Refresh the context of \a req, if it's not up-to-date.
667  * \param timeout
668  * - < 0: don't wait
669  * - = 0: wait until success or a fatal error occurs
670  * - > 0: timeout value (in seconds)
671  *
672  * The status of the context could be changed by other threads at any time.
673  * We allow this race, but once we return with 0, the caller will assume the
674  * context is up to date and keep using it until the owning rpc is done.
675  *
676  * \retval 0 only if the context is up to date.
677  * \retval -ve error number.
678  */
679 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
680 {
681         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
682         struct ptlrpc_sec      *sec;
683         struct l_wait_info      lwi;
684         int                     rc;
685         ENTRY;
686
687         LASSERT(ctx);
688
689         if (req->rq_ctx_init || req->rq_ctx_fini)
690                 RETURN(0);
691
692         /*
693          * during the process a request's context might change type even
694          * (e.g. from gss ctx to null ctx), so each loop we need to re-check
695          * everything
696          */
697 again:
698         rc = import_sec_validate_get(req->rq_import, &sec);
699         if (rc)
700                 RETURN(rc);
701
702         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
703                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
704                       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
705                 req_off_ctx_list(req, ctx);
706                 sptlrpc_req_replace_dead_ctx(req);
707                 ctx = req->rq_cli_ctx;
708         }
709         sptlrpc_sec_put(sec);
710
711         if (cli_ctx_is_eternal(ctx))
712                 RETURN(0);
713
714         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
715                 LASSERT(ctx->cc_ops->refresh);
716                 ctx->cc_ops->refresh(ctx);
717         }
718         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
719
720         LASSERT(ctx->cc_ops->validate);
721         if (ctx->cc_ops->validate(ctx) == 0) {
722                 req_off_ctx_list(req, ctx);
723                 RETURN(0);
724         }
725
726         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
727                 spin_lock(&req->rq_lock);
728                 req->rq_err = 1;
729                 spin_unlock(&req->rq_lock);
730                 req_off_ctx_list(req, ctx);
731                 RETURN(-EPERM);
732         }
733
734         /*
735          * There's a subtle issue with resending RPCs. Suppose the following
736          * situation:
737          *  1. the request was sent to the server.
738          *  2. recovery was kicked off; after it finished, the request was
739          *     marked as resent.
740          *  3. the request is resent.
741          *  4. an old reply from the server is received; we accept and verify
742          *     it. This has to succeed, otherwise the error will be visible
743          *     to the application.
744          *  5. a new reply from the server is received and dropped by LNet.
745          *
746          * Note the xid of the old & new request is the same. We can't simply
747          * change the xid of the resent request because the server relies on
748          * it for reply reconstruction.
749          *
750          * Commonly the original context should still be uptodate because we
751          * have a nice long expiry time; the server will keep its context
752          * because we at least hold a ref on the old context, which prevents
753          * it from being destroyed while the RPC is being sent. So the server
754          * can still accept the request and finish the RPC. But if not:
755          *  1. If the server side context has been trimmed, a NO_CONTEXT will
756          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
757          *     new context by force.
758          *  2. The current context was never refreshed; then we are fine: we
759          *     never really sent a request with the old context before.
760          */
761         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
762             unlikely(req->rq_reqmsg) &&
763             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
764                 req_off_ctx_list(req, ctx);
765                 RETURN(0);
766         }
767
768         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
769                 req_off_ctx_list(req, ctx);
770                 /*
771                  * don't switch ctx if import was deactivated
772                  */
773                 if (req->rq_import->imp_deactive) {
774                         spin_lock(&req->rq_lock);
775                         req->rq_err = 1;
776                         spin_unlock(&req->rq_lock);
777                         RETURN(-EINTR);
778                 }
779
780                 rc = sptlrpc_req_replace_dead_ctx(req);
781                 if (rc) {
782                         LASSERT(ctx == req->rq_cli_ctx);
783                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
784                                req, ctx, rc);
785                         spin_lock(&req->rq_lock);
786                         req->rq_err = 1;
787                         spin_unlock(&req->rq_lock);
788                         RETURN(rc);
789                 }
790
791                 ctx = req->rq_cli_ctx;
792                 goto again;
793         }
794
795         /*
796          * Now we're sure this context is during upcall, add myself into
797          * waiting list
798          */
799         spin_lock(&ctx->cc_lock);
800         if (list_empty(&req->rq_ctx_chain))
801                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
802         spin_unlock(&ctx->cc_lock);
803
804         if (timeout < 0)
805                 RETURN(-EWOULDBLOCK);
806
807         /* Clear any flags that may be present from previous sends */
808         LASSERT(req->rq_receiving_reply == 0);
809         spin_lock(&req->rq_lock);
810         req->rq_err = 0;
811         req->rq_timedout = 0;
812         req->rq_resend = 0;
813         req->rq_restart = 0;
814         spin_unlock(&req->rq_lock);
815
816         lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
817                                ctx_refresh_timeout,
818                                ctx_refresh_interrupt, req);
819         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
820
821         /*
822          * The following cases could lead us here:
823          * - successfully refreshed;
824          * - interrupted;
825          * - timed out, and we don't want to recover from the failure;
826          * - timed out, and woken up when recovery finished;
827          * - someone else marked this ctx dead by force;
828          * - someone invalidated the req and called ptlrpc_client_wake_req(),
829          *   e.g. ptlrpc_abort_inflight();
830          */
831         if (!cli_ctx_is_refreshed(ctx)) {
832                 /* timed out or interrupted */
833                 req_off_ctx_list(req, ctx);
834
835                 LASSERT(rc != 0);
836                 RETURN(rc);
837         }
838
839         goto again;
840 }
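/*
 * Illustrative caller sketch, not part of the original file: a sender would
 * typically make sure the context is usable before packing the request, e.g.
 *
 *	rc = sptlrpc_req_refresh_ctx(req, 0);	// 0 == wait until done or fatal
 *	if (rc == 0)
 *		rc = sptlrpc_cli_wrap_request(req);
 */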
841
842 /**
843  * Initialize flavor settings for \a req, according to \a opcode.
844  *
845  * \note this could be called in two situations:
846  * - new request from ptlrpc_pre_req(), with proper @opcode
847  * - old request which changed ctx in the middle, with @opcode == 0
848  */
849 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
850 {
851         struct ptlrpc_sec *sec;
852
853         LASSERT(req->rq_import);
854         LASSERT(req->rq_cli_ctx);
855         LASSERT(req->rq_cli_ctx->cc_sec);
856         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
857
858         /* special security flags according to opcode */
859         switch (opcode) {
860         case OST_READ:
861         case MDS_READPAGE:
862         case MGS_CONFIG_READ:
863         case OBD_IDX_READ:
864                 req->rq_bulk_read = 1;
865                 break;
866         case OST_WRITE:
867         case MDS_WRITEPAGE:
868                 req->rq_bulk_write = 1;
869                 break;
870         case SEC_CTX_INIT:
871                 req->rq_ctx_init = 1;
872                 break;
873         case SEC_CTX_FINI:
874                 req->rq_ctx_fini = 1;
875                 break;
876         case 0:
877                 /* init/fini rpc won't be resent, so can't be here */
878                 LASSERT(req->rq_ctx_init == 0);
879                 LASSERT(req->rq_ctx_fini == 0);
880
881                 /* cleanup flags, which should be recalculated */
882                 req->rq_pack_udesc = 0;
883                 req->rq_pack_bulk = 0;
884                 break;
885         }
886
887         sec = req->rq_cli_ctx->cc_sec;
888
889         spin_lock(&sec->ps_lock);
890         req->rq_flvr = sec->ps_flvr;
891         spin_unlock(&sec->ps_lock);
892
893         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
894          * destruction rpc */
895         if (unlikely(req->rq_ctx_init))
896                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
897         else if (unlikely(req->rq_ctx_fini))
898                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
899
900         /* user descriptor flag, null security can't do it anyway */
901         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
902             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
903                 req->rq_pack_udesc = 1;
904
905         /* bulk security flag */
906         if ((req->rq_bulk_read || req->rq_bulk_write) &&
907             sptlrpc_flavor_has_bulk(&req->rq_flvr))
908                 req->rq_pack_bulk = 1;
909 }
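/*
 * Illustrative sketch, not part of the original file: the wire flavor set
 * above can be decomposed with the SPTLRPC_FLVR_* accessors used throughout
 * this file, e.g.
 *
 *	__u32 rpc_flvr = req->rq_flvr.sf_rpc;
 *
 *	CDEBUG(D_SEC, "policy %u, svc %u, name %s\n",
 *	       SPTLRPC_FLVR_POLICY(rpc_flvr),
 *	       SPTLRPC_FLVR_SVC(rpc_flvr),
 *	       sptlrpc_flavor2name_base(rpc_flvr));
 */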
910
911 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
912 {
913         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
914                 return;
915
916         LASSERT(req->rq_clrbuf);
917         if (req->rq_pool || !req->rq_reqbuf)
918                 return;
919
920         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
921         req->rq_reqbuf = NULL;
922         req->rq_reqbuf_len = 0;
923 }
924
925 /**
926  * Given an import \a imp, check whether current user has a valid context
927  * or not. We may create a new context and try to refresh it, and try
928  * repeatedly try in case of non-fatal errors. Return 0 means success.
929  */
930 int sptlrpc_import_check_ctx(struct obd_import *imp)
931 {
932         struct ptlrpc_sec     *sec;
933         struct ptlrpc_cli_ctx *ctx;
934         struct ptlrpc_request *req = NULL;
935         int rc;
936         ENTRY;
937
938         might_sleep();
939
940         sec = sptlrpc_import_sec_ref(imp);
941         ctx = get_my_ctx(sec);
942         sptlrpc_sec_put(sec);
943
944         if (!ctx)
945                 RETURN(-ENOMEM);
946
947         if (cli_ctx_is_eternal(ctx) ||
948             ctx->cc_ops->validate(ctx) == 0) {
949                 sptlrpc_cli_ctx_put(ctx, 1);
950                 RETURN(0);
951         }
952
953         if (cli_ctx_is_error(ctx)) {
954                 sptlrpc_cli_ctx_put(ctx, 1);
955                 RETURN(-EACCES);
956         }
957
958         req = ptlrpc_request_cache_alloc(GFP_NOFS);
959         if (!req)
960                 RETURN(-ENOMEM);
961
962         ptlrpc_cli_req_init(req);
963         atomic_set(&req->rq_refcount, 10000);
964
965         req->rq_import = imp;
966         req->rq_flvr = sec->ps_flvr;
967         req->rq_cli_ctx = ctx;
968
969         rc = sptlrpc_req_refresh_ctx(req, 0);
970         LASSERT(list_empty(&req->rq_ctx_chain));
971         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
972         ptlrpc_request_cache_free(req);
973
974         RETURN(rc);
975 }
976
977 /**
978  * Used by ptlrpc client, to perform the pre-defined security transformation
979  * upon the request message of \a req. After this function called,
980  * upon the request message of \a req. After this function is called,
981  */
982 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
983 {
984         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
985         int rc = 0;
986         ENTRY;
987
988         LASSERT(ctx);
989         LASSERT(ctx->cc_sec);
990         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
991
992         /* we wrap bulk request here because now we can be sure
993          * the context is uptodate.
994          */
995         if (req->rq_bulk) {
996                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
997                 if (rc)
998                         RETURN(rc);
999         }
1000
1001         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1002         case SPTLRPC_SVC_NULL:
1003         case SPTLRPC_SVC_AUTH:
1004         case SPTLRPC_SVC_INTG:
1005                 LASSERT(ctx->cc_ops->sign);
1006                 rc = ctx->cc_ops->sign(ctx, req);
1007                 break;
1008         case SPTLRPC_SVC_PRIV:
1009                 LASSERT(ctx->cc_ops->seal);
1010                 rc = ctx->cc_ops->seal(ctx, req);
1011                 break;
1012         default:
1013                 LBUG();
1014         }
1015
1016         if (rc == 0) {
1017                 LASSERT(req->rq_reqdata_len);
1018                 LASSERT(req->rq_reqdata_len % 8 == 0);
1019                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1020         }
1021
1022         RETURN(rc);
1023 }
1024
1025 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1026 {
1027         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1028         int                    rc;
1029         ENTRY;
1030
1031         LASSERT(ctx);
1032         LASSERT(ctx->cc_sec);
1033         LASSERT(req->rq_repbuf);
1034         LASSERT(req->rq_repdata);
1035         LASSERT(req->rq_repmsg == NULL);
1036
1037         req->rq_rep_swab_mask = 0;
1038
1039         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1040         switch (rc) {
1041         case 1:
1042                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1043         case 0:
1044                 break;
1045         default:
1046                 CERROR("failed unpack reply: x%llu\n", req->rq_xid);
1047                 RETURN(-EPROTO);
1048         }
1049
1050         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1051                 CERROR("replied data length %d too small\n",
1052                        req->rq_repdata_len);
1053                 RETURN(-EPROTO);
1054         }
1055
1056         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1057             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1058                 CERROR("reply policy %u doesn't match request policy %u\n",
1059                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1060                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1061                 RETURN(-EPROTO);
1062         }
1063
1064         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1065         case SPTLRPC_SVC_NULL:
1066         case SPTLRPC_SVC_AUTH:
1067         case SPTLRPC_SVC_INTG:
1068                 LASSERT(ctx->cc_ops->verify);
1069                 rc = ctx->cc_ops->verify(ctx, req);
1070                 break;
1071         case SPTLRPC_SVC_PRIV:
1072                 LASSERT(ctx->cc_ops->unseal);
1073                 rc = ctx->cc_ops->unseal(ctx, req);
1074                 break;
1075         default:
1076                 LBUG();
1077         }
1078         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1079
1080         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1081             !req->rq_ctx_init)
1082                 req->rq_rep_swab_mask = 0;
1083         RETURN(rc);
1084 }
1085
1086 /**
1087  * Used by ptlrpc client, to perform security transformation upon the reply
1088  * message of \a req. After successful return, req->rq_repmsg points to
1089  * the reply message in clear text.
1090  *
1091  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1092  * going to change.
1093  */
1094 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1095 {
1096         LASSERT(req->rq_repbuf);
1097         LASSERT(req->rq_repdata == NULL);
1098         LASSERT(req->rq_repmsg == NULL);
1099         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1100
1101         if (req->rq_reply_off == 0 &&
1102             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1103                 CERROR("real reply with offset 0\n");
1104                 return -EPROTO;
1105         }
1106
1107         if (req->rq_reply_off % 8 != 0) {
1108                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1109                 return -EPROTO;
1110         }
1111
1112         req->rq_repdata = (struct lustre_msg *)
1113                                 (req->rq_repbuf + req->rq_reply_off);
1114         req->rq_repdata_len = req->rq_nob_received;
1115
1116         return do_cli_unwrap_reply(req);
1117 }
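/*
 * Illustrative sketch, not part of the original file: wrap/unwrap bracket an
 * RPC on the client side, roughly
 *
 *	rc = sptlrpc_cli_wrap_request(req);	// before sending
 *	...
 *	rc = sptlrpc_cli_unwrap_reply(req);	// after the reply buffer has
 *						// been un-posted from LNet
 *	if (rc == 0)
 *		... req->rq_repmsg is now clear text ...
 */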
1118
1119 /**
1120  * Used by ptlrpc client, to perform security transformation upon the early
1121  * reply message of \a req. We expect the rq_reply_off is 0, and
1122  * rq_nob_received is the early reply size.
1123  * 
1124  * Because the receive buffer might still be posted, the reply data might be
1125  * changed at any time, whether we're holding rq_lock or not. For this reason
1126  * we allocate a separate ptlrpc_request and reply buffer for early reply
1127  * processing.
1128  *
1129  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1130  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1131  * \a *req_ret to release it.
1132  * \retval -ve error number, and \a req_ret will not be set.
1133  */
1134 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1135                                    struct ptlrpc_request **req_ret)
1136 {
1137         struct ptlrpc_request  *early_req;
1138         char                   *early_buf;
1139         int                     early_bufsz, early_size;
1140         int                     rc;
1141         ENTRY;
1142
1143         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1144         if (early_req == NULL)
1145                 RETURN(-ENOMEM);
1146
1147         ptlrpc_cli_req_init(early_req);
1148
1149         early_size = req->rq_nob_received;
1150         early_bufsz = size_roundup_power2(early_size);
1151         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1152         if (early_buf == NULL)
1153                 GOTO(err_req, rc = -ENOMEM);
1154
1155         /* sanity checks and data copy out, done inside the spinlock */
1156         spin_lock(&req->rq_lock);
1157
1158         if (req->rq_replied) {
1159                 spin_unlock(&req->rq_lock);
1160                 GOTO(err_buf, rc = -EALREADY);
1161         }
1162
1163         LASSERT(req->rq_repbuf);
1164         LASSERT(req->rq_repdata == NULL);
1165         LASSERT(req->rq_repmsg == NULL);
1166
1167         if (req->rq_reply_off != 0) {
1168                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1169                 spin_unlock(&req->rq_lock);
1170                 GOTO(err_buf, rc = -EPROTO);
1171         }
1172
1173         if (req->rq_nob_received != early_size) {
1174                 /* even if another early reply arrived, the size should be the same */
1175                 CERROR("data size has changed from %u to %u\n",
1176                        early_size, req->rq_nob_received);
1177                 spin_unlock(&req->rq_lock);
1178                 GOTO(err_buf, rc = -EINVAL);
1179         }
1180
1181         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1182                 CERROR("early reply length %d too small\n",
1183                        req->rq_nob_received);
1184                 spin_unlock(&req->rq_lock);
1185                 GOTO(err_buf, rc = -EALREADY);
1186         }
1187
1188         memcpy(early_buf, req->rq_repbuf, early_size);
1189         spin_unlock(&req->rq_lock);
1190
1191         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1192         early_req->rq_flvr = req->rq_flvr;
1193         early_req->rq_repbuf = early_buf;
1194         early_req->rq_repbuf_len = early_bufsz;
1195         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1196         early_req->rq_repdata_len = early_size;
1197         early_req->rq_early = 1;
1198         early_req->rq_reqmsg = req->rq_reqmsg;
1199
1200         rc = do_cli_unwrap_reply(early_req);
1201         if (rc) {
1202                 DEBUG_REQ(D_ADAPTTO, early_req,
1203                           "error %d unwrap early reply", rc);
1204                 GOTO(err_ctx, rc);
1205         }
1206
1207         LASSERT(early_req->rq_repmsg);
1208         *req_ret = early_req;
1209         RETURN(0);
1210
1211 err_ctx:
1212         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1213 err_buf:
1214         OBD_FREE_LARGE(early_buf, early_bufsz);
1215 err_req:
1216         ptlrpc_request_cache_free(early_req);
1217         RETURN(rc);
1218 }
1219
1220 /**
1221  * Used by ptlrpc client, to release a processed early reply \a early_req.
1222  *
1223  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1224  */
1225 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1226 {
1227         LASSERT(early_req->rq_repbuf);
1228         LASSERT(early_req->rq_repdata);
1229         LASSERT(early_req->rq_repmsg);
1230
1231         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1232         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1233         ptlrpc_request_cache_free(early_req);
1234 }
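/*
 * Illustrative sketch, not part of the original file: processing an early
 * reply with the pair of helpers above, roughly
 *
 *	struct ptlrpc_request *early_req;
 *
 *	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *	if (rc == 0) {
 *		... read timing info from early_req->rq_repmsg ...
 *		sptlrpc_cli_finish_early_reply(early_req);
 *	}
 */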
1235
1236 /**************************************************
1237  * sec ID                                         *
1238  **************************************************/
1239
1240 /*
1241  * "fixed" sec (e.g. null) use sec_id < 0
1242  */
1243 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1244
1245 int sptlrpc_get_next_secid(void)
1246 {
1247         return atomic_inc_return(&sptlrpc_sec_id);
1248 }
1249 EXPORT_SYMBOL(sptlrpc_get_next_secid);
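/*
 * Illustrative sketch, not part of the original file: a policy creating a
 * non-"fixed" sec would typically assign it a unique positive id, assuming
 * the id field of struct ptlrpc_sec is ps_id:
 *
 *	sec->ps_id = sptlrpc_get_next_secid();
 */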
1250
1251 /**************************************************
1252  * client side high-level security APIs           *
1253  **************************************************/
1254
1255 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1256                                    int grace, int force)
1257 {
1258         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1259
1260         LASSERT(policy->sp_cops);
1261         LASSERT(policy->sp_cops->flush_ctx_cache);
1262
1263         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1264 }
1265
1266 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1267 {
1268         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1269
1270         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1271         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1272         LASSERT(policy->sp_cops->destroy_sec);
1273
1274         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1275
1276         policy->sp_cops->destroy_sec(sec);
1277         sptlrpc_policy_put(policy);
1278 }
1279
1280 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1281 {
1282         sec_cop_destroy_sec(sec);
1283 }
1284 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1285
1286 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1287 {
1288         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1289
1290         if (sec->ps_policy->sp_cops->kill_sec) {
1291                 sec->ps_policy->sp_cops->kill_sec(sec);
1292
1293                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1294         }
1295 }
1296
1297 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1298 {
1299         if (sec)
1300                 atomic_inc(&sec->ps_refcount);
1301
1302         return sec;
1303 }
1304 EXPORT_SYMBOL(sptlrpc_sec_get);
1305
1306 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1307 {
1308         if (sec) {
1309                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1310
1311                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1312                         sptlrpc_gc_del_sec(sec);
1313                         sec_cop_destroy_sec(sec);
1314                 }
1315         }
1316 }
1317 EXPORT_SYMBOL(sptlrpc_sec_put);
1318
1319 /*
1320  * the policy module is responsible for taking a reference on the import
1321  */
1322 static
1323 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1324                                        struct ptlrpc_svc_ctx *svc_ctx,
1325                                        struct sptlrpc_flavor *sf,
1326                                        enum lustre_sec_part sp)
1327 {
1328         struct ptlrpc_sec_policy *policy;
1329         struct ptlrpc_sec        *sec;
1330         char                      str[32];
1331         ENTRY;
1332
1333         if (svc_ctx) {
1334                 LASSERT(imp->imp_dlm_fake == 1);
1335
1336                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1337                        imp->imp_obd->obd_type->typ_name,
1338                        imp->imp_obd->obd_name,
1339                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1340
1341                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1342                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1343         } else {
1344                 LASSERT(imp->imp_dlm_fake == 0);
1345
1346                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1347                        imp->imp_obd->obd_type->typ_name,
1348                        imp->imp_obd->obd_name,
1349                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1350
1351                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1352                 if (!policy) {
1353                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1354                         RETURN(NULL);
1355                 }
1356         }
1357
1358         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1359         if (sec) {
1360                 atomic_inc(&sec->ps_refcount);
1361
1362                 sec->ps_part = sp;
1363
1364                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1365                         sptlrpc_gc_add_sec(sec);
1366         } else {
1367                 sptlrpc_policy_put(policy);
1368         }
1369
1370         RETURN(sec);
1371 }
1372
1373 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1374 {
1375         struct ptlrpc_sec *sec;
1376
1377         spin_lock(&imp->imp_lock);
1378         sec = sptlrpc_sec_get(imp->imp_sec);
1379         spin_unlock(&imp->imp_lock);
1380
1381         return sec;
1382 }
1383 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
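/*
 * Illustrative usage sketch, not part of the original file: the reference
 * taken here must be balanced with sptlrpc_sec_put(), e.g.
 *
 *	struct ptlrpc_sec *sec = sptlrpc_import_sec_ref(imp);
 *
 *	if (sec != NULL) {
 *		... inspect sec->ps_flvr ...
 *		sptlrpc_sec_put(sec);
 *	}
 */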
1384
1385 static void sptlrpc_import_sec_install(struct obd_import *imp,
1386                                        struct ptlrpc_sec *sec)
1387 {
1388         struct ptlrpc_sec *old_sec;
1389
1390         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1391
1392         spin_lock(&imp->imp_lock);
1393         old_sec = imp->imp_sec;
1394         imp->imp_sec = sec;
1395         spin_unlock(&imp->imp_lock);
1396
1397         if (old_sec) {
1398                 sptlrpc_sec_kill(old_sec);
1399
1400                 /* balance the ref taken by this import */
1401                 sptlrpc_sec_put(old_sec);
1402         }
1403 }
1404
1405 static inline
1406 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1407 {
1408         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1409 }
1410
1411 static inline
1412 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1413 {
1414         *dst = *src;
1415 }
1416
1417 /**
1418  * Get an appropriate ptlrpc_sec for the \a imp, according to the current
1419  * configuration. When called, imp->imp_sec may or may not be NULL.
1420  *
1421  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1422  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1423  */
1424 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1425                              struct ptlrpc_svc_ctx *svc_ctx,
1426                              struct sptlrpc_flavor *flvr)
1427 {
1428         struct ptlrpc_connection   *conn;
1429         struct sptlrpc_flavor       sf;
1430         struct ptlrpc_sec          *sec, *newsec;
1431         enum lustre_sec_part        sp;
1432         char                        str[24];
1433         int                         rc = 0;
1434         ENTRY;
1435
1436         might_sleep();
1437
1438         if (imp == NULL)
1439                 RETURN(0);
1440
1441         conn = imp->imp_connection;
1442
1443         if (svc_ctx == NULL) {
1444                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1445                 /*
1446                  * normal import: determine the flavor from the rule set,
1447                  * except for the mgc, whose flavor is predetermined.
1448                  */
1449                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1450                         sf = cliobd->cl_flvr_mgc;
1451                 else 
1452                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1453                                                    cliobd->cl_sp_to,
1454                                                    &cliobd->cl_target_uuid,
1455                                                    conn->c_self, &sf);
1456
1457                 sp = imp->imp_obd->u.cli.cl_sp_me;
1458         } else {
1459                 /* reverse import, determine flavor from the incoming request */
1460                 sf = *flvr;
1461
1462                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1463                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1464                                       PTLRPC_SEC_FL_ROOTONLY;
1465
1466                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1467         }
1468
1469         sec = sptlrpc_import_sec_ref(imp);
1470         if (sec) {
1471                 char    str2[24];
1472
1473                 if (flavor_equal(&sf, &sec->ps_flvr))
1474                         GOTO(out, rc);
1475
1476                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1477                        imp->imp_obd->obd_name,
1478                        obd_uuid2str(&conn->c_remote_uuid),
1479                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1480                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1481         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1482                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1483                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1484                        imp->imp_obd->obd_name,
1485                        obd_uuid2str(&conn->c_remote_uuid),
1486                        LNET_NIDNET(conn->c_self),
1487                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1488         }
1489
1490         mutex_lock(&imp->imp_sec_mutex);
1491
1492         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1493         if (newsec) {
1494                 sptlrpc_import_sec_install(imp, newsec);
1495         } else {
1496                 CERROR("import %s->%s: failed to create new sec\n",
1497                        imp->imp_obd->obd_name,
1498                        obd_uuid2str(&conn->c_remote_uuid));
1499                 rc = -EPERM;
1500         }
1501
1502         mutex_unlock(&imp->imp_sec_mutex);
1503 out:
1504         sptlrpc_sec_put(sec);
1505         RETURN(rc);
1506 }
1507
1508 void sptlrpc_import_sec_put(struct obd_import *imp)
1509 {
1510         if (imp->imp_sec) {
1511                 sptlrpc_sec_kill(imp->imp_sec);
1512
1513                 sptlrpc_sec_put(imp->imp_sec);
1514                 imp->imp_sec = NULL;
1515         }
1516 }
1517
1518 static void import_flush_ctx_common(struct obd_import *imp,
1519                                     uid_t uid, int grace, int force)
1520 {
1521         struct ptlrpc_sec *sec;
1522
1523         if (imp == NULL)
1524                 return;
1525
1526         sec = sptlrpc_import_sec_ref(imp);
1527         if (sec == NULL)
1528                 return;
1529
1530         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1531         sptlrpc_sec_put(sec);
1532 }
1533
1534 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1535 {
1536         /* it's important to use grace mode, see the explanation in
1537          * sptlrpc_req_refresh_ctx() */
1538         import_flush_ctx_common(imp, 0, 1, 1);
1539 }
1540
1541 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1542 {
1543         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1544                                 1, 1);
1545 }
1546 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1547
1548 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1549 {
1550         import_flush_ctx_common(imp, -1, 1, 1);
1551 }
1552 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1553
1554 /**
1555  * Used by the ptlrpc client to allocate the request buffer of \a req. Upon
1556  * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
1557  */
1558 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1559 {
1560         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1561         struct ptlrpc_sec_policy *policy;
1562         int rc;
1563
1564         LASSERT(ctx);
1565         LASSERT(ctx->cc_sec);
1566         LASSERT(ctx->cc_sec->ps_policy);
1567         LASSERT(req->rq_reqmsg == NULL);
1568         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1569
1570         policy = ctx->cc_sec->ps_policy;
1571         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1572         if (!rc) {
1573                 LASSERT(req->rq_reqmsg);
1574                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1575
1576                 /* zeroing preallocated buffer */
1577                 if (req->rq_pool)
1578                         memset(req->rq_reqmsg, 0, msgsize);
1579         }
1580
1581         return rc;
1582 }
1583
1584 /**
1585  * Used by ptlrpc client to free request buffer of \a req. After this
1586  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1587  */
1588 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1589 {
1590         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1591         struct ptlrpc_sec_policy *policy;
1592
1593         LASSERT(ctx);
1594         LASSERT(ctx->cc_sec);
1595         LASSERT(ctx->cc_sec->ps_policy);
1596         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1597
1598         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1599                 return;
1600
1601         policy = ctx->cc_sec->ps_policy;
1602         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1603         req->rq_reqmsg = NULL;
1604 }
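
/*
 * Illustrative sketch (hypothetical function, not part of the real call
 * paths, which go through the regular request packing code): the usual
 * pairing of the two helpers above, showing the contract on rq_reqmsg.
 */
#if 0
static int sptlrpc_example_reqbuf_cycle(struct ptlrpc_request *req, int msgsize)
{
        int rc;

        rc = sptlrpc_cli_alloc_reqbuf(req, msgsize);
        if (rc)
                return rc;

        /* req->rq_reqmsg now points to a msgsize-byte buffer; fill it here */

        sptlrpc_cli_free_reqbuf(req);
        /* req->rq_reqmsg is NULL again and must not be dereferenced */
        return 0;
}
#endif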
1605
1606 /*
1607  * NOTE caller must guarantee the buffer size is enough for the enlargement
1608  */
1609 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1610                                   int segment, int newsize)
1611 {
1612         void   *src, *dst;
1613         int     oldsize, oldmsg_size, movesize;
1614
1615         LASSERT(segment < msg->lm_bufcount);
1616         LASSERT(msg->lm_buflens[segment] <= newsize);
1617
1618         if (msg->lm_buflens[segment] == newsize)
1619                 return;
1620
1621         /* nothing to do if we are enlarging the last segment */
1622         if (segment == msg->lm_bufcount - 1) {
1623                 msg->lm_buflens[segment] = newsize;
1624                 return;
1625         }
1626
1627         oldsize = msg->lm_buflens[segment];
1628
1629         src = lustre_msg_buf(msg, segment + 1, 0);
1630         msg->lm_buflens[segment] = newsize;
1631         dst = lustre_msg_buf(msg, segment + 1, 0);
1632         msg->lm_buflens[segment] = oldsize;
1633
1634         /* move from segment + 1 to end segment */
1635         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1636         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1637         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1638         LASSERT(movesize >= 0);
1639
1640         if (movesize)
1641                 memmove(dst, src, movesize);
1642
1643         /* note we don't clear the area where the old data lived; it is not secret */
1644
1645         /* finally set new segment size */
1646         msg->lm_buflens[segment] = newsize;
1647 }
1648 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
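
/*
 * Worked example (illustrative): for a LUSTRE_MSG_MAGIC_V2 message with
 * lm_buflens = {128, 40, 64}, enlarging segment 1 to 48 takes src as the old
 * start of segment 2 and dst as its new start (8 bytes further, since buffer
 * lengths are rounded up to 8), memmove()s the trailing 64-byte segment up by
 * 8 bytes, then sets lm_buflens[1] = 48.  Enlarging the last segment is just
 * a length update with no data movement.
 */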
1649
1650 /**
1651  * Used by ptlrpc client to enlarge the \a segment of the request message
1652  * pointed to by req->rq_reqmsg to size \a newsize. All previously filled-in
1653  * data will be preserved after the enlargement. This must be called after the
1654  * original request buffer has been allocated.
1655  *
1656  * \note After this is called, rq_reqmsg and rq_reqlen might have changed,
1657  * so the caller should refresh its local pointers if needed.
1658  */
1659 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1660                                int segment, int newsize)
1661 {
1662         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1663         struct ptlrpc_sec_cops   *cops;
1664         struct lustre_msg        *msg = req->rq_reqmsg;
1665
1666         LASSERT(ctx);
1667         LASSERT(msg);
1668         LASSERT(msg->lm_bufcount > segment);
1669         LASSERT(msg->lm_buflens[segment] <= newsize);
1670
1671         if (msg->lm_buflens[segment] == newsize)
1672                 return 0;
1673
1674         cops = ctx->cc_sec->ps_policy->sp_cops;
1675         LASSERT(cops->enlarge_reqbuf);
1676         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1677 }
1678 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
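
/*
 * Illustrative sketch (hypothetical caller): any pointer into rq_reqmsg that
 * was cached before the call must be re-fetched afterwards, because the
 * enlargement may reallocate or shift the underlying buffer.
 */
#if 0
static void *sptlrpc_example_enlarge(struct ptlrpc_request *req,
                                     int segment, int newsize)
{
        if (sptlrpc_cli_enlarge_reqbuf(req, segment, newsize))
                return NULL;

        /* rq_reqmsg may have changed; look the segment up again */
        return lustre_msg_buf(req->rq_reqmsg, segment, newsize);
}
#endif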
1679
1680 /**
1681  * Used by ptlrpc client to allocate reply buffer of \a req.
1682  *
1683  * \note After this, req->rq_repmsg is still not accessible.
1684  */
1685 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1686 {
1687         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1688         struct ptlrpc_sec_policy *policy;
1689         ENTRY;
1690
1691         LASSERT(ctx);
1692         LASSERT(ctx->cc_sec);
1693         LASSERT(ctx->cc_sec->ps_policy);
1694
1695         if (req->rq_repbuf)
1696                 RETURN(0);
1697
1698         policy = ctx->cc_sec->ps_policy;
1699         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1700 }
1701
1702 /**
1703  * Used by ptlrpc client to free reply buffer of \a req. After this
1704  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1705  */
1706 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1707 {
1708         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1709         struct ptlrpc_sec_policy *policy;
1710         ENTRY;
1711
1712         LASSERT(ctx);
1713         LASSERT(ctx->cc_sec);
1714         LASSERT(ctx->cc_sec->ps_policy);
1715         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1716
1717         if (req->rq_repbuf == NULL)
1718                 return;
1719         LASSERT(req->rq_repbuf_len);
1720
1721         policy = ctx->cc_sec->ps_policy;
1722         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1723         req->rq_repmsg = NULL;
1724         EXIT;
1725 }
1726
1727 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1728                                 struct ptlrpc_cli_ctx *ctx)
1729 {
1730         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1731
1732         if (!policy->sp_cops->install_rctx)
1733                 return 0;
1734         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1735 }
1736
1737 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1738                                 struct ptlrpc_svc_ctx *ctx)
1739 {
1740         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1741
1742         if (!policy->sp_sops->install_rctx)
1743                 return 0;
1744         return policy->sp_sops->install_rctx(imp, ctx);
1745 }
1746
1747 /****************************************
1748  * server side security                 *
1749  ****************************************/
1750
1751 static int flavor_allowed(struct sptlrpc_flavor *exp,
1752                           struct ptlrpc_request *req)
1753 {
1754         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1755
1756         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1757                 return 1;
1758
1759         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1760             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1761             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1762             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1763                 return 1;
1764
1765         return 0;
1766 }
1767
1768 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1769
1770 /**
1771  * Given an export \a exp, check whether the flavor of incoming \a req
1772  * is allowed by the export \a exp. The main logic takes care of changing
1773  * configurations. Returns 0 on success.
1774  */
1775 int sptlrpc_target_export_check(struct obd_export *exp,
1776                                 struct ptlrpc_request *req)
1777 {
1778         struct sptlrpc_flavor   flavor;
1779
1780         if (exp == NULL)
1781                 return 0;
1782
1783         /* client side export has no imp_reverse, skip.
1784          * FIXME maybe we should check the flavor here as well??? */
1785         if (exp->exp_imp_reverse == NULL)
1786                 return 0;
1787
1788         /* don't care about ctx fini rpc */
1789         if (req->rq_ctx_fini)
1790                 return 0;
1791
1792         spin_lock(&exp->exp_lock);
1793
1794         /* if the flavor just changed (exp->exp_flvr_changed != 0), we wait
1795          * for the first req with the new flavor, then treat it as the current
1796          * flavor and adapt the reverse sec according to it.
1797          * note the first rpc with the new flavor might not carry a root ctx,
1798          * in which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1799         if (unlikely(exp->exp_flvr_changed) &&
1800             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1801                 /* make the new flavor the "current" one, and the old ones
1802                  * about-to-expire */
1803                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1804                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1805                 flavor = exp->exp_flvr_old[1];
1806                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1807                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1808                 exp->exp_flvr_old[0] = exp->exp_flvr;
1809                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1810                                           EXP_FLVR_UPDATE_EXPIRE;
1811                 exp->exp_flvr = flavor;
1812
1813                 /* flavor change finished */
1814                 exp->exp_flvr_changed = 0;
1815                 LASSERT(exp->exp_flvr_adapt == 1);
1816
1817                 /* if it's gss, we are only interested in root ctx init */
1818                 if (req->rq_auth_gss &&
1819                     !(req->rq_ctx_init &&
1820                       (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1821                        req->rq_auth_usr_ost))) {
1822                         spin_unlock(&exp->exp_lock);
1823                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1824                                req->rq_auth_gss, req->rq_ctx_init,
1825                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1826                                req->rq_auth_usr_ost);
1827                         return 0;
1828                 }
1829
1830                 exp->exp_flvr_adapt = 0;
1831                 spin_unlock(&exp->exp_lock);
1832
1833                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1834                                                 req->rq_svc_ctx, &flavor);
1835         }
1836
1837         /* if it equals the current flavor, we accept it, but still need to
1838          * deal with the reverse sec/ctx */
1839         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1840                 /* most cases return here; we are only interested in
1841                  * gss root ctx init */
1842                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1843                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1844                      !req->rq_auth_usr_ost)) {
1845                         spin_unlock(&exp->exp_lock);
1846                         return 0;
1847                 }
1848
1849                 /* if the flavor just changed, we should not proceed; the
1850                  * current flavor will be discovered and replaced shortly,
1851                  * so just let _this_ rpc pass through */
1852                 if (exp->exp_flvr_changed) {
1853                         LASSERT(exp->exp_flvr_adapt);
1854                         spin_unlock(&exp->exp_lock);
1855                         return 0;
1856                 }
1857
1858                 if (exp->exp_flvr_adapt) {
1859                         exp->exp_flvr_adapt = 0;
1860                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1861                                exp, exp->exp_flvr.sf_rpc,
1862                                exp->exp_flvr_old[0].sf_rpc,
1863                                exp->exp_flvr_old[1].sf_rpc);
1864                         flavor = exp->exp_flvr;
1865                         spin_unlock(&exp->exp_lock);
1866
1867                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1868                                                         req->rq_svc_ctx,
1869                                                         &flavor);
1870                 } else {
1871                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1872                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1873                                exp->exp_flvr_old[0].sf_rpc,
1874                                exp->exp_flvr_old[1].sf_rpc);
1875                         spin_unlock(&exp->exp_lock);
1876
1877                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1878                                                            req->rq_svc_ctx);
1879                 }
1880         }
1881
1882         if (exp->exp_flvr_expire[0]) {
1883                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1884                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1885                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1886                                        "middle one ("CFS_DURATION_T")\n", exp,
1887                                        exp->exp_flvr.sf_rpc,
1888                                        exp->exp_flvr_old[0].sf_rpc,
1889                                        exp->exp_flvr_old[1].sf_rpc,
1890                                        exp->exp_flvr_expire[0] -
1891                                                 cfs_time_current_sec());
1892                                 spin_unlock(&exp->exp_lock);
1893                                 return 0;
1894                         }
1895                 } else {
1896                         CDEBUG(D_SEC, "mark middle expired\n");
1897                         exp->exp_flvr_expire[0] = 0;
1898                 }
1899                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1900                        exp->exp_flvr.sf_rpc,
1901                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1902                        req->rq_flvr.sf_rpc);
1903         }
1904
1905         /* now that it doesn't match the current flavor, the only way we can
1906          * accept it is if it matches an old flavor which has not expired. */
1907         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1908                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1909                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1910                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1911                                        "oldest one ("CFS_DURATION_T")\n", exp,
1912                                        exp->exp_flvr.sf_rpc,
1913                                        exp->exp_flvr_old[0].sf_rpc,
1914                                        exp->exp_flvr_old[1].sf_rpc,
1915                                        exp->exp_flvr_expire[1] -
1916                                                 cfs_time_current_sec());
1917                                 spin_unlock(&exp->exp_lock);
1918                                 return 0;
1919                         }
1920                 } else {
1921                         CDEBUG(D_SEC, "mark oldest expired\n");
1922                         exp->exp_flvr_expire[1] = 0;
1923                 }
1924                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1925                        exp, exp->exp_flvr.sf_rpc,
1926                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1927                        req->rq_flvr.sf_rpc);
1928         } else {
1929                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1930                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1931                        exp->exp_flvr_old[1].sf_rpc);
1932         }
1933
1934         spin_unlock(&exp->exp_lock);
1935
1936         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
1937               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1938               exp, exp->exp_obd->obd_name,
1939               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1940               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1941               req->rq_flvr.sf_rpc,
1942               exp->exp_flvr.sf_rpc,
1943               exp->exp_flvr_old[0].sf_rpc,
1944               exp->exp_flvr_expire[0] ?
1945               (unsigned long) (exp->exp_flvr_expire[0] -
1946                                cfs_time_current_sec()) : 0,
1947               exp->exp_flvr_old[1].sf_rpc,
1948               exp->exp_flvr_expire[1] ?
1949               (unsigned long) (exp->exp_flvr_expire[1] -
1950                                cfs_time_current_sec()) : 0);
1951         return -EACCES;
1952 }
1953 EXPORT_SYMBOL(sptlrpc_target_export_check);
1954
1955 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1956                                       struct sptlrpc_rule_set *rset)
1957 {
1958         struct obd_export       *exp;
1959         struct sptlrpc_flavor    new_flvr;
1960
1961         LASSERT(obd);
1962
1963         spin_lock(&obd->obd_dev_lock);
1964
1965         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1966                 if (exp->exp_connection == NULL)
1967                         continue;
1968
1969                 /* note if this export's flavor had just been updated
1970                  * (exp_flvr_changed == 1), this will override the
1971                  * previous update. */
1972                 spin_lock(&exp->exp_lock);
1973                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1974                                              exp->exp_connection->c_peer.nid,
1975                                              &new_flvr);
1976                 if (exp->exp_flvr_changed ||
1977                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1978                         exp->exp_flvr_old[1] = new_flvr;
1979                         exp->exp_flvr_expire[1] = 0;
1980                         exp->exp_flvr_changed = 1;
1981                         exp->exp_flvr_adapt = 1;
1982
1983                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1984                                exp, sptlrpc_part2name(exp->exp_sp_peer),
1985                                exp->exp_flvr.sf_rpc,
1986                                exp->exp_flvr_old[1].sf_rpc);
1987                 }
1988                 spin_unlock(&exp->exp_lock);
1989         }
1990
1991         spin_unlock(&obd->obd_dev_lock);
1992 }
1993 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1994
1995 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1996 {
1997         /* peer's claim is unreliable unless gss is being used */
1998         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
1999                 return svc_rc;
2000
2001         switch (req->rq_sp_from) {
2002         case LUSTRE_SP_CLI:
2003                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2004                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2005                         svc_rc = SECSVC_DROP;
2006                 }
2007                 break;
2008         case LUSTRE_SP_MDT:
2009                 if (!req->rq_auth_usr_mdt) {
2010                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2011                         svc_rc = SECSVC_DROP;
2012                 }
2013                 break;
2014         case LUSTRE_SP_OST:
2015                 if (!req->rq_auth_usr_ost) {
2016                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2017                         svc_rc = SECSVC_DROP;
2018                 }
2019                 break;
2020         case LUSTRE_SP_MGS:
2021         case LUSTRE_SP_MGC:
2022                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2023                     !req->rq_auth_usr_ost) {
2024                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2025                         svc_rc = SECSVC_DROP;
2026                 }
2027                 break;
2028         case LUSTRE_SP_ANY:
2029         default:
2030                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2031                 svc_rc = SECSVC_DROP;
2032         }
2033
2034         return svc_rc;
2035 }
2036
2037 /**
2038  * Used by ptlrpc server, to perform transformation upon request message of
2039  * incoming \a req. This must be the first thing to do with an incoming
2040  * request in the ptlrpc layer.
2041  *
2042  * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
2043  * in clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
2044  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2045  * reply message has been prepared.
2046  * \retval SECSVC_DROP failed, this request should be dropped.
2047  */
2048 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2049 {
2050         struct ptlrpc_sec_policy *policy;
2051         struct lustre_msg        *msg = req->rq_reqbuf;
2052         int                       rc;
2053         ENTRY;
2054
2055         LASSERT(msg);
2056         LASSERT(req->rq_reqmsg == NULL);
2057         LASSERT(req->rq_repmsg == NULL);
2058         LASSERT(req->rq_svc_ctx == NULL);
2059
2060         req->rq_req_swab_mask = 0;
2061
2062         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2063         switch (rc) {
2064         case 1:
2065                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2066         case 0:
2067                 break;
2068         default:
2069                 CERROR("error unpacking request from %s x%llu\n",
2070                        libcfs_id2str(req->rq_peer), req->rq_xid);
2071                 RETURN(SECSVC_DROP);
2072         }
2073
2074         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2075         req->rq_sp_from = LUSTRE_SP_ANY;
2076         req->rq_auth_uid = -1;          /* set to INVALID_UID */
2077         req->rq_auth_mapped_uid = -1;
2078
2079         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2080         if (!policy) {
2081                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2082                 RETURN(SECSVC_DROP);
2083         }
2084
2085         LASSERT(policy->sp_sops->accept);
2086         rc = policy->sp_sops->accept(req);
2087         sptlrpc_policy_put(policy);
2088         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2089         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2090
2091         /*
2092          * if it's not the null flavor (which means the real request msg is
2093          * embedded inside), reset the swab mask for the coming inner msg unpacking.
2094          */
2095         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2096                 req->rq_req_swab_mask = 0;
2097
2098         /* sanity check for the request source */
2099         rc = sptlrpc_svc_check_from(req, rc);
2100         RETURN(rc);
2101 }
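
/*
 * Illustrative sketch (hypothetical dispatcher fragment) of how the three
 * return values described above are meant to be handled by service code.
 */
#if 0
static int sptlrpc_example_handle_incoming(struct ptlrpc_request *req)
{
        int rc = sptlrpc_svc_unwrap_request(req);

        switch (rc) {
        case SECSVC_OK:
                /* rq_reqmsg holds rq_reqlen bytes of clear text; go on and
                 * dispatch the request to its handler */
                break;
        case SECSVC_COMPLETE:
                /* the policy already prepared the reply; just send it */
                break;
        case SECSVC_DROP:
        default:
                /* drop the request without replying */
                break;
        }
        return rc;
}
#endif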
2102
2103 /**
2104  * Used by ptlrpc server, to allocate reply buffer for \a req. On success,
2105  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2106  * a buffer of \a msglen size.
2107  */
2108 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2109 {
2110         struct ptlrpc_sec_policy *policy;
2111         struct ptlrpc_reply_state *rs;
2112         int rc;
2113         ENTRY;
2114
2115         LASSERT(req->rq_svc_ctx);
2116         LASSERT(req->rq_svc_ctx->sc_policy);
2117
2118         policy = req->rq_svc_ctx->sc_policy;
2119         LASSERT(policy->sp_sops->alloc_rs);
2120
2121         rc = policy->sp_sops->alloc_rs(req, msglen);
2122         if (unlikely(rc == -ENOMEM)) {
2123                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2124                 if (svcpt->scp_service->srv_max_reply_size <
2125                    msglen + sizeof(struct ptlrpc_reply_state)) {
2126                         /* Just return failure if the size is too big */
2127                         CERROR("size of message is too big (%zd), %d allowed\n",
2128                                 msglen + sizeof(struct ptlrpc_reply_state),
2129                                 svcpt->scp_service->srv_max_reply_size);
2130                         RETURN(-ENOMEM);
2131                 }
2132
2133                 /* failed alloc, try emergency pool */
2134                 rs = lustre_get_emerg_rs(svcpt);
2135                 if (rs == NULL)
2136                         RETURN(-ENOMEM);
2137
2138                 req->rq_reply_state = rs;
2139                 rc = policy->sp_sops->alloc_rs(req, msglen);
2140                 if (rc) {
2141                         lustre_put_emerg_rs(rs);
2142                         req->rq_reply_state = NULL;
2143                 }
2144         }
2145
2146         LASSERT(rc != 0 ||
2147                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2148
2149         RETURN(rc);
2150 }
2151
2152 /**
2153  * Used by ptlrpc server, to perform transformation upon reply message.
2154  *
2155  * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
2156  * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2157  */
2158 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2159 {
2160         struct ptlrpc_sec_policy *policy;
2161         int rc;
2162         ENTRY;
2163
2164         LASSERT(req->rq_svc_ctx);
2165         LASSERT(req->rq_svc_ctx->sc_policy);
2166
2167         policy = req->rq_svc_ctx->sc_policy;
2168         LASSERT(policy->sp_sops->authorize);
2169
2170         rc = policy->sp_sops->authorize(req);
2171         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2172
2173         RETURN(rc);
2174 }
2175
2176 /**
2177  * Used by ptlrpc server, to free reply_state.
2178  */
2179 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2180 {
2181         struct ptlrpc_sec_policy *policy;
2182         unsigned int prealloc;
2183         ENTRY;
2184
2185         LASSERT(rs->rs_svc_ctx);
2186         LASSERT(rs->rs_svc_ctx->sc_policy);
2187
2188         policy = rs->rs_svc_ctx->sc_policy;
2189         LASSERT(policy->sp_sops->free_rs);
2190
2191         prealloc = rs->rs_prealloc;
2192         policy->sp_sops->free_rs(rs);
2193
2194         if (prealloc)
2195                 lustre_put_emerg_rs(rs);
2196         EXIT;
2197 }
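
/*
 * Illustrative sketch (hypothetical helper) tying the reply-side calls
 * together: allocate the reply state, fill rs_msg, then authorize/wrap it.
 * The reply state itself is normally released later through
 * sptlrpc_svc_free_rs() once the reply has been handled.
 */
#if 0
static int sptlrpc_example_reply_cycle(struct ptlrpc_request *req, int msglen)
{
        int rc;

        rc = sptlrpc_svc_alloc_rs(req, msglen);
        if (rc)
                return rc;

        /* fill req->rq_reply_state->rs_msg here */

        return sptlrpc_svc_wrap_reply(req);
}
#endif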
2198
2199 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2200 {
2201         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2202
2203         if (ctx != NULL)
2204                 atomic_inc(&ctx->sc_refcount);
2205 }
2206
2207 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2208 {
2209         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2210
2211         if (ctx == NULL)
2212                 return;
2213
2214         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2215         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2216                 if (ctx->sc_policy->sp_sops->free_ctx)
2217                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2218         }
2219         req->rq_svc_ctx = NULL;
2220 }
2221
2222 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2223 {
2224         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2225
2226         if (ctx == NULL)
2227                 return;
2228
2229         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2230         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2231                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2232 }
2233 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2234
2235 /****************************************
2236  * bulk security                        *
2237  ****************************************/
2238
2239 /**
2240  * Perform transformation upon bulk data pointed to by \a desc. This is called
2241  * before transforming the request message.
2242  */
2243 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2244                           struct ptlrpc_bulk_desc *desc)
2245 {
2246         struct ptlrpc_cli_ctx *ctx;
2247
2248         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2249
2250         if (!req->rq_pack_bulk)
2251                 return 0;
2252
2253         ctx = req->rq_cli_ctx;
2254         if (ctx->cc_ops->wrap_bulk)
2255                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2256         return 0;
2257 }
2258 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2259
2260 /**
2261  * This is called after the reply message has been unwrapped.
2262  * Returns the number of plain text bytes actually received, or an error code.
2263  */
2264 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2265                                  struct ptlrpc_bulk_desc *desc,
2266                                  int nob)
2267 {
2268         struct ptlrpc_cli_ctx  *ctx;
2269         int                     rc;
2270
2271         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2272
2273         if (!req->rq_pack_bulk)
2274                 return desc->bd_nob_transferred;
2275
2276         ctx = req->rq_cli_ctx;
2277         if (ctx->cc_ops->unwrap_bulk) {
2278                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2279                 if (rc < 0)
2280                         return rc;
2281         }
2282         return desc->bd_nob_transferred;
2283 }
2284 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2285
2286 /**
2287  * This is called after the reply message has been unwrapped.
2288  * Returns 0 on success or an error code.
2289  */
2290 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2291                                   struct ptlrpc_bulk_desc *desc)
2292 {
2293         struct ptlrpc_cli_ctx  *ctx;
2294         int                     rc;
2295
2296         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2297
2298         if (!req->rq_pack_bulk)
2299                 return 0;
2300
2301         ctx = req->rq_cli_ctx;
2302         if (ctx->cc_ops->unwrap_bulk) {
2303                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2304                 if (rc < 0)
2305                         return rc;
2306         }
2307
2308         /*
2309          * if everything is going right, nob should equal nob_transferred.
2310          * in case of privacy mode, nob_transferred needs to be adjusted.
2311          */
2312         if (desc->bd_nob != desc->bd_nob_transferred) {
2313                 CERROR("nob %d doesn't match transferred nob %d\n",
2314                        desc->bd_nob, desc->bd_nob_transferred);
2315                 return -EPROTO;
2316         }
2317
2318         return 0;
2319 }
2320 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2321
2322 #ifdef HAVE_SERVER_SUPPORT
2323 /**
2324  * Perform transformation upon outgoing bulk read.
2325  */
2326 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2327                           struct ptlrpc_bulk_desc *desc)
2328 {
2329         struct ptlrpc_svc_ctx *ctx;
2330
2331         LASSERT(req->rq_bulk_read);
2332
2333         if (!req->rq_pack_bulk)
2334                 return 0;
2335
2336         ctx = req->rq_svc_ctx;
2337         if (ctx->sc_policy->sp_sops->wrap_bulk)
2338                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2339
2340         return 0;
2341 }
2342 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2343
2344 /**
2345  * Perform transformation upon incoming bulk write.
2346  */
2347 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2348                             struct ptlrpc_bulk_desc *desc)
2349 {
2350         struct ptlrpc_svc_ctx *ctx;
2351         int                    rc;
2352
2353         LASSERT(req->rq_bulk_write);
2354
2355         /*
2356          * in privacy mode, transferred should be >= expected; otherwise
2357          * transferred should be == expected.
2358          */
2359         if (desc->bd_nob_transferred < desc->bd_nob ||
2360             (desc->bd_nob_transferred > desc->bd_nob &&
2361              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2362              SPTLRPC_BULK_SVC_PRIV)) {
2363                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2364                           desc->bd_nob_transferred, desc->bd_nob);
2365                 return -ETIMEDOUT;
2366         }
2367
2368         if (!req->rq_pack_bulk)
2369                 return 0;
2370
2371         ctx = req->rq_svc_ctx;
2372         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2373                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2374                 if (rc)
2375                         CERROR("error unwrap bulk: %d\n", rc);
2376         }
2377
2378         /* return 0 to allow reply be sent */
2379         return 0;
2380 }
2381 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2382
2383 /**
2384  * Prepare buffers for incoming bulk write.
2385  */
2386 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2387                           struct ptlrpc_bulk_desc *desc)
2388 {
2389         struct ptlrpc_svc_ctx *ctx;
2390
2391         LASSERT(req->rq_bulk_write);
2392
2393         if (!req->rq_pack_bulk)
2394                 return 0;
2395
2396         ctx = req->rq_svc_ctx;
2397         if (ctx->sc_policy->sp_sops->prep_bulk)
2398                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2399
2400         return 0;
2401 }
2402 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2403
2404 #endif /* HAVE_SERVER_SUPPORT */
2405
2406 /****************************************
2407  * user descriptor helpers              *
2408  ****************************************/
2409
2410 int sptlrpc_current_user_desc_size(void)
2411 {
2412         int ngroups;
2413
2414         ngroups = current_ngroups;
2415
2416         if (ngroups > LUSTRE_MAX_GROUPS)
2417                 ngroups = LUSTRE_MAX_GROUPS;
2418         return sptlrpc_user_desc_size(ngroups);
2419 }
2420 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2421
2422 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2423 {
2424         struct ptlrpc_user_desc *pud;
2425
2426         pud = lustre_msg_buf(msg, offset, 0);
2427
2428         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2429         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2430         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2431         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2432         pud->pud_cap = cfs_curproc_cap_pack();
2433         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2434
2435         task_lock(current);
2436         if (pud->pud_ngroups > current_ngroups)
2437                 pud->pud_ngroups = current_ngroups;
2438         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2439                pud->pud_ngroups * sizeof(__u32));
2440         task_unlock(current);
2441
2442         return 0;
2443 }
2444 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2445
2446 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2447 {
2448         struct ptlrpc_user_desc *pud;
2449         int                      i;
2450
2451         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2452         if (!pud)
2453                 return -EINVAL;
2454
2455         if (swabbed) {
2456                 __swab32s(&pud->pud_uid);
2457                 __swab32s(&pud->pud_gid);
2458                 __swab32s(&pud->pud_fsuid);
2459                 __swab32s(&pud->pud_fsgid);
2460                 __swab32s(&pud->pud_cap);
2461                 __swab32s(&pud->pud_ngroups);
2462         }
2463
2464         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2465                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2466                 return -EINVAL;
2467         }
2468
2469         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2470             msg->lm_buflens[offset]) {
2471                 CERROR("%u groups are claimed but bufsize only %u\n",
2472                        pud->pud_ngroups, msg->lm_buflens[offset]);
2473                 return -EINVAL;
2474         }
2475
2476         if (swabbed) {
2477                 for (i = 0; i < pud->pud_ngroups; i++)
2478                         __swab32s(&pud->pud_groups[i]);
2479         }
2480
2481         return 0;
2482 }
2483 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
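
/*
 * Illustrative sketch (hypothetical round trip): the sender packs the user
 * descriptor into a message segment that was sized with
 * sptlrpc_current_user_desc_size(); the receiver validates (and byte-swaps
 * if needed) the same segment before trusting it.
 */
#if 0
static int sptlrpc_example_user_desc(struct lustre_msg *msg, int offset,
                                     int swabbed)
{
        int rc;

        rc = sptlrpc_pack_user_desc(msg, offset);
        if (rc)
                return rc;

        return sptlrpc_unpack_user_desc(msg, offset, swabbed);
}
#endif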
2484
2485 /****************************************
2486  * misc helpers                         *
2487  ****************************************/
2488
2489 const char * sec2target_str(struct ptlrpc_sec *sec)
2490 {
2491         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2492                 return "*";
2493         if (sec_is_reverse(sec))
2494                 return "c";
2495         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2496 }
2497 EXPORT_SYMBOL(sec2target_str);
2498
2499 /*
2500  * return true if the bulk data is protected
2501  */
2502 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2503 {
2504         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2505         case SPTLRPC_BULK_SVC_INTG:
2506         case SPTLRPC_BULK_SVC_PRIV:
2507                 return 1;
2508         default:
2509                 return 0;
2510         }
2511 }
2512 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2513
2514 /****************************************
2515  * crypto API helper/alloc blkcipher    *
2516  ****************************************/
2517
2518 /****************************************
2519  * initialize/finalize                  *
2520  ****************************************/
2521
2522 int sptlrpc_init(void)
2523 {
2524         int rc;
2525
2526         rwlock_init(&policy_lock);
2527
2528         rc = sptlrpc_gc_init();
2529         if (rc)
2530                 goto out;
2531
2532         rc = sptlrpc_conf_init();
2533         if (rc)
2534                 goto out_gc;
2535
2536         rc = sptlrpc_enc_pool_init();
2537         if (rc)
2538                 goto out_conf;
2539
2540         rc = sptlrpc_null_init();
2541         if (rc)
2542                 goto out_pool;
2543
2544         rc = sptlrpc_plain_init();
2545         if (rc)
2546                 goto out_null;
2547
2548         rc = sptlrpc_lproc_init();
2549         if (rc)
2550                 goto out_plain;
2551
2552         return 0;
2553
2554 out_plain:
2555         sptlrpc_plain_fini();
2556 out_null:
2557         sptlrpc_null_fini();
2558 out_pool:
2559         sptlrpc_enc_pool_fini();
2560 out_conf:
2561         sptlrpc_conf_fini();
2562 out_gc:
2563         sptlrpc_gc_fini();
2564 out:
2565         return rc;
2566 }
2567
2568 void sptlrpc_fini(void)
2569 {
2570         sptlrpc_lproc_fini();
2571         sptlrpc_plain_fini();
2572         sptlrpc_null_fini();
2573         sptlrpc_enc_pool_fini();
2574         sptlrpc_conf_fini();
2575         sptlrpc_gc_fini();
2576 }