Whamcloud - gitweb
LU-7988 hsm: run HSM coordinator once per second at most
[fs/lustre-release.git] / lustre / ptlrpc / sec.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2014, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ptlrpc/sec.c
33  *
34  * Author: Eric Mei <ericm@clusterfs.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_SEC
38
39 #include <linux/user_namespace.h>
40 #ifdef HAVE_UIDGID_HEADER
41 # include <linux/uidgid.h>
42 #endif
43 #include <linux/crypto.h>
44 #include <linux/key.h>
45
46 #include <libcfs/libcfs.h>
47 #include <obd.h>
48 #include <obd_class.h>
49 #include <obd_support.h>
50 #include <lustre_net.h>
51 #include <lustre_import.h>
52 #include <lustre_dlm.h>
53 #include <lustre_sec.h>
54
55 #include "ptlrpc_internal.h"
56
57 /***********************************************
58  * policy registers                            *
59  ***********************************************/
60
61 static rwlock_t policy_lock;
62 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
63         NULL,
64 };
65
66 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
67 {
68         __u16 number = policy->sp_policy;
69
70         LASSERT(policy->sp_name);
71         LASSERT(policy->sp_cops);
72         LASSERT(policy->sp_sops);
73
74         if (number >= SPTLRPC_POLICY_MAX)
75                 return -EINVAL;
76
77         write_lock(&policy_lock);
78         if (unlikely(policies[number])) {
79                 write_unlock(&policy_lock);
80                 return -EALREADY;
81         }
82         policies[number] = policy;
83         write_unlock(&policy_lock);
84
85         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
86         return 0;
87 }
88 EXPORT_SYMBOL(sptlrpc_register_policy);
89
90 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
91 {
92         __u16 number = policy->sp_policy;
93
94         LASSERT(number < SPTLRPC_POLICY_MAX);
95
96         write_lock(&policy_lock);
97         if (unlikely(policies[number] == NULL)) {
98                 write_unlock(&policy_lock);
99                 CERROR("%s: already unregistered\n", policy->sp_name);
100                 return -EINVAL;
101         }
102
103         LASSERT(policies[number] == policy);
104         policies[number] = NULL;
105         write_unlock(&policy_lock);
106
107         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
108         return 0;
109 }
110 EXPORT_SYMBOL(sptlrpc_unregister_policy);
111
/*
 * Return the policy registered for wire \a flavor, with a reference
 * taken on the owning module via try_module_get().  If the GSS policy
 * is requested but not yet registered, attempt (exactly once, guarded
 * by load_mutex) to load the ptlrpc_gss module and retry the lookup.
 *
 * Returns NULL if the flavor is out of range, the policy is not
 * registered, or its module reference cannot be taken.
 */
static
struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
{
        static DEFINE_MUTEX(load_mutex);
        static atomic_t           loaded = ATOMIC_INIT(0);
        struct ptlrpc_sec_policy *policy;
        __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
        __u16                     flag = 0;

        if (number >= SPTLRPC_POLICY_MAX)
                return NULL;

        while (1) {
                read_lock(&policy_lock);
                policy = policies[number];
                /* a policy whose module ref can't be taken is treated
                 * the same as an unregistered one */
                if (policy && !try_module_get(policy->sp_owner))
                        policy = NULL;
                if (policy == NULL)
                        flag = atomic_read(&loaded);
                read_unlock(&policy_lock);

                /* retry only for GSS, and only if we have not yet
                 * attempted the on-demand module load */
                if (policy != NULL || flag != 0 ||
                    number != SPTLRPC_POLICY_GSS)
                        break;

                /* try to load gss module, once */
                mutex_lock(&load_mutex);
                if (atomic_read(&loaded) == 0) {
                        if (request_module("ptlrpc_gss") == 0)
                                CDEBUG(D_SEC,
                                       "module ptlrpc_gss loaded on demand\n");
                        else
                                CERROR("Unable to load module ptlrpc_gss\n");

                        atomic_set(&loaded, 1);
                }
                mutex_unlock(&load_mutex);
        }

        return policy;
}
153
154 __u32 sptlrpc_name2flavor_base(const char *name)
155 {
156         if (!strcmp(name, "null"))
157                 return SPTLRPC_FLVR_NULL;
158         if (!strcmp(name, "plain"))
159                 return SPTLRPC_FLVR_PLAIN;
160         if (!strcmp(name, "gssnull"))
161                 return SPTLRPC_FLVR_GSSNULL;
162         if (!strcmp(name, "krb5n"))
163                 return SPTLRPC_FLVR_KRB5N;
164         if (!strcmp(name, "krb5a"))
165                 return SPTLRPC_FLVR_KRB5A;
166         if (!strcmp(name, "krb5i"))
167                 return SPTLRPC_FLVR_KRB5I;
168         if (!strcmp(name, "krb5p"))
169                 return SPTLRPC_FLVR_KRB5P;
170         if (!strcmp(name, "skn"))
171                 return SPTLRPC_FLVR_SKN;
172         if (!strcmp(name, "ska"))
173                 return SPTLRPC_FLVR_SKA;
174         if (!strcmp(name, "ski"))
175                 return SPTLRPC_FLVR_SKI;
176         if (!strcmp(name, "skpi"))
177                 return SPTLRPC_FLVR_SKPI;
178
179         return SPTLRPC_FLVR_INVALID;
180 }
181 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
182
183 const char *sptlrpc_flavor2name_base(__u32 flvr)
184 {
185         __u32   base = SPTLRPC_FLVR_BASE(flvr);
186
187         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
188                 return "null";
189         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
190                 return "plain";
191         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
192                 return "gssnull";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
194                 return "krb5n";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
196                 return "krb5a";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
198                 return "krb5i";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
200                 return "krb5p";
201         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
202                 return "skn";
203         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
204                 return "ska";
205         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
206                 return "ski";
207         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
208                 return "skpi";
209
210         CERROR("invalid wire flavor 0x%x\n", flvr);
211         return "invalid";
212 }
213 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
214
/*
 * Format the bulk part of flavor \a sf into \a buf.  The plain policy
 * encodes its bulk hash algorithm as "hash:<alg>"; every other policy
 * falls back to the base flavor name.  \a buf is always NUL-terminated.
 */
char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
                               char *buf, int bufsize)
{
        if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
                snprintf(buf, bufsize, "hash:%s",
                         sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
        else
                snprintf(buf, bufsize, "%s",
                         sptlrpc_flavor2name_base(sf->sf_rpc));

        /* snprintf already terminates; keep the explicit terminator for
         * belt-and-braces safety */
        buf[bufsize - 1] = '\0';
        return buf;
}
EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
229
230 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
231 {
232         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
233
234         /*
235          * currently we don't support customized bulk specification for
236          * flavors other than plain
237          */
238         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
239                 char bspec[16];
240
241                 bspec[0] = '-';
242                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
243                 strncat(buf, bspec, bufsize);
244         }
245
246         buf[bufsize - 1] = '\0';
247         return buf;
248 }
249 EXPORT_SYMBOL(sptlrpc_flavor2name);
250
251 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
252 {
253         buf[0] = '\0';
254
255         if (flags & PTLRPC_SEC_FL_REVERSE)
256                 strlcat(buf, "reverse,", bufsize);
257         if (flags & PTLRPC_SEC_FL_ROOTONLY)
258                 strlcat(buf, "rootonly,", bufsize);
259         if (flags & PTLRPC_SEC_FL_UDESC)
260                 strlcat(buf, "udesc,", bufsize);
261         if (flags & PTLRPC_SEC_FL_BULK)
262                 strlcat(buf, "bulk,", bufsize);
263         if (buf[0] == '\0')
264                 strlcat(buf, "-,", bufsize);
265
266         return buf;
267 }
268 EXPORT_SYMBOL(sptlrpc_secflags2str);
269
270 /**************************************************
271  * client context APIs                            *
272  **************************************************/
273
/*
 * Look up (or create) the client context under \a sec that matches the
 * calling user's credentials.  Reverse and root-only flavors always use
 * the root credential; reverse contexts are additionally looked up
 * without creating new entries or reclaiming dead ones.
 */
static
struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
{
        struct vfs_cred vcred;
        int create = 1, remove_dead = 1;

        LASSERT(sec);
        LASSERT(sec->ps_policy->sp_cops->lookup_ctx);

        if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
                                     PTLRPC_SEC_FL_ROOTONLY)) {
                /* reverse/root-only: ignore the caller, map to root */
                vcred.vc_uid = 0;
                vcred.vc_gid = 0;
                if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
                        create = 0;
                        remove_dead = 0;
                }
        } else {
                /* normal case: the current task's uid/gid */
                vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
                vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
        }

        return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
                                                   remove_dead);
}
299
/**
 * Take an additional reference on \a ctx and return it.
 */
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
        atomic_inc(&ctx->cc_refcount);
        return ctx;
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
306
/**
 * Drop a reference on \a ctx.  The final reference releases the context
 * through the owning policy's release_ctx; \a sync is forwarded to it
 * (NOTE(review): presumably controls whether the release may block on a
 * context-destroying RPC — confirm against the policy implementations).
 */
void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
{
        struct ptlrpc_sec *sec = ctx->cc_sec;

        LASSERT(sec);
        LASSERT_ATOMIC_POS(&ctx->cc_refcount);

        if (!atomic_dec_and_test(&ctx->cc_refcount))
                return;

        /* last reference is gone: hand the context back to its policy */
        sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
320
/**
 * Expire the client context immediately.
 *
 * \pre Caller must hold at least 1 reference on the \a ctx.
 */
void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(ctx->cc_ops->die);
        /* second argument 0 — NOTE(review): presumably "no grace
         * period"; confirm against cc_ops->die implementations */
        ctx->cc_ops->die(ctx, 0);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
332
333 /**
334  * To wake up the threads who are waiting for this client context. Called
335  * after some status change happened on \a ctx.
336  */
337 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
338 {
339         struct ptlrpc_request *req, *next;
340
341         spin_lock(&ctx->cc_lock);
342         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
343                                      rq_ctx_chain) {
344                 list_del_init(&req->rq_ctx_chain);
345                 ptlrpc_client_wake_req(req);
346         }
347         spin_unlock(&ctx->cc_lock);
348 }
349 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
350
351 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
352 {
353         LASSERT(ctx->cc_ops);
354
355         if (ctx->cc_ops->display == NULL)
356                 return 0;
357
358         return ctx->cc_ops->display(ctx, buf, bufsize);
359 }
360
361 static int import_sec_check_expire(struct obd_import *imp)
362 {
363         int     adapt = 0;
364
365         spin_lock(&imp->imp_lock);
366         if (imp->imp_sec_expire &&
367             imp->imp_sec_expire < ktime_get_real_seconds()) {
368                 adapt = 1;
369                 imp->imp_sec_expire = 0;
370         }
371         spin_unlock(&imp->imp_lock);
372
373         if (!adapt)
374                 return 0;
375
376         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
377         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
378 }
379
380 /**
381  * Get and validate the client side ptlrpc security facilities from
382  * \a imp. There is a race condition on client reconnect when the import is
383  * being destroyed while there are outstanding client bound requests. In
384  * this case do not output any error messages if import secuity is not
385  * found.
386  *
387  * \param[in] imp obd import associated with client
388  * \param[out] sec client side ptlrpc security
389  *
390  * \retval 0 if security retrieved successfully
391  * \retval -ve errno if there was a problem
392  */
393 static int import_sec_validate_get(struct obd_import *imp,
394                                    struct ptlrpc_sec **sec)
395 {
396         int     rc;
397
398         if (unlikely(imp->imp_sec_expire)) {
399                 rc = import_sec_check_expire(imp);
400                 if (rc)
401                         return rc;
402         }
403
404         *sec = sptlrpc_import_sec_ref(imp);
405         /* Only output an error when the import is still active */
406         if (*sec == NULL) {
407                 if (list_empty(&imp->imp_zombie_chain))
408                         CERROR("import %p (%s) with no sec\n",
409                                 imp, ptlrpc_import_state_name(imp->imp_state));
410                 return -EACCES;
411         }
412
413         if (unlikely((*sec)->ps_dying)) {
414                 CERROR("attempt to use dying sec %p\n", sec);
415                 sptlrpc_sec_put(*sec);
416                 return -EACCES;
417         }
418
419         return 0;
420 }
421
/**
 * Find or allocate a security context for \a req.
 * \pre req->rq_cli_ctx == NULL.
 *
 * \retval 0 on success, req->rq_cli_ctx set.
 * \retval negative errno on failure, req->rq_cli_ctx left NULL.
 */
int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_sec *sec;
        int rc;
        ENTRY;

        LASSERT(!req->rq_cli_ctx);
        LASSERT(imp);

        rc = import_sec_validate_get(imp, &sec);
        if (rc != 0)
                RETURN(rc);

        req->rq_cli_ctx = get_my_ctx(sec);
        sptlrpc_sec_put(sec);

        if (req->rq_cli_ctx == NULL) {
                CERROR("req %p: fail to get context\n", req);
                RETURN(-ECONNREFUSED);
        }

        RETURN(0);
}
454
/**
 * Drop the context for \a req.
 * \pre req->rq_cli_ctx != NULL.
 * \post req->rq_cli_ctx == NULL.
 *
 * If \a sync == 0, this function should return quickly without sleep;
 * otherwise it might trigger and wait for the whole process of sending
 * an context-destroying rpc to server.
 */
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
        ENTRY;

        LASSERT(req);
        LASSERT(req->rq_cli_ctx);

        /* request might be asked to release earlier while still
         * in the context waiting list.
         */
        /* NOTE(review): list_empty is tested without cc_lock and only
         * re-checked implicitly via list_del_init under the lock —
         * presumably the race is benign here; confirm */
        if (!list_empty(&req->rq_ctx_chain)) {
                spin_lock(&req->rq_cli_ctx->cc_lock);
                list_del_init(&req->rq_ctx_chain);
                spin_unlock(&req->rq_cli_ctx->cc_lock);
        }

        sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
        req->rq_cli_ctx = NULL;
        EXIT;
}
484
/*
 * Move \a req from \a oldctx to \a newctx: preserve the request message,
 * free the security buffers that were allocated under the old context,
 * recompute the flavor for the new context, then re-allocate the request
 * buffer and restore the message.  On allocation failure the previous
 * flavor is restored and an error is returned.
 */
static
int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
                           struct ptlrpc_cli_ctx *oldctx,
                           struct ptlrpc_cli_ctx *newctx)
{
        struct sptlrpc_flavor   old_flvr;
        char                   *reqmsg = NULL; /* to workaround old gcc */
        int                     reqmsg_size;
        int                     rc = 0;

        LASSERT(req->rq_reqmsg);
        LASSERT(req->rq_reqlen);
        LASSERT(req->rq_replen);

        CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
               "switch sec %p(%s) -> %p(%s)\n", req,
               oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
               newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
               oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
               newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);

        /* save flavor */
        old_flvr = req->rq_flvr;

        /* save request message */
        reqmsg_size = req->rq_reqlen;
        if (reqmsg_size != 0) {
                OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
                if (reqmsg == NULL)
                        return -ENOMEM;
                memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
        }

        /* release old req/rep buf: point rq_cli_ctx at the old context
         * while freeing the buffers that were allocated under it, then
         * switch to the new context */
        req->rq_cli_ctx = oldctx;
        sptlrpc_cli_free_reqbuf(req);
        sptlrpc_cli_free_repbuf(req);
        req->rq_cli_ctx = newctx;

        /* recalculate the flavor */
        sptlrpc_req_set_flavor(req, 0);

        /* alloc new request buffer
         * we don't need to alloc reply buffer here, leave it to the
         * rest procedure of ptlrpc */
        if (reqmsg_size != 0) {
                rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
                if (!rc) {
                        LASSERT(req->rq_reqmsg);
                        memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
                } else {
                        CWARN("failed to alloc reqbuf: %d\n", rc);
                        req->rq_flvr = old_flvr;
                }

                OBD_FREE_LARGE(reqmsg, reqmsg_size);
        }
        return rc;
}
544
/**
 * If current context of \a req is dead somehow, e.g. we just switched flavor
 * thus marked original contexts dead, we'll find a new context for it. if
 * no switch is needed, \a req will end up with the same context.
 *
 * \note a request must have a context, to keep other parts of code happy.
 * In any case of failure during the switching, we must restore the old one.
 */
int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
        struct ptlrpc_cli_ctx *newctx;
        int                    rc;
        ENTRY;

        LASSERT(oldctx);

        /* keep the old ctx alive across the switch with an extra ref */
        sptlrpc_cli_ctx_get(oldctx);
        sptlrpc_req_put_ctx(req, 0);

        rc = sptlrpc_req_get_ctx(req);
        if (unlikely(rc)) {
                LASSERT(!req->rq_cli_ctx);

                /* restore old ctx */
                req->rq_cli_ctx = oldctx;
                RETURN(rc);
        }

        newctx = req->rq_cli_ctx;
        LASSERT(newctx);

        if (unlikely(newctx == oldctx &&
                     test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
                /*
                 * still get the old dead ctx, usually means system too busy
                 */
                CDEBUG(D_SEC,
                       "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
                       newctx, newctx->cc_flags);

                /* back off about one second before the caller retries */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
        } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
                            == 0)) {
                /*
                 * new ctx not up to date yet
                 */
                CDEBUG(D_SEC,
                       "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
                       newctx, newctx->cc_flags);
        } else {
                /*
                 * it's possible newctx == oldctx if we're switching
                 * subflavor with the same sec.
                 */
                rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
                if (rc) {
                        /* restore old ctx */
                        sptlrpc_req_put_ctx(req, 0);
                        req->rq_cli_ctx = oldctx;
                        RETURN(rc);
                }

                LASSERT(req->rq_cli_ctx == newctx);
        }

        /* drop the extra ref taken above (sync == 1) */
        sptlrpc_cli_ctx_put(oldctx, 1);
        RETURN(0);
}
EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
616
/* Wait-condition helper: non-zero once \a ctx has finished refreshing. */
static
int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
{
        return cli_ctx_is_refreshed(ctx) ? 1 : 0;
}
624
/*
 * Timeout callback for the context-refresh wait in
 * sptlrpc_req_refresh_ctx(): expire the request, and when
 * ptlrpc_expire_one_request() returns 0 also mark the context dead
 * (see the comment below for why).
 */
static
int ctx_refresh_timeout(void *data)
{
        struct ptlrpc_request *req = data;
        int rc;

        /* conn_cnt is needed in expire_one_request */
        lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);

        rc = ptlrpc_expire_one_request(req, 1);
        /* if we started recovery, we should mark this ctx dead; otherwise
         * in case of lgssd died nobody would retire this ctx, following
         * connecting will still find the same ctx thus cause deadlock.
         * there's an assumption that expire time of the request should be
         * later than the context refresh expire time.
         */
        if (rc == 0)
                req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
        return rc;
}
645
/*
 * Interrupt callback for the context-refresh wait: flag the request as
 * interrupted so the refresh path can give up.
 */
static
void ctx_refresh_interrupt(void *data)
{
        struct ptlrpc_request *req = data;

        spin_lock(&req->rq_lock);
        req->rq_intr = 1;
        spin_unlock(&req->rq_lock);
}
655
/* Unlink \a req from \a ctx's waiting list, if it is currently on it. */
static
void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
{
        spin_lock(&ctx->cc_lock);
        if (!list_empty(&req->rq_ctx_chain))
                list_del_init(&req->rq_ctx_chain);
        spin_unlock(&ctx->cc_lock);
}
664
/**
 * To refresh the context of \req, if it's not up-to-date.
 * \param timeout
 * - < 0: don't wait
 * - = 0: wait until success or fatal error occur
 * - > 0: timeout value (in seconds)
 *
 * The status of the context could be subject to be changed by other threads
 * at any time. We allow this race, but once we return with 0, the caller will
 * suppose it's uptodated and keep using it until the owning rpc is done.
 *
 * \retval 0 only if the context is uptodated.
 * \retval -ev error number.
 */
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
{
        struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec      *sec;
        struct l_wait_info      lwi;
        int                     rc;
        ENTRY;

        LASSERT(ctx);

        /* ctx init/fini RPCs manage their own context state */
        if (req->rq_ctx_init || req->rq_ctx_fini)
                RETURN(0);

        /*
         * during the process a request's context might change type even
         * (e.g. from gss ctx to null ctx), so each loop we need to re-check
         * everything
         */
again:
        rc = import_sec_validate_get(req->rq_import, &sec);
        if (rc)
                RETURN(rc);

        /* import flavor changed since the request was built: replace
         * the context and recompute the request's flavor */
        if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
                CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
                      req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
                req_off_ctx_list(req, ctx);
                sptlrpc_req_replace_dead_ctx(req);
                ctx = req->rq_cli_ctx;
        }
        sptlrpc_sec_put(sec);

        /* eternal contexts never need refreshing */
        if (cli_ctx_is_eternal(ctx))
                RETURN(0);

        if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
                /* brand-new context: kick off its refresh */
                LASSERT(ctx->cc_ops->refresh);
                ctx->cc_ops->refresh(ctx);
        }
        LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);

        LASSERT(ctx->cc_ops->validate);
        if (ctx->cc_ops->validate(ctx) == 0) {
                /* context is usable: done */
                req_off_ctx_list(req, ctx);
                RETURN(0);
        }

        if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
                /* refresh failed permanently: fail the request */
                spin_lock(&req->rq_lock);
                req->rq_err = 1;
                spin_unlock(&req->rq_lock);
                req_off_ctx_list(req, ctx);
                RETURN(-EPERM);
        }

        /*
         * There's a subtle issue for resending RPCs, suppose following
         * situation:
         *  1. the request was sent to server.
         *  2. recovery was kicked start, after finished the request was
         *     marked as resent.
         *  3. resend the request.
         *  4. old reply from server received, we accept and verify the reply.
         *     this has to be success, otherwise the error will be aware
         *     by application.
         *  5. new reply from server received, dropped by LNet.
         *
         * Note the xid of old & new request is the same. We can't simply
         * change xid for the resent request because the server replies on
         * it for reply reconstruction.
         *
         * Commonly the original context should be uptodate because we
         * have an expiry nice time; server will keep its context because
         * we at least hold a ref of old context which prevent context
         * from destroying RPC being sent. So server still can accept the
         * request and finish the RPC. But if that's not the case:
         *  1. If server side context has been trimmed, a NO_CONTEXT will
         *     be returned, gss_cli_ctx_verify/unseal will switch to new
         *     context by force.
         *  2. Current context never be refreshed, then we are fine: we
         *     never really send request with old context before.
         */
        if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
            unlikely(req->rq_reqmsg) &&
            lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
                req_off_ctx_list(req, ctx);
                RETURN(0);
        }

        if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
                req_off_ctx_list(req, ctx);
                /*
                 * don't switch ctx if import was deactivated
                 */
                if (req->rq_import->imp_deactive) {
                        spin_lock(&req->rq_lock);
                        req->rq_err = 1;
                        spin_unlock(&req->rq_lock);
                        RETURN(-EINTR);
                }

                rc = sptlrpc_req_replace_dead_ctx(req);
                if (rc) {
                        LASSERT(ctx == req->rq_cli_ctx);
                        CERROR("req %p: failed to replace dead ctx %p: %d\n",
                               req, ctx, rc);
                        spin_lock(&req->rq_lock);
                        req->rq_err = 1;
                        spin_unlock(&req->rq_lock);
                        RETURN(rc);
                }

                /* restart validation with the replacement context */
                ctx = req->rq_cli_ctx;
                goto again;
        }

        /*
         * Now we're sure this context is during upcall, add myself into
         * waiting list
         */
        spin_lock(&ctx->cc_lock);
        if (list_empty(&req->rq_ctx_chain))
                list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
        spin_unlock(&ctx->cc_lock);

        if (timeout < 0)
                RETURN(-EWOULDBLOCK);

        /* Clear any flags that may be present from previous sends */
        LASSERT(req->rq_receiving_reply == 0);
        spin_lock(&req->rq_lock);
        req->rq_err = 0;
        req->rq_timedout = 0;
        req->rq_resend = 0;
        req->rq_restart = 0;
        spin_unlock(&req->rq_lock);

        /* timeout == 0 means wait forever (LWI timeout of 0 jiffies) */
        lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
                               ctx_refresh_timeout,
                               ctx_refresh_interrupt, req);
        rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);

        /*
         * following cases could lead us here:
         * - successfully refreshed;
         * - interrupted;
         * - timedout, and we don't want recover from the failure;
         * - timedout, and waked up upon recovery finished;
         * - someone else mark this ctx dead by force;
         * - someone invalidate the req and call ptlrpc_client_wake_req(),
         *   e.g. ptlrpc_abort_inflight();
         */
        if (!cli_ctx_is_refreshed(ctx)) {
                /* timed out or interruptted */
                req_off_ctx_list(req, ctx);

                LASSERT(rc != 0);
                RETURN(rc);
        }

        goto again;
}
841
/**
 * Initialize flavor settings for \a req, according to \a opcode.
 *
 * \note this could be called in two situations:
 * - new request from ptlrpc_pre_req(), with proper @opcode
 * - old request which changed ctx in the middle, with @opcode == 0
 */
void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
{
        struct ptlrpc_sec *sec;

        LASSERT(req->rq_import);
        LASSERT(req->rq_cli_ctx);
        LASSERT(req->rq_cli_ctx->cc_sec);
        LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);

        /* special security flags according to opcode */
        switch (opcode) {
        case OST_READ:
        case MDS_READPAGE:
        case MGS_CONFIG_READ:
        case OBD_IDX_READ:
                req->rq_bulk_read = 1;
                break;
        case OST_WRITE:
        case MDS_WRITEPAGE:
                req->rq_bulk_write = 1;
                break;
        case SEC_CTX_INIT:
                req->rq_ctx_init = 1;
                break;
        case SEC_CTX_FINI:
                req->rq_ctx_fini = 1;
                break;
        case 0:
                /* init/fini rpc won't be resend, so can't be here */
                LASSERT(req->rq_ctx_init == 0);
                LASSERT(req->rq_ctx_fini == 0);

                /* cleanup flags, which should be recalculated */
                req->rq_pack_udesc = 0;
                req->rq_pack_bulk = 0;
                break;
        }

        sec = req->rq_cli_ctx->cc_sec;

        /* snapshot the current sec flavor under ps_lock */
        spin_lock(&sec->ps_lock);
        req->rq_flvr = sec->ps_flvr;
        spin_unlock(&sec->ps_lock);

        /* force SVC_NULL for context initiation rpc, SVC_INTG for context
         * destruction rpc */
        if (unlikely(req->rq_ctx_init))
                flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
        else if (unlikely(req->rq_ctx_fini))
                flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);

        /* user descriptor flag, null security can't do it anyway */
        if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
            (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
                req->rq_pack_udesc = 1;

        /* bulk security flag */
        if ((req->rq_bulk_read || req->rq_bulk_write) &&
            sptlrpc_flavor_has_bulk(&req->rq_flvr))
                req->rq_pack_bulk = 1;
}
910
/**
 * Called when the request message has gone out: for SVC_PRIV flavors,
 * free the cipher-text buffer (rq_reqbuf) early while the clear-text
 * buffer (rq_clrbuf) is kept. No-op for other service modes.
 */
void sptlrpc_request_out_callback(struct ptlrpc_request *req)
{
        /* only the privacy mode keeps separate cipher/clear buffers */
        if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
                return;

        LASSERT(req->rq_clrbuf);
        /* pool-owned buffers belong to the pool; nothing to free if no
         * private reqbuf was allocated */
        if (req->rq_pool || !req->rq_reqbuf)
                return;

        OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
        req->rq_reqbuf = NULL;
        req->rq_reqbuf_len = 0;
}
924
925 /**
926  * Given an import \a imp, check whether current user has a valid context
927  * or not. We may create a new context and try to refresh it, and try
928  * repeatedly try in case of non-fatal errors. Return 0 means success.
929  */
930 int sptlrpc_import_check_ctx(struct obd_import *imp)
931 {
932         struct ptlrpc_sec     *sec;
933         struct ptlrpc_cli_ctx *ctx;
934         struct ptlrpc_request *req = NULL;
935         int rc;
936         ENTRY;
937
938         might_sleep();
939
940         sec = sptlrpc_import_sec_ref(imp);
941         ctx = get_my_ctx(sec);
942         sptlrpc_sec_put(sec);
943
944         if (!ctx)
945                 RETURN(-ENOMEM);
946
947         if (cli_ctx_is_eternal(ctx) ||
948             ctx->cc_ops->validate(ctx) == 0) {
949                 sptlrpc_cli_ctx_put(ctx, 1);
950                 RETURN(0);
951         }
952
953         if (cli_ctx_is_error(ctx)) {
954                 sptlrpc_cli_ctx_put(ctx, 1);
955                 RETURN(-EACCES);
956         }
957
958         req = ptlrpc_request_cache_alloc(GFP_NOFS);
959         if (!req)
960                 RETURN(-ENOMEM);
961
962         ptlrpc_cli_req_init(req);
963         atomic_set(&req->rq_refcount, 10000);
964
965         req->rq_import = imp;
966         req->rq_flvr = sec->ps_flvr;
967         req->rq_cli_ctx = ctx;
968
969         rc = sptlrpc_req_refresh_ctx(req, 0);
970         LASSERT(list_empty(&req->rq_ctx_chain));
971         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
972         ptlrpc_request_cache_free(req);
973
974         RETURN(rc);
975 }
976
977 /**
978  * Used by ptlrpc client, to perform the pre-defined security transformation
979  * upon the request message of \a req. After this function called,
980  * req->rq_reqmsg is still accessible as clear text.
981  */
982 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
983 {
984         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
985         int rc = 0;
986         ENTRY;
987
988         LASSERT(ctx);
989         LASSERT(ctx->cc_sec);
990         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
991
992         /* we wrap bulk request here because now we can be sure
993          * the context is uptodate.
994          */
995         if (req->rq_bulk) {
996                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
997                 if (rc)
998                         RETURN(rc);
999         }
1000
1001         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1002         case SPTLRPC_SVC_NULL:
1003         case SPTLRPC_SVC_AUTH:
1004         case SPTLRPC_SVC_INTG:
1005                 LASSERT(ctx->cc_ops->sign);
1006                 rc = ctx->cc_ops->sign(ctx, req);
1007                 break;
1008         case SPTLRPC_SVC_PRIV:
1009                 LASSERT(ctx->cc_ops->seal);
1010                 rc = ctx->cc_ops->seal(ctx, req);
1011                 break;
1012         default:
1013                 LBUG();
1014         }
1015
1016         if (rc == 0) {
1017                 LASSERT(req->rq_reqdata_len);
1018                 LASSERT(req->rq_reqdata_len % 8 == 0);
1019                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1020         }
1021
1022         RETURN(rc);
1023 }
1024
/**
 * Verify/unseal the security layer of a received reply that sits at
 * req->rq_repdata (length rq_repdata_len). On success req->rq_repmsg
 * points at the embedded clear-text message.
 */
static int do_cli_unwrap_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        int                    rc;
        ENTRY;

        LASSERT(ctx);
        LASSERT(ctx->cc_sec);
        LASSERT(req->rq_repbuf);
        LASSERT(req->rq_repdata);
        LASSERT(req->rq_repmsg == NULL);

        req->rq_rep_swab_mask = 0;

        rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
        switch (rc) {
        case 1:
                /* message was byte-swapped during unpack: remember it */
                lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
                /* fallthrough */
        case 0:
                break;
        default:
                CERROR("failed unpack reply: x%llu\n", req->rq_xid);
                RETURN(-EPROTO);
        }

        if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
                CERROR("replied data length %d too small\n",
                       req->rq_repdata_len);
                RETURN(-EPROTO);
        }

        /* the reply must be wrapped with the same policy as the request */
        if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
            SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
                CERROR("reply policy %u doesn't match request policy %u\n",
                       SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
                       SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
                RETURN(-EPROTO);
        }

        switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(ctx->cc_ops->verify);
                rc = ctx->cc_ops->verify(ctx, req);
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(ctx->cc_ops->unseal);
                rc = ctx->cc_ops->unseal(ctx, req);
                break;
        default:
                LBUG();
        }
        LASSERT(rc || req->rq_repmsg || req->rq_resend);

        /* NOTE(review): the swab mask is cleared here for non-null
         * policies, presumably because verify/unseal already swabbed the
         * embedded message -- confirm against policy implementations */
        if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
            !req->rq_ctx_init)
                req->rq_rep_swab_mask = 0;
        RETURN(rc);
}
1085
1086 /**
1087  * Used by ptlrpc client, to perform security transformation upon the reply
1088  * message of \a req. After return successfully, req->rq_repmsg points to
1089  * the reply message in clear text.
1090  *
1091  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1092  * going to change.
1093  */
1094 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1095 {
1096         LASSERT(req->rq_repbuf);
1097         LASSERT(req->rq_repdata == NULL);
1098         LASSERT(req->rq_repmsg == NULL);
1099         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1100
1101         if (req->rq_reply_off == 0 &&
1102             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1103                 CERROR("real reply with offset 0\n");
1104                 return -EPROTO;
1105         }
1106
1107         if (req->rq_reply_off % 8 != 0) {
1108                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1109                 return -EPROTO;
1110         }
1111
1112         req->rq_repdata = (struct lustre_msg *)
1113                                 (req->rq_repbuf + req->rq_reply_off);
1114         req->rq_repdata_len = req->rq_nob_received;
1115
1116         return do_cli_unwrap_reply(req);
1117 }
1118
1119 /**
1120  * Used by ptlrpc client, to perform security transformation upon the early
1121  * reply message of \a req. We expect the rq_reply_off is 0, and
1122  * rq_nob_received is the early reply size.
1123  * 
1124  * Because the receive buffer might be still posted, the reply data might be
1125  * changed at any time, no matter we're holding rq_lock or not. For this reason
1126  * we allocate a separate ptlrpc_request and reply buffer for early reply
1127  * processing.
1128  *
1129  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1130  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1131  * \a *req_ret to release it.
1132  * \retval -ev error number, and \a req_ret will not be set.
1133  */
1134 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1135                                    struct ptlrpc_request **req_ret)
1136 {
1137         struct ptlrpc_request  *early_req;
1138         char                   *early_buf;
1139         int                     early_bufsz, early_size;
1140         int                     rc;
1141         ENTRY;
1142
1143         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1144         if (early_req == NULL)
1145                 RETURN(-ENOMEM);
1146
1147         ptlrpc_cli_req_init(early_req);
1148
1149         early_size = req->rq_nob_received;
1150         early_bufsz = size_roundup_power2(early_size);
1151         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1152         if (early_buf == NULL)
1153                 GOTO(err_req, rc = -ENOMEM);
1154
1155         /* sanity checkings and copy data out, do it inside spinlock */
1156         spin_lock(&req->rq_lock);
1157
1158         if (req->rq_replied) {
1159                 spin_unlock(&req->rq_lock);
1160                 GOTO(err_buf, rc = -EALREADY);
1161         }
1162
1163         LASSERT(req->rq_repbuf);
1164         LASSERT(req->rq_repdata == NULL);
1165         LASSERT(req->rq_repmsg == NULL);
1166
1167         if (req->rq_reply_off != 0) {
1168                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1169                 spin_unlock(&req->rq_lock);
1170                 GOTO(err_buf, rc = -EPROTO);
1171         }
1172
1173         if (req->rq_nob_received != early_size) {
1174                 /* even another early arrived the size should be the same */
1175                 CERROR("data size has changed from %u to %u\n",
1176                        early_size, req->rq_nob_received);
1177                 spin_unlock(&req->rq_lock);
1178                 GOTO(err_buf, rc = -EINVAL);
1179         }
1180
1181         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1182                 CERROR("early reply length %d too small\n",
1183                        req->rq_nob_received);
1184                 spin_unlock(&req->rq_lock);
1185                 GOTO(err_buf, rc = -EALREADY);
1186         }
1187
1188         memcpy(early_buf, req->rq_repbuf, early_size);
1189         spin_unlock(&req->rq_lock);
1190
1191         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1192         early_req->rq_flvr = req->rq_flvr;
1193         early_req->rq_repbuf = early_buf;
1194         early_req->rq_repbuf_len = early_bufsz;
1195         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1196         early_req->rq_repdata_len = early_size;
1197         early_req->rq_early = 1;
1198         early_req->rq_reqmsg = req->rq_reqmsg;
1199
1200         rc = do_cli_unwrap_reply(early_req);
1201         if (rc) {
1202                 DEBUG_REQ(D_ADAPTTO, early_req,
1203                           "error %d unwrap early reply", rc);
1204                 GOTO(err_ctx, rc);
1205         }
1206
1207         LASSERT(early_req->rq_repmsg);
1208         *req_ret = early_req;
1209         RETURN(0);
1210
1211 err_ctx:
1212         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1213 err_buf:
1214         OBD_FREE_LARGE(early_buf, early_bufsz);
1215 err_req:
1216         ptlrpc_request_cache_free(early_req);
1217         RETURN(rc);
1218 }
1219
1220 /**
1221  * Used by ptlrpc client, to release a processed early reply \a early_req.
1222  *
1223  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1224  */
1225 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1226 {
1227         LASSERT(early_req->rq_repbuf);
1228         LASSERT(early_req->rq_repdata);
1229         LASSERT(early_req->rq_repmsg);
1230
1231         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1232         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1233         ptlrpc_request_cache_free(early_req);
1234 }
1235
1236 /**************************************************
1237  * sec ID                                         *
1238  **************************************************/
1239
1240 /*
1241  * "fixed" sec (e.g. null) use sec_id < 0
1242  */
1243 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1244
1245 int sptlrpc_get_next_secid(void)
1246 {
1247         return atomic_inc_return(&sptlrpc_sec_id);
1248 }
1249 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1250
1251 /**************************************************
1252  * client side high-level security APIs           *
1253  **************************************************/
1254
/**
 * Flush cached contexts of \a sec belonging to \a uid (-1 means all
 * users, as used by the flush-all callers below) through the policy's
 * flush_ctx_cache hook; \a grace and \a force are passed through.
 */
static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
                                   int grace, int force)
{
        struct ptlrpc_sec_policy *policy = sec->ps_policy;

        /* flush_ctx_cache is mandatory for every policy */
        LASSERT(policy->sp_cops);
        LASSERT(policy->sp_cops->flush_ctx_cache);

        return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
}
1265
/**
 * Tear down \a sec via its policy's destroy_sec hook, then release the
 * reference the sec holds on its policy module. Must only be called
 * once all references and contexts are gone.
 */
static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
{
        struct ptlrpc_sec_policy *policy = sec->ps_policy;

        /* no remaining references or live contexts allowed */
        LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
        LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
        LASSERT(policy->sp_cops->destroy_sec);

        CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);

        policy->sp_cops->destroy_sec(sec);
        sptlrpc_policy_put(policy);
}
1279
/**
 * Destroy \a sec immediately: thin exported wrapper around
 * sec_cop_destroy_sec(), so refcount and nctx must already be zero.
 */
void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
{
        sec_cop_destroy_sec(sec);
}
EXPORT_SYMBOL(sptlrpc_sec_destroy);
1285
/**
 * Mark \a sec dead: let the policy invalidate it via its kill_sec hook
 * and then forcibly flush all cached contexts. Policies without a
 * kill_sec hook are left untouched.
 */
static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
        LASSERT_ATOMIC_POS(&sec->ps_refcount);

        if (sec->ps_policy->sp_cops->kill_sec) {
                sec->ps_policy->sp_cops->kill_sec(sec);

                /* uid -1 == every user; grace and force both set */
                sec_cop_flush_ctx_cache(sec, -1, 1, 1);
        }
}
1296
1297 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1298 {
1299         if (sec)
1300                 atomic_inc(&sec->ps_refcount);
1301
1302         return sec;
1303 }
1304 EXPORT_SYMBOL(sptlrpc_sec_get);
1305
1306 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1307 {
1308         if (sec) {
1309                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1310
1311                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1312                         sptlrpc_gc_del_sec(sec);
1313                         sec_cop_destroy_sec(sec);
1314                 }
1315         }
1316 }
1317 EXPORT_SYMBOL(sptlrpc_sec_put);
1318
1319 /*
1320  * policy module is responsible for taking refrence of import
1321  */
1322 static
1323 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1324                                        struct ptlrpc_svc_ctx *svc_ctx,
1325                                        struct sptlrpc_flavor *sf,
1326                                        enum lustre_sec_part sp)
1327 {
1328         struct ptlrpc_sec_policy *policy;
1329         struct ptlrpc_sec        *sec;
1330         char                      str[32];
1331         ENTRY;
1332
1333         if (svc_ctx) {
1334                 LASSERT(imp->imp_dlm_fake == 1);
1335
1336                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1337                        imp->imp_obd->obd_type->typ_name,
1338                        imp->imp_obd->obd_name,
1339                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1340
1341                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1342                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1343         } else {
1344                 LASSERT(imp->imp_dlm_fake == 0);
1345
1346                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1347                        imp->imp_obd->obd_type->typ_name,
1348                        imp->imp_obd->obd_name,
1349                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1350
1351                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1352                 if (!policy) {
1353                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1354                         RETURN(NULL);
1355                 }
1356         }
1357
1358         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1359         if (sec) {
1360                 atomic_inc(&sec->ps_refcount);
1361
1362                 sec->ps_part = sp;
1363
1364                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1365                         sptlrpc_gc_add_sec(sec);
1366         } else {
1367                 sptlrpc_policy_put(policy);
1368         }
1369
1370         RETURN(sec);
1371 }
1372
/**
 * Return imp->imp_sec with an extra reference, taken under imp_lock so
 * the sec can't be swapped away concurrently. May return NULL.
 */
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
{
        struct ptlrpc_sec *sec;

        spin_lock(&imp->imp_lock);
        sec = sptlrpc_sec_get(imp->imp_sec);
        spin_unlock(&imp->imp_lock);

        return sec;
}
EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1384
/**
 * Install \a sec as imp->imp_sec (pointer swap done under imp_lock),
 * then kill and release any previously installed sec outside the lock.
 */
static void sptlrpc_import_sec_install(struct obd_import *imp,
                                       struct ptlrpc_sec *sec)
{
        struct ptlrpc_sec *old_sec;

        LASSERT_ATOMIC_POS(&sec->ps_refcount);

        spin_lock(&imp->imp_lock);
        old_sec = imp->imp_sec;
        imp->imp_sec = sec;
        spin_unlock(&imp->imp_lock);

        if (old_sec) {
                /* invalidate and flush the displaced sec */
                sptlrpc_sec_kill(old_sec);

                /* balance the ref taken by this import */
                sptlrpc_sec_put(old_sec);
        }
}
1404
1405 static inline
1406 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1407 {
1408         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1409 }
1410
/* struct assignment copy. NOTE(review): flavor_equal() compares with
 * memcmp(), which includes padding bytes that plain assignment need not
 * copy -- flavors should be zero-initialized at creation; confirm */
static inline
void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
{
        *dst = *src;
}
1416
1417 /**
1418  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1419  * configuration. Upon called, imp->imp_sec may or may not be NULL.
1420  *
1421  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1422  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1423  */
1424 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1425                              struct ptlrpc_svc_ctx *svc_ctx,
1426                              struct sptlrpc_flavor *flvr)
1427 {
1428         struct ptlrpc_connection   *conn;
1429         struct sptlrpc_flavor       sf;
1430         struct ptlrpc_sec          *sec, *newsec;
1431         enum lustre_sec_part        sp;
1432         char                        str[24];
1433         int                         rc = 0;
1434         ENTRY;
1435
1436         might_sleep();
1437
1438         if (imp == NULL)
1439                 RETURN(0);
1440
1441         conn = imp->imp_connection;
1442
1443         if (svc_ctx == NULL) {
1444                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1445                 /*
1446                  * normal import, determine flavor from rule set, except
1447                  * for mgc the flavor is predetermined.
1448                  */
1449                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1450                         sf = cliobd->cl_flvr_mgc;
1451                 else 
1452                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1453                                                    cliobd->cl_sp_to,
1454                                                    &cliobd->cl_target_uuid,
1455                                                    conn->c_self, &sf);
1456
1457                 sp = imp->imp_obd->u.cli.cl_sp_me;
1458         } else {
1459                 /* reverse import, determine flavor from incoming reqeust */
1460                 sf = *flvr;
1461
1462                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1463                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1464                                       PTLRPC_SEC_FL_ROOTONLY;
1465
1466                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1467         }
1468
1469         sec = sptlrpc_import_sec_ref(imp);
1470         if (sec) {
1471                 char    str2[24];
1472
1473                 if (flavor_equal(&sf, &sec->ps_flvr))
1474                         GOTO(out, rc);
1475
1476                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1477                        imp->imp_obd->obd_name,
1478                        obd_uuid2str(&conn->c_remote_uuid),
1479                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1480                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1481         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1482                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1483                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1484                        imp->imp_obd->obd_name,
1485                        obd_uuid2str(&conn->c_remote_uuid),
1486                        LNET_NIDNET(conn->c_self),
1487                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1488         }
1489
1490         mutex_lock(&imp->imp_sec_mutex);
1491
1492         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1493         if (newsec) {
1494                 sptlrpc_import_sec_install(imp, newsec);
1495         } else {
1496                 CERROR("import %s->%s: failed to create new sec\n",
1497                        imp->imp_obd->obd_name,
1498                        obd_uuid2str(&conn->c_remote_uuid));
1499                 rc = -EPERM;
1500         }
1501
1502         mutex_unlock(&imp->imp_sec_mutex);
1503 out:
1504         sptlrpc_sec_put(sec);
1505         RETURN(rc);
1506 }
1507
/**
 * Kill and release the sec attached to \a imp, clearing imp->imp_sec.
 * NOTE(review): imp_sec is tested and cleared without imp_lock here --
 * callers presumably serialize import teardown; confirm.
 */
void sptlrpc_import_sec_put(struct obd_import *imp)
{
        if (imp->imp_sec) {
                sptlrpc_sec_kill(imp->imp_sec);

                sptlrpc_sec_put(imp->imp_sec);
                imp->imp_sec = NULL;
        }
}
1517
/**
 * Flush contexts of \a imp's sec for \a uid (-1 == all users); tolerates
 * a NULL import or an import without a sec.
 */
static void import_flush_ctx_common(struct obd_import *imp,
                                    uid_t uid, int grace, int force)
{
        struct ptlrpc_sec *sec;

        if (imp == NULL)
                return;

        sec = sptlrpc_import_sec_ref(imp);
        if (sec == NULL)
                return;

        sec_cop_flush_ctx_cache(sec, uid, grace, force);
        sptlrpc_sec_put(sec);
}
1533
/**
 * Flush root's (uid 0) contexts on \a imp, in grace mode, forced.
 */
void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
{
        /* it's important to use grace mode, see explain in
         * sptlrpc_req_refresh_ctx() */
        import_flush_ctx_common(imp, 0, 1, 1);
}
1540
/**
 * Flush the calling user's contexts on \a imp, in grace mode, forced.
 */
void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
{
        import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
                                1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1547
/**
 * Flush every user's contexts (uid -1) on \a imp, in grace mode, forced.
 */
void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
{
        import_flush_ctx_common(imp, -1, 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1553
1554 /**
1555  * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1556  * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1557  */
1558 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1559 {
1560         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1561         struct ptlrpc_sec_policy *policy;
1562         int rc;
1563
1564         LASSERT(ctx);
1565         LASSERT(ctx->cc_sec);
1566         LASSERT(ctx->cc_sec->ps_policy);
1567         LASSERT(req->rq_reqmsg == NULL);
1568         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1569
1570         policy = ctx->cc_sec->ps_policy;
1571         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1572         if (!rc) {
1573                 LASSERT(req->rq_reqmsg);
1574                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1575
1576                 /* zeroing preallocated buffer */
1577                 if (req->rq_pool)
1578                         memset(req->rq_reqmsg, 0, msgsize);
1579         }
1580
1581         return rc;
1582 }
1583
1584 /**
1585  * Used by ptlrpc client to free request buffer of \a req. After this
1586  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1587  */
1588 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1589 {
1590         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1591         struct ptlrpc_sec_policy *policy;
1592
1593         LASSERT(ctx);
1594         LASSERT(ctx->cc_sec);
1595         LASSERT(ctx->cc_sec->ps_policy);
1596         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1597
1598         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1599                 return;
1600
1601         policy = ctx->cc_sec->ps_policy;
1602         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1603         req->rq_reqmsg = NULL;
1604 }
1605
1606 /*
1607  * NOTE caller must guarantee the buffer size is enough for the enlargement
1608  */
1609 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1610                                   int segment, int newsize)
1611 {
1612         void   *src, *dst;
1613         int     oldsize, oldmsg_size, movesize;
1614
1615         LASSERT(segment < msg->lm_bufcount);
1616         LASSERT(msg->lm_buflens[segment] <= newsize);
1617
1618         if (msg->lm_buflens[segment] == newsize)
1619                 return;
1620
1621         /* nothing to do if we are enlarging the last segment */
1622         if (segment == msg->lm_bufcount - 1) {
1623                 msg->lm_buflens[segment] = newsize;
1624                 return;
1625         }
1626
1627         oldsize = msg->lm_buflens[segment];
1628
1629         src = lustre_msg_buf(msg, segment + 1, 0);
1630         msg->lm_buflens[segment] = newsize;
1631         dst = lustre_msg_buf(msg, segment + 1, 0);
1632         msg->lm_buflens[segment] = oldsize;
1633
1634         /* move from segment + 1 to end segment */
1635         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1636         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1637         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1638         LASSERT(movesize >= 0);
1639
1640         if (movesize)
1641                 memmove(dst, src, movesize);
1642
1643         /* note we don't clear the ares where old data live, not secret */
1644
1645         /* finally set new segment size */
1646         msg->lm_buflens[segment] = newsize;
1647 }
1648 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1649
1650 /**
1651  * Used by ptlrpc client to enlarge the \a segment of request message pointed
1652  * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be
1653  * preserved after the enlargement. this must be called after original request
1654  * buffer being allocated.
1655  *
1656  * \note after this be called, rq_reqmsg and rq_reqlen might have been changed,
1657  * so caller should refresh its local pointers if needed.
1658  */
1659 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1660                                const struct req_msg_field *field,
1661                                int newsize)
1662 {
1663         struct req_capsule *pill = &req->rq_pill;
1664         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1665         struct ptlrpc_sec_cops *cops;
1666         struct lustre_msg *msg = req->rq_reqmsg;
1667         int segment = __req_capsule_offset(pill, field, RCL_CLIENT);
1668
1669         LASSERT(ctx);
1670         LASSERT(msg);
1671         LASSERT(msg->lm_bufcount > segment);
1672         LASSERT(msg->lm_buflens[segment] <= newsize);
1673
1674         if (msg->lm_buflens[segment] == newsize)
1675                 return 0;
1676
1677         cops = ctx->cc_sec->ps_policy->sp_cops;
1678         LASSERT(cops->enlarge_reqbuf);
1679         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1680 }
1681 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1682
1683 /**
1684  * Used by ptlrpc client to allocate reply buffer of \a req.
1685  *
1686  * \note After this, req->rq_repmsg is still not accessible.
1687  */
1688 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1689 {
1690         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1691         struct ptlrpc_sec_policy *policy;
1692         ENTRY;
1693
1694         LASSERT(ctx);
1695         LASSERT(ctx->cc_sec);
1696         LASSERT(ctx->cc_sec->ps_policy);
1697
1698         if (req->rq_repbuf)
1699                 RETURN(0);
1700
1701         policy = ctx->cc_sec->ps_policy;
1702         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1703 }
1704
1705 /**
1706  * Used by ptlrpc client to free reply buffer of \a req. After this
1707  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1708  */
1709 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1710 {
1711         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1712         struct ptlrpc_sec_policy *policy;
1713         ENTRY;
1714
1715         LASSERT(ctx);
1716         LASSERT(ctx->cc_sec);
1717         LASSERT(ctx->cc_sec->ps_policy);
1718         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1719
1720         if (req->rq_repbuf == NULL)
1721                 return;
1722         LASSERT(req->rq_repbuf_len);
1723
1724         policy = ctx->cc_sec->ps_policy;
1725         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1726         req->rq_repmsg = NULL;
1727         EXIT;
1728 }
1729
1730 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1731                                 struct ptlrpc_cli_ctx *ctx)
1732 {
1733         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1734
1735         if (!policy->sp_cops->install_rctx)
1736                 return 0;
1737         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1738 }
1739
1740 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1741                                 struct ptlrpc_svc_ctx *ctx)
1742 {
1743         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1744
1745         if (!policy->sp_sops->install_rctx)
1746                 return 0;
1747         return policy->sp_sops->install_rctx(imp, ctx);
1748 }
1749
1750 /****************************************
1751  * server side security                 *
1752  ****************************************/
1753
1754 static int flavor_allowed(struct sptlrpc_flavor *exp,
1755                           struct ptlrpc_request *req)
1756 {
1757         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1758
1759         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1760                 return 1;
1761
1762         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1763             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1764             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1765             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1766                 return 1;
1767
1768         return 0;
1769 }
1770
/* lifetime (seconds) of a superseded flavor kept in exp_flvr_old[]: old
 * flavors remain acceptable for this long after a flavor switch */
#define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)

/**
 * Given an export \a exp, check whether the flavor of incoming \a req
 * is allowed by the export \a exp. Main logic is about taking care of
 * changing configurations. Return 0 means success.
 *
 * State kept on the export (all under exp->exp_lock):
 * - exp_flvr is the current flavor; exp_flvr_old[0]/[1] hold superseded
 *   flavors with expiry times exp_flvr_expire[0]/[1] (0 == slot unused);
 * - while exp_flvr_changed != 0, exp_flvr_old[1] holds the *pending* new
 *   flavor staged by sptlrpc_target_update_exp_flavor();
 * - exp_flvr_adapt != 0 means the reverse import's sec still needs to be
 *   adapted to the current flavor.
 */
int sptlrpc_target_export_check(struct obd_export *exp,
                                struct ptlrpc_request *req)
{
        struct sptlrpc_flavor   flavor;

        if (exp == NULL)
                return 0;

        /* client side export has no imp_reverse, skip
         * FIXME maybe we should check flavor this as well??? */
        if (exp->exp_imp_reverse == NULL)
                return 0;

        /* don't care about ctx fini rpc */
        if (req->rq_ctx_fini)
                return 0;

        spin_lock(&exp->exp_lock);

        /* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
         * the first req with the new flavor, then treat it as current flavor,
         * adapt reverse sec according to it.
         * note the first rpc with new flavor might not be with root ctx, in
         * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
        if (unlikely(exp->exp_flvr_changed) &&
            flavor_allowed(&exp->exp_flvr_old[1], req)) {
                /* make the new flavor as "current", and old ones as
                 * about-to-expire */
                CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
                       exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
                /* rotate: pending -> current, current -> old[0], old[0] ->
                 * old[1]; the freshly superseded flavor gets a new expiry */
                flavor = exp->exp_flvr_old[1];
                exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
                exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
                exp->exp_flvr_old[0] = exp->exp_flvr;
                exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
                                          EXP_FLVR_UPDATE_EXPIRE;
                exp->exp_flvr = flavor;

                /* flavor change finished */
                exp->exp_flvr_changed = 0;
                LASSERT(exp->exp_flvr_adapt == 1);

                /* if it's gss, we only interested in root ctx init */
                if (req->rq_auth_gss &&
                    !(req->rq_ctx_init &&
                      (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
                       req->rq_auth_usr_ost))) {
                        spin_unlock(&exp->exp_lock);
                        CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
                               req->rq_auth_gss, req->rq_ctx_init,
                               req->rq_auth_usr_root, req->rq_auth_usr_mdt,
                               req->rq_auth_usr_ost);
                        /* accept, but leave exp_flvr_adapt == 1 so a later
                         * root ctx init triggers the delayed sec_adapt */
                        return 0;
                }

                exp->exp_flvr_adapt = 0;
                spin_unlock(&exp->exp_lock);

                return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                req->rq_svc_ctx, &flavor);
        }

        /* if it equals to the current flavor, we accept it, but need to
         * dealing with reverse sec/ctx */
        if (likely(flavor_allowed(&exp->exp_flvr, req))) {
                /* most cases should return here, we only interested in
                 * gss root ctx init */
                if (!req->rq_auth_gss || !req->rq_ctx_init ||
                    (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
                     !req->rq_auth_usr_ost)) {
                        spin_unlock(&exp->exp_lock);
                        return 0;
                }

                /* if flavor just changed, we should not proceed, just leave
                 * it and current flavor will be discovered and replaced
                 * shortly, and let _this_ rpc pass through */
                if (exp->exp_flvr_changed) {
                        LASSERT(exp->exp_flvr_adapt);
                        spin_unlock(&exp->exp_lock);
                        return 0;
                }

                if (exp->exp_flvr_adapt) {
                        /* delayed adapt: an earlier flavor switch was done by
                         * a non-root request, finish the sec_adapt now */
                        exp->exp_flvr_adapt = 0;
                        CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
                               exp, exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[0].sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                        flavor = exp->exp_flvr;
                        spin_unlock(&exp->exp_lock);

                        return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                        req->rq_svc_ctx,
                                                        &flavor);
                } else {
                        CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
                               "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[0].sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                        spin_unlock(&exp->exp_lock);

                        return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
                                                           req->rq_svc_ctx);
                }
        }

        /* req doesn't match the current flavor; try the most recently
         * superseded one (old[0]) if its grace period hasn't passed */
        if (exp->exp_flvr_expire[0]) {
                if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
                        if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
                                CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (%lld)\n", exp,
                                       exp->exp_flvr.sf_rpc,
                                       exp->exp_flvr_old[0].sf_rpc,
                                       exp->exp_flvr_old[1].sf_rpc,
                                       (s64)(exp->exp_flvr_expire[0] -
                                       ktime_get_real_seconds()));
                                spin_unlock(&exp->exp_lock);
                                return 0;
                        }
                } else {
                        CDEBUG(D_SEC, "mark middle expired\n");
                        exp->exp_flvr_expire[0] = 0;
                }
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
                       exp->exp_flvr.sf_rpc,
                       exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
                       req->rq_flvr.sf_rpc);
        }

        /* now it doesn't match the current flavor, the only chance we can
         * accept it is match the old flavors which is not expired.
         * note old[1] only holds an expired flavor when no flavor change is
         * pending (exp_flvr_changed == 0), otherwise it's the pending one */
        if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
                if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
                        if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
                                CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
                                       exp,
                                       exp->exp_flvr.sf_rpc,
                                       exp->exp_flvr_old[0].sf_rpc,
                                       exp->exp_flvr_old[1].sf_rpc,
                                       (s64)(exp->exp_flvr_expire[1] -
                                       ktime_get_real_seconds()));
                                spin_unlock(&exp->exp_lock);
                                return 0;
                        }
                } else {
                        CDEBUG(D_SEC, "mark oldest expired\n");
                        exp->exp_flvr_expire[1] = 0;
                }
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
                       exp, exp->exp_flvr.sf_rpc,
                       exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
                       req->rq_flvr.sf_rpc);
        } else {
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
                       exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
                       exp->exp_flvr_old[1].sf_rpc);
        }

        spin_unlock(&exp->exp_lock);

        /* no flavor matched: reject the request */
        CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
              exp, exp->exp_obd->obd_name,
              req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
              req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
              req->rq_flvr.sf_rpc,
              exp->exp_flvr.sf_rpc,
              exp->exp_flvr_old[0].sf_rpc,
              exp->exp_flvr_expire[0] ?
              (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
              exp->exp_flvr_old[1].sf_rpc,
              exp->exp_flvr_expire[1] ?
              (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
        return -EACCES;
}
EXPORT_SYMBOL(sptlrpc_target_export_check);
1953
/**
 * Re-evaluate the sptlrpc flavor of every connected export of \a obd
 * against rule set \a rset (e.g. after a configuration change). For each
 * export whose chosen flavor differs from the current one, the new flavor
 * is staged in exp_flvr_old[1] and exp_flvr_changed/exp_flvr_adapt are
 * set; the actual switch is completed by sptlrpc_target_export_check()
 * when the first request carrying the new flavor arrives.
 *
 * Locking: obd->obd_dev_lock protects the export list walk, and each
 * export's fields are updated under its own exp_lock.
 */
void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
                                      struct sptlrpc_rule_set *rset)
{
        struct obd_export       *exp;
        struct sptlrpc_flavor    new_flvr;

        LASSERT(obd);

        spin_lock(&obd->obd_dev_lock);

        list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
                /* skip exports with no established connection */
                if (exp->exp_connection == NULL)
                        continue;

                /* note if this export had just been updated flavor
                 * (exp_flvr_changed == 1), this will override the
                 * previous one. */
                spin_lock(&exp->exp_lock);
                sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
                                             exp->exp_connection->c_peer.nid,
                                             &new_flvr);
                if (exp->exp_flvr_changed ||
                    !flavor_equal(&new_flvr, &exp->exp_flvr)) {
                        /* stage the pending flavor; its expire slot stays 0
                         * until the switch actually happens */
                        exp->exp_flvr_old[1] = new_flvr;
                        exp->exp_flvr_expire[1] = 0;
                        exp->exp_flvr_changed = 1;
                        exp->exp_flvr_adapt = 1;

                        CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
                               exp, sptlrpc_part2name(exp->exp_sp_peer),
                               exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                }
                spin_unlock(&exp->exp_lock);
        }

        spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1993
1994 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1995 {
1996         /* peer's claim is unreliable unless gss is being used */
1997         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
1998                 return svc_rc;
1999
2000         switch (req->rq_sp_from) {
2001         case LUSTRE_SP_CLI:
2002                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2003                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2004                         svc_rc = SECSVC_DROP;
2005                 }
2006                 break;
2007         case LUSTRE_SP_MDT:
2008                 if (!req->rq_auth_usr_mdt) {
2009                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2010                         svc_rc = SECSVC_DROP;
2011                 }
2012                 break;
2013         case LUSTRE_SP_OST:
2014                 if (!req->rq_auth_usr_ost) {
2015                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2016                         svc_rc = SECSVC_DROP;
2017                 }
2018                 break;
2019         case LUSTRE_SP_MGS:
2020         case LUSTRE_SP_MGC:
2021                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2022                     !req->rq_auth_usr_ost) {
2023                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2024                         svc_rc = SECSVC_DROP;
2025                 }
2026                 break;
2027         case LUSTRE_SP_ANY:
2028         default:
2029                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2030                 svc_rc = SECSVC_DROP;
2031         }
2032
2033         return svc_rc;
2034 }
2035
2036 /**
2037  * Used by ptlrpc server, to perform transformation upon request message of
2038  * incoming \a req. This must be the first thing to do with an incoming
2039  * request in ptlrpc layer.
2040  *
2041  * \retval SECSVC_OK success, and req->rq_reqmsg point to request message in
2042  * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
2043  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2044  * reply message has been prepared.
2045  * \retval SECSVC_DROP failed, this request should be dropped.
2046  */
2047 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2048 {
2049         struct ptlrpc_sec_policy *policy;
2050         struct lustre_msg        *msg = req->rq_reqbuf;
2051         int                       rc;
2052         ENTRY;
2053
2054         LASSERT(msg);
2055         LASSERT(req->rq_reqmsg == NULL);
2056         LASSERT(req->rq_repmsg == NULL);
2057         LASSERT(req->rq_svc_ctx == NULL);
2058
2059         req->rq_req_swab_mask = 0;
2060
2061         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2062         switch (rc) {
2063         case 1:
2064                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2065         case 0:
2066                 break;
2067         default:
2068                 CERROR("error unpacking request from %s x%llu\n",
2069                        libcfs_id2str(req->rq_peer), req->rq_xid);
2070                 RETURN(SECSVC_DROP);
2071         }
2072
2073         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2074         req->rq_sp_from = LUSTRE_SP_ANY;
2075         req->rq_auth_uid = -1;          /* set to INVALID_UID */
2076         req->rq_auth_mapped_uid = -1;
2077
2078         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2079         if (!policy) {
2080                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2081                 RETURN(SECSVC_DROP);
2082         }
2083
2084         LASSERT(policy->sp_sops->accept);
2085         rc = policy->sp_sops->accept(req);
2086         sptlrpc_policy_put(policy);
2087         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2088         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2089
2090         /*
2091          * if it's not null flavor (which means embedded packing msg),
2092          * reset the swab mask for the comming inner msg unpacking.
2093          */
2094         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2095                 req->rq_req_swab_mask = 0;
2096
2097         /* sanity check for the request source */
2098         rc = sptlrpc_svc_check_from(req, rc);
2099         RETURN(rc);
2100 }
2101
2102 /**
2103  * Used by ptlrpc server, to allocate reply buffer for \a req. If succeed,
2104  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg point to
2105  * a buffer of \a msglen size.
2106  */
2107 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2108 {
2109         struct ptlrpc_sec_policy *policy;
2110         struct ptlrpc_reply_state *rs;
2111         int rc;
2112         ENTRY;
2113
2114         LASSERT(req->rq_svc_ctx);
2115         LASSERT(req->rq_svc_ctx->sc_policy);
2116
2117         policy = req->rq_svc_ctx->sc_policy;
2118         LASSERT(policy->sp_sops->alloc_rs);
2119
2120         rc = policy->sp_sops->alloc_rs(req, msglen);
2121         if (unlikely(rc == -ENOMEM)) {
2122                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2123                 if (svcpt->scp_service->srv_max_reply_size <
2124                    msglen + sizeof(struct ptlrpc_reply_state)) {
2125                         /* Just return failure if the size is too big */
2126                         CERROR("size of message is too big (%zd), %d allowed\n",
2127                                 msglen + sizeof(struct ptlrpc_reply_state),
2128                                 svcpt->scp_service->srv_max_reply_size);
2129                         RETURN(-ENOMEM);
2130                 }
2131
2132                 /* failed alloc, try emergency pool */
2133                 rs = lustre_get_emerg_rs(svcpt);
2134                 if (rs == NULL)
2135                         RETURN(-ENOMEM);
2136
2137                 req->rq_reply_state = rs;
2138                 rc = policy->sp_sops->alloc_rs(req, msglen);
2139                 if (rc) {
2140                         lustre_put_emerg_rs(rs);
2141                         req->rq_reply_state = NULL;
2142                 }
2143         }
2144
2145         LASSERT(rc != 0 ||
2146                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2147
2148         RETURN(rc);
2149 }
2150
2151 /**
2152  * Used by ptlrpc server, to perform transformation upon reply message.
2153  *
2154  * \post req->rq_reply_off is set to approriate server-controlled reply offset.
2155  * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2156  */
2157 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2158 {
2159         struct ptlrpc_sec_policy *policy;
2160         int rc;
2161         ENTRY;
2162
2163         LASSERT(req->rq_svc_ctx);
2164         LASSERT(req->rq_svc_ctx->sc_policy);
2165
2166         policy = req->rq_svc_ctx->sc_policy;
2167         LASSERT(policy->sp_sops->authorize);
2168
2169         rc = policy->sp_sops->authorize(req);
2170         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2171
2172         RETURN(rc);
2173 }
2174
2175 /**
2176  * Used by ptlrpc server, to free reply_state.
2177  */
2178 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2179 {
2180         struct ptlrpc_sec_policy *policy;
2181         unsigned int prealloc;
2182         ENTRY;
2183
2184         LASSERT(rs->rs_svc_ctx);
2185         LASSERT(rs->rs_svc_ctx->sc_policy);
2186
2187         policy = rs->rs_svc_ctx->sc_policy;
2188         LASSERT(policy->sp_sops->free_rs);
2189
2190         prealloc = rs->rs_prealloc;
2191         policy->sp_sops->free_rs(rs);
2192
2193         if (prealloc)
2194                 lustre_put_emerg_rs(rs);
2195         EXIT;
2196 }
2197
2198 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2199 {
2200         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2201
2202         if (ctx != NULL)
2203                 atomic_inc(&ctx->sc_refcount);
2204 }
2205
2206 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2207 {
2208         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2209
2210         if (ctx == NULL)
2211                 return;
2212
2213         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2214         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2215                 if (ctx->sc_policy->sp_sops->free_ctx)
2216                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2217         }
2218         req->rq_svc_ctx = NULL;
2219 }
2220
2221 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2222 {
2223         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2224
2225         if (ctx == NULL)
2226                 return;
2227
2228         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2229         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2230                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2231 }
2232 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2233
2234 /****************************************
2235  * bulk security                        *
2236  ****************************************/
2237
2238 /**
2239  * Perform transformation upon bulk data pointed by \a desc. This is called
2240  * before transforming the request message.
2241  */
2242 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2243                           struct ptlrpc_bulk_desc *desc)
2244 {
2245         struct ptlrpc_cli_ctx *ctx;
2246
2247         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2248
2249         if (!req->rq_pack_bulk)
2250                 return 0;
2251
2252         ctx = req->rq_cli_ctx;
2253         if (ctx->cc_ops->wrap_bulk)
2254                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2255         return 0;
2256 }
2257 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2258
2259 /**
2260  * This is called after unwrap the reply message.
2261  * return nob of actual plain text size received, or error code.
2262  */
2263 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2264                                  struct ptlrpc_bulk_desc *desc,
2265                                  int nob)
2266 {
2267         struct ptlrpc_cli_ctx  *ctx;
2268         int                     rc;
2269
2270         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2271
2272         if (!req->rq_pack_bulk)
2273                 return desc->bd_nob_transferred;
2274
2275         ctx = req->rq_cli_ctx;
2276         if (ctx->cc_ops->unwrap_bulk) {
2277                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2278                 if (rc < 0)
2279                         return rc;
2280         }
2281         return desc->bd_nob_transferred;
2282 }
2283 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2284
2285 /**
2286  * This is called after unwrap the reply message.
2287  * return 0 for success or error code.
2288  */
2289 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2290                                   struct ptlrpc_bulk_desc *desc)
2291 {
2292         struct ptlrpc_cli_ctx  *ctx;
2293         int                     rc;
2294
2295         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2296
2297         if (!req->rq_pack_bulk)
2298                 return 0;
2299
2300         ctx = req->rq_cli_ctx;
2301         if (ctx->cc_ops->unwrap_bulk) {
2302                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2303                 if (rc < 0)
2304                         return rc;
2305         }
2306
2307         /*
2308          * if everything is going right, nob should equals to nob_transferred.
2309          * in case of privacy mode, nob_transferred needs to be adjusted.
2310          */
2311         if (desc->bd_nob != desc->bd_nob_transferred) {
2312                 CERROR("nob %d doesn't match transferred nob %d\n",
2313                        desc->bd_nob, desc->bd_nob_transferred);
2314                 return -EPROTO;
2315         }
2316
2317         return 0;
2318 }
2319 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2320
2321 #ifdef HAVE_SERVER_SUPPORT
2322 /**
2323  * Performe transformation upon outgoing bulk read.
2324  */
2325 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2326                           struct ptlrpc_bulk_desc *desc)
2327 {
2328         struct ptlrpc_svc_ctx *ctx;
2329
2330         LASSERT(req->rq_bulk_read);
2331
2332         if (!req->rq_pack_bulk)
2333                 return 0;
2334
2335         ctx = req->rq_svc_ctx;
2336         if (ctx->sc_policy->sp_sops->wrap_bulk)
2337                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2338
2339         return 0;
2340 }
2341 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2342
2343 /**
2344  * Performe transformation upon incoming bulk write.
2345  */
2346 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2347                             struct ptlrpc_bulk_desc *desc)
2348 {
2349         struct ptlrpc_svc_ctx *ctx;
2350         int                    rc;
2351
2352         LASSERT(req->rq_bulk_write);
2353
2354         /*
2355          * if it's in privacy mode, transferred should >= expected; otherwise
2356          * transferred should == expected.
2357          */
2358         if (desc->bd_nob_transferred < desc->bd_nob ||
2359             (desc->bd_nob_transferred > desc->bd_nob &&
2360              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2361              SPTLRPC_BULK_SVC_PRIV)) {
2362                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2363                           desc->bd_nob_transferred, desc->bd_nob);
2364                 return -ETIMEDOUT;
2365         }
2366
2367         if (!req->rq_pack_bulk)
2368                 return 0;
2369
2370         ctx = req->rq_svc_ctx;
2371         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2372                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2373                 if (rc)
2374                         CERROR("error unwrap bulk: %d\n", rc);
2375         }
2376
2377         /* return 0 to allow reply be sent */
2378         return 0;
2379 }
2380 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2381
2382 /**
2383  * Prepare buffers for incoming bulk write.
2384  */
2385 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2386                           struct ptlrpc_bulk_desc *desc)
2387 {
2388         struct ptlrpc_svc_ctx *ctx;
2389
2390         LASSERT(req->rq_bulk_write);
2391
2392         if (!req->rq_pack_bulk)
2393                 return 0;
2394
2395         ctx = req->rq_svc_ctx;
2396         if (ctx->sc_policy->sp_sops->prep_bulk)
2397                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2398
2399         return 0;
2400 }
2401 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2402
2403 #endif /* HAVE_SERVER_SUPPORT */
2404
2405 /****************************************
2406  * user descriptor helpers              *
2407  ****************************************/
2408
2409 int sptlrpc_current_user_desc_size(void)
2410 {
2411         int ngroups;
2412
2413         ngroups = current_ngroups;
2414
2415         if (ngroups > LUSTRE_MAX_GROUPS)
2416                 ngroups = LUSTRE_MAX_GROUPS;
2417         return sptlrpc_user_desc_size(ngroups);
2418 }
2419 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2420
/**
 * Fill the user descriptor at \a offset of \a msg with the identity
 * (uid/gid/fsuid/fsgid/capabilities/supplementary groups) of the current
 * task. The buffer is expected to have been sized via
 * sptlrpc_current_user_desc_size(). Always returns 0.
 */
int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_user_desc *pud;

        pud = lustre_msg_buf(msg, offset, 0);

        /* ids are translated into the initial user namespace */
        pud->pud_uid = from_kuid(&init_user_ns, current_uid());
        pud->pud_gid = from_kgid(&init_user_ns, current_gid());
        pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
        pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
        pud->pud_cap = cfs_curproc_cap_pack();
        /* number of 4-byte group slots the caller sized into the buffer */
        pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;

        /* task_lock keeps the credential group list stable while copying */
        task_lock(current);
        if (pud->pud_ngroups > current_ngroups)
                pud->pud_ngroups = current_ngroups;
#ifdef HAVE_GROUP_INFO_GID
        memcpy(pud->pud_groups, current_cred()->group_info->gid,
               pud->pud_ngroups * sizeof(__u32));
#else /* !HAVE_GROUP_INFO_GID */
        /* older kernels: group_info is block-based; NOTE(review): only
         * blocks[0] is copied -- assumes pud_ngroups fits the first block,
         * presumably guaranteed by the LUSTRE_MAX_GROUPS cap; confirm */
        memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
               pud->pud_ngroups * sizeof(__u32));
#endif /* HAVE_GROUP_INFO_GID */
        task_unlock(current);

        return 0;
}
EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2449
2450 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2451 {
2452         struct ptlrpc_user_desc *pud;
2453         int                      i;
2454
2455         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2456         if (!pud)
2457                 return -EINVAL;
2458
2459         if (swabbed) {
2460                 __swab32s(&pud->pud_uid);
2461                 __swab32s(&pud->pud_gid);
2462                 __swab32s(&pud->pud_fsuid);
2463                 __swab32s(&pud->pud_fsgid);
2464                 __swab32s(&pud->pud_cap);
2465                 __swab32s(&pud->pud_ngroups);
2466         }
2467
2468         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2469                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2470                 return -EINVAL;
2471         }
2472
2473         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2474             msg->lm_buflens[offset]) {
2475                 CERROR("%u groups are claimed but bufsize only %u\n",
2476                        pud->pud_ngroups, msg->lm_buflens[offset]);
2477                 return -EINVAL;
2478         }
2479
2480         if (swabbed) {
2481                 for (i = 0; i < pud->pud_ngroups; i++)
2482                         __swab32s(&pud->pud_groups[i]);
2483         }
2484
2485         return 0;
2486 }
2487 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2488
2489 /****************************************
2490  * misc helpers                         *
2491  ****************************************/
2492
2493 const char * sec2target_str(struct ptlrpc_sec *sec)
2494 {
2495         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2496                 return "*";
2497         if (sec_is_reverse(sec))
2498                 return "c";
2499         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2500 }
2501 EXPORT_SYMBOL(sec2target_str);
2502
2503 /*
2504  * return true if the bulk data is protected
2505  */
2506 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2507 {
2508         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2509         case SPTLRPC_BULK_SVC_INTG:
2510         case SPTLRPC_BULK_SVC_PRIV:
2511                 return 1;
2512         default:
2513                 return 0;
2514         }
2515 }
2516 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2517
2518 /****************************************
2519  * crypto API helper/alloc blkciper     *
2520  ****************************************/
2521
2522 /****************************************
2523  * initialize/finalize                  *
2524  ****************************************/
2525
2526 int sptlrpc_init(void)
2527 {
2528         int rc;
2529
2530         rwlock_init(&policy_lock);
2531
2532         rc = sptlrpc_gc_init();
2533         if (rc)
2534                 goto out;
2535
2536         rc = sptlrpc_conf_init();
2537         if (rc)
2538                 goto out_gc;
2539
2540         rc = sptlrpc_enc_pool_init();
2541         if (rc)
2542                 goto out_conf;
2543
2544         rc = sptlrpc_null_init();
2545         if (rc)
2546                 goto out_pool;
2547
2548         rc = sptlrpc_plain_init();
2549         if (rc)
2550                 goto out_null;
2551
2552         rc = sptlrpc_lproc_init();
2553         if (rc)
2554                 goto out_plain;
2555
2556         return 0;
2557
2558 out_plain:
2559         sptlrpc_plain_fini();
2560 out_null:
2561         sptlrpc_null_fini();
2562 out_pool:
2563         sptlrpc_enc_pool_fini();
2564 out_conf:
2565         sptlrpc_conf_fini();
2566 out_gc:
2567         sptlrpc_gc_fini();
2568 out:
2569         return rc;
2570 }
2571
/* Tear down the sptlrpc subsystem, in the reverse order of sptlrpc_init() */
void sptlrpc_fini(void)
{
        sptlrpc_lproc_fini();
        sptlrpc_plain_fini();
        sptlrpc_null_fini();
        sptlrpc_enc_pool_fini();
        sptlrpc_conf_fini();
        sptlrpc_gc_fini();
}