LU-7030 security: put imp_sec after all requests drained off
[fs/lustre-release.git] / lustre / ptlrpc / sec.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2014, Intel Corporation.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_SEC
42
43 #include <linux/user_namespace.h>
44 #ifdef HAVE_UIDGID_HEADER
45 # include <linux/uidgid.h>
46 #endif
47 #include <linux/crypto.h>
48 #include <linux/key.h>
49
50 #include <libcfs/libcfs.h>
51 #include <obd.h>
52 #include <obd_class.h>
53 #include <obd_support.h>
54 #include <lustre_net.h>
55 #include <lustre_import.h>
56 #include <lustre_dlm.h>
57 #include <lustre_sec.h>
58
59 #include "ptlrpc_internal.h"
60
61 /***********************************************
62  * policy registers                            *
63  ***********************************************/
64
65 static rwlock_t policy_lock;
66 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
67         NULL,
68 };
69
70 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
71 {
72         __u16 number = policy->sp_policy;
73
74         LASSERT(policy->sp_name);
75         LASSERT(policy->sp_cops);
76         LASSERT(policy->sp_sops);
77
78         if (number >= SPTLRPC_POLICY_MAX)
79                 return -EINVAL;
80
81         write_lock(&policy_lock);
82         if (unlikely(policies[number])) {
83                 write_unlock(&policy_lock);
84                 return -EALREADY;
85         }
86         policies[number] = policy;
87         write_unlock(&policy_lock);
88
89         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
90         return 0;
91 }
92 EXPORT_SYMBOL(sptlrpc_register_policy);
93
94 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
95 {
96         __u16 number = policy->sp_policy;
97
98         LASSERT(number < SPTLRPC_POLICY_MAX);
99
100         write_lock(&policy_lock);
101         if (unlikely(policies[number] == NULL)) {
102                 write_unlock(&policy_lock);
103                 CERROR("%s: already unregistered\n", policy->sp_name);
104                 return -EINVAL;
105         }
106
107         LASSERT(policies[number] == policy);
108         policies[number] = NULL;
109         write_unlock(&policy_lock);
110
111         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
112         return 0;
113 }
114 EXPORT_SYMBOL(sptlrpc_unregister_policy);
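
/*
 * Illustrative sketch (not part of the original file): how a security
 * policy module might use the register/unregister pair above from its
 * module init/exit hooks. The "example" policy, its name, the chosen
 * policy number and the two ops tables are hypothetical; the struct
 * fields shown are the ones checked by sptlrpc_register_policy().
 *
 *	static struct ptlrpc_sec_policy example_policy = {
 *		.sp_owner  = THIS_MODULE,
 *		.sp_name   = "sec.example",
 *		.sp_policy = SPTLRPC_POLICY_GSS,
 *		.sp_cops   = &example_cli_ops,
 *		.sp_sops   = &example_svc_ops,
 *	};
 *
 *	static int __init example_sec_init(void)
 *	{
 *		return sptlrpc_register_policy(&example_policy);
 *	}
 *
 *	static void __exit example_sec_exit(void)
 *	{
 *		sptlrpc_unregister_policy(&example_policy);
 *	}
 */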
115
116 static
117 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
118 {
119         static DEFINE_MUTEX(load_mutex);
120         static atomic_t           loaded = ATOMIC_INIT(0);
121         struct ptlrpc_sec_policy *policy;
122         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
123         __u16                     flag = 0;
124
125         if (number >= SPTLRPC_POLICY_MAX)
126                 return NULL;
127
128         while (1) {
129                 read_lock(&policy_lock);
130                 policy = policies[number];
131                 if (policy && !try_module_get(policy->sp_owner))
132                         policy = NULL;
133                 if (policy == NULL)
134                         flag = atomic_read(&loaded);
135                 read_unlock(&policy_lock);
136
137                 if (policy != NULL || flag != 0 ||
138                     number != SPTLRPC_POLICY_GSS)
139                         break;
140
141                 /* try to load gss module, once */
142                 mutex_lock(&load_mutex);
143                 if (atomic_read(&loaded) == 0) {
144                         if (request_module("ptlrpc_gss") == 0)
145                                 CDEBUG(D_SEC,
146                                        "module ptlrpc_gss loaded on demand\n");
147                         else
148                                 CERROR("Unable to load module ptlrpc_gss\n");
149
150                         atomic_set(&loaded, 1);
151                 }
152                 mutex_unlock(&load_mutex);
153         }
154
155         return policy;
156 }
157
158 __u32 sptlrpc_name2flavor_base(const char *name)
159 {
160         if (!strcmp(name, "null"))
161                 return SPTLRPC_FLVR_NULL;
162         if (!strcmp(name, "plain"))
163                 return SPTLRPC_FLVR_PLAIN;
164         if (!strcmp(name, "gssnull"))
165                 return SPTLRPC_FLVR_GSSNULL;
166         if (!strcmp(name, "krb5n"))
167                 return SPTLRPC_FLVR_KRB5N;
168         if (!strcmp(name, "krb5a"))
169                 return SPTLRPC_FLVR_KRB5A;
170         if (!strcmp(name, "krb5i"))
171                 return SPTLRPC_FLVR_KRB5I;
172         if (!strcmp(name, "krb5p"))
173                 return SPTLRPC_FLVR_KRB5P;
174         if (!strcmp(name, "ski"))
175                 return SPTLRPC_FLVR_SKI;
176         if (!strcmp(name, "skpi"))
177                 return SPTLRPC_FLVR_SKPI;
178
179         return SPTLRPC_FLVR_INVALID;
180 }
181 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
182
183 const char *sptlrpc_flavor2name_base(__u32 flvr)
184 {
185         __u32   base = SPTLRPC_FLVR_BASE(flvr);
186
187         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
188                 return "null";
189         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
190                 return "plain";
191         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
192                 return "gssnull";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
194                 return "krb5n";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
196                 return "krb5a";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
198                 return "krb5i";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
200                 return "krb5p";
201         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
202                 return "ski";
203         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
204                 return "skpi";
205
206         CERROR("invalid wire flavor 0x%x\n", flvr);
207         return "invalid";
208 }
209 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
210
211 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
212                                char *buf, int bufsize)
213 {
214         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
215                 snprintf(buf, bufsize, "hash:%s",
216                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
217         else
218                 snprintf(buf, bufsize, "%s",
219                          sptlrpc_flavor2name_base(sf->sf_rpc));
220
221         buf[bufsize - 1] = '\0';
222         return buf;
223 }
224 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
225
226 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
227 {
228         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
229
230         /*
231          * currently we don't support customized bulk specification for
232          * flavors other than plain
233          */
234         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
235                 char bspec[16];
236
237                 bspec[0] = '-';
238                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
239                 strlcat(buf, bspec, bufsize);
240         }
241
242         buf[bufsize - 1] = '\0';
243         return buf;
244 }
245 EXPORT_SYMBOL(sptlrpc_flavor2name);
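
/*
 * Illustrative usage sketch (hypothetical caller, not part of the original
 * file): formatting a flavor for a debug message, mirroring how
 * sptlrpc_flavor2name() is called later in this file.
 *
 *	char str[32];
 *
 *	CDEBUG(D_SEC, "using flavor %s\n",
 *	       sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)));
 */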
246
247 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
248 {
249         buf[0] = '\0';
250
251         if (flags & PTLRPC_SEC_FL_REVERSE)
252                 strlcat(buf, "reverse,", bufsize);
253         if (flags & PTLRPC_SEC_FL_ROOTONLY)
254                 strlcat(buf, "rootonly,", bufsize);
255         if (flags & PTLRPC_SEC_FL_UDESC)
256                 strlcat(buf, "udesc,", bufsize);
257         if (flags & PTLRPC_SEC_FL_BULK)
258                 strlcat(buf, "bulk,", bufsize);
259         if (buf[0] == '\0')
260                 strlcat(buf, "-,", bufsize);
261
262         return buf;
263 }
264 EXPORT_SYMBOL(sptlrpc_secflags2str);
265
266 /**************************************************
267  * client context APIs                            *
268  **************************************************/
269
270 static
271 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
272 {
273         struct vfs_cred vcred;
274         int create = 1, remove_dead = 1;
275
276         LASSERT(sec);
277         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
278
279         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
280                                      PTLRPC_SEC_FL_ROOTONLY)) {
281                 vcred.vc_uid = 0;
282                 vcred.vc_gid = 0;
283                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
284                         create = 0;
285                         remove_dead = 0;
286                 }
287         } else {
288                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
289                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
290         }
291
292         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
293                                                    remove_dead);
294 }
295
296 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
297 {
298         atomic_inc(&ctx->cc_refcount);
299         return ctx;
300 }
301 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
302
303 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
304 {
305         struct ptlrpc_sec *sec = ctx->cc_sec;
306
307         LASSERT(sec);
308         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
309
310         if (!atomic_dec_and_test(&ctx->cc_refcount))
311                 return;
312
313         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
314 }
315 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
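
/*
 * Illustrative sketch (not part of the original file): the get/put pairing
 * for a client context, as used for example by
 * sptlrpc_req_replace_dead_ctx() below. A non-zero sync argument allows the
 * final put to release the context synchronously.
 *
 *	sptlrpc_cli_ctx_get(ctx);
 *	... use ctx ...
 *	sptlrpc_cli_ctx_put(ctx, 1);
 */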
316
317 /**
318  * Expire the client context immediately.
319  *
320  * \pre Caller must hold at least 1 reference on the \a ctx.
321  */
322 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
323 {
324         LASSERT(ctx->cc_ops->die);
325         ctx->cc_ops->die(ctx, 0);
326 }
327 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
328
329 /**
330  * Wake up the threads that are waiting for this client context. Called
331  * after some status change has happened on \a ctx.
332  */
333 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
334 {
335         struct ptlrpc_request *req, *next;
336
337         spin_lock(&ctx->cc_lock);
338         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
339                                      rq_ctx_chain) {
340                 list_del_init(&req->rq_ctx_chain);
341                 ptlrpc_client_wake_req(req);
342         }
343         spin_unlock(&ctx->cc_lock);
344 }
345 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
346
347 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
348 {
349         LASSERT(ctx->cc_ops);
350
351         if (ctx->cc_ops->display == NULL)
352                 return 0;
353
354         return ctx->cc_ops->display(ctx, buf, bufsize);
355 }
356
357 static int import_sec_check_expire(struct obd_import *imp)
358 {
359         int     adapt = 0;
360
361         spin_lock(&imp->imp_lock);
362         if (imp->imp_sec_expire &&
363             imp->imp_sec_expire < cfs_time_current_sec()) {
364                 adapt = 1;
365                 imp->imp_sec_expire = 0;
366         }
367         spin_unlock(&imp->imp_lock);
368
369         if (!adapt)
370                 return 0;
371
372         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
373         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
374 }
375
376 /**
377  * Get and validate the client side ptlrpc security facilities from
378  * \a imp. There is a race condition on client reconnect when the import is
379  * being destroyed while there are outstanding client bound requests. In
380  * this case do not output any error messages if import security is not
381  * found.
382  *
383  * \param[in] imp obd import associated with client
384  * \param[out] sec client side ptlrpc security
385  *
386  * \retval 0 if security retrieved successfully
387  * \retval -ve errno if there was a problem
388  */
389 static int import_sec_validate_get(struct obd_import *imp,
390                                    struct ptlrpc_sec **sec)
391 {
392         int     rc;
393
394         if (unlikely(imp->imp_sec_expire)) {
395                 rc = import_sec_check_expire(imp);
396                 if (rc)
397                         return rc;
398         }
399
400         *sec = sptlrpc_import_sec_ref(imp);
401         if (*sec == NULL) {
402                 CERROR("import %p (%s) with no sec\n",
403                        imp, ptlrpc_import_state_name(imp->imp_state));
404                 return -EACCES;
405         }
406
407         if (unlikely((*sec)->ps_dying)) {
408                 CERROR("attempt to use dying sec %p\n", *sec);
409                 sptlrpc_sec_put(*sec);
410                 return -EACCES;
411         }
412
413         return 0;
414 }
415
416 /**
417  * Given a \a req, find or allocate an appropriate context for it.
418  * \pre req->rq_cli_ctx == NULL.
419  *
420  * \retval 0 success, and req->rq_cli_ctx is set.
421  * \retval -ve error number, and req->rq_cli_ctx == NULL.
422  */
423 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
424 {
425         struct obd_import *imp = req->rq_import;
426         struct ptlrpc_sec *sec;
427         int                rc;
428         ENTRY;
429
430         LASSERT(!req->rq_cli_ctx);
431         LASSERT(imp);
432
433         rc = import_sec_validate_get(imp, &sec);
434         if (rc)
435                 RETURN(rc);
436
437         req->rq_cli_ctx = get_my_ctx(sec);
438
439         sptlrpc_sec_put(sec);
440
441         if (!req->rq_cli_ctx) {
442                 CERROR("req %p: fail to get context\n", req);
443                 RETURN(-ECONNREFUSED);
444         }
445
446         RETURN(0);
447 }
448
449 /**
450  * Drop the context for \a req.
451  * \pre req->rq_cli_ctx != NULL.
452  * \post req->rq_cli_ctx == NULL.
453  *
454  * If \a sync == 0, this function should return quickly without sleep;
455  * otherwise it might trigger and wait for the whole process of sending
456  * a context-destroying RPC to the server.
457  */
458 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
459 {
460         ENTRY;
461
462         LASSERT(req);
463         LASSERT(req->rq_cli_ctx);
464
465         /* the request might be asked to release its context early while
466          * it is still on the context waiting list.
467          */
468         if (!list_empty(&req->rq_ctx_chain)) {
469                 spin_lock(&req->rq_cli_ctx->cc_lock);
470                 list_del_init(&req->rq_ctx_chain);
471                 spin_unlock(&req->rq_cli_ctx->cc_lock);
472         }
473
474         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
475         req->rq_cli_ctx = NULL;
476         EXIT;
477 }
478
479 static
480 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
481                            struct ptlrpc_cli_ctx *oldctx,
482                            struct ptlrpc_cli_ctx *newctx)
483 {
484         struct sptlrpc_flavor   old_flvr;
485         char                   *reqmsg = NULL; /* to workaround old gcc */
486         int                     reqmsg_size;
487         int                     rc = 0;
488
489         LASSERT(req->rq_reqmsg);
490         LASSERT(req->rq_reqlen);
491         LASSERT(req->rq_replen);
492
493         CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
494                "switch sec %p(%s) -> %p(%s)\n", req,
495                oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
496                newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
497                oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
498                newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
499
500         /* save flavor */
501         old_flvr = req->rq_flvr;
502
503         /* save request message */
504         reqmsg_size = req->rq_reqlen;
505         if (reqmsg_size != 0) {
506                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
507                 if (reqmsg == NULL)
508                         return -ENOMEM;
509                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
510         }
511
512         /* release old req/rep buf */
513         req->rq_cli_ctx = oldctx;
514         sptlrpc_cli_free_reqbuf(req);
515         sptlrpc_cli_free_repbuf(req);
516         req->rq_cli_ctx = newctx;
517
518         /* recalculate the flavor */
519         sptlrpc_req_set_flavor(req, 0);
520
521         /* alloc new request buffer;
522          * we don't need to alloc the reply buffer here, leave it to the
523          * rest of the ptlrpc code path */
524         if (reqmsg_size != 0) {
525                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
526                 if (!rc) {
527                         LASSERT(req->rq_reqmsg);
528                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
529                 } else {
530                         CWARN("failed to alloc reqbuf: %d\n", rc);
531                         req->rq_flvr = old_flvr;
532                 }
533
534                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
535         }
536         return rc;
537 }
538
539 /**
540  * If the current context of \a req is dead somehow, e.g. we just switched
541  * flavor and thus marked the original contexts dead, find a new context for
542  * it. If no switch is needed, \a req will end up with the same context.
543  *
544  * \note a request must have a context, to keep other parts of code happy.
545  * In any case of failure during the switching, we must restore the old one.
546  */
547 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
548 {
549         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
550         struct ptlrpc_cli_ctx *newctx;
551         int                    rc;
552         ENTRY;
553
554         LASSERT(oldctx);
555
556         sptlrpc_cli_ctx_get(oldctx);
557         sptlrpc_req_put_ctx(req, 0);
558
559         rc = sptlrpc_req_get_ctx(req);
560         if (unlikely(rc)) {
561                 LASSERT(!req->rq_cli_ctx);
562
563                 /* restore old ctx */
564                 req->rq_cli_ctx = oldctx;
565                 RETURN(rc);
566         }
567
568         newctx = req->rq_cli_ctx;
569         LASSERT(newctx);
570
571         if (unlikely(newctx == oldctx &&
572                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
573                 /*
574                  * still get the old dead ctx, usually means system too busy
575                  */
576                 CDEBUG(D_SEC,
577                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
578                        newctx, newctx->cc_flags);
579
580                 set_current_state(TASK_INTERRUPTIBLE);
581                 schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
582         } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
583                             == 0)) {
584                 /*
585                  * new ctx not up to date yet
586                  */
587                 CDEBUG(D_SEC,
588                        "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
589                        newctx, newctx->cc_flags);
590         } else {
591                 /*
592                  * it's possible newctx == oldctx if we're switching
593                  * subflavor with the same sec.
594                  */
595                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
596                 if (rc) {
597                         /* restore old ctx */
598                         sptlrpc_req_put_ctx(req, 0);
599                         req->rq_cli_ctx = oldctx;
600                         RETURN(rc);
601                 }
602
603                 LASSERT(req->rq_cli_ctx == newctx);
604         }
605
606         sptlrpc_cli_ctx_put(oldctx, 1);
607         RETURN(0);
608 }
609 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
610
611 static
612 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
613 {
614         if (cli_ctx_is_refreshed(ctx))
615                 return 1;
616         return 0;
617 }
618
619 static
620 int ctx_refresh_timeout(void *data)
621 {
622         struct ptlrpc_request *req = data;
623         int rc;
624
625         /* conn_cnt is needed in expire_one_request */
626         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
627
628         rc = ptlrpc_expire_one_request(req, 1);
629         /* if we started recovery, we should mark this ctx dead; otherwise,
630          * in case lgssd died, nobody would retire this ctx and subsequent
631          * connect attempts would still find the same ctx, causing a deadlock.
632          * There is an assumption that the expire time of the request is
633          * later than the context refresh expire time.
634          */
635         if (rc == 0)
636                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
637         return rc;
638 }
639
640 static
641 void ctx_refresh_interrupt(void *data)
642 {
643         struct ptlrpc_request *req = data;
644
645         spin_lock(&req->rq_lock);
646         req->rq_intr = 1;
647         spin_unlock(&req->rq_lock);
648 }
649
650 static
651 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
652 {
653         spin_lock(&ctx->cc_lock);
654         if (!list_empty(&req->rq_ctx_chain))
655                 list_del_init(&req->rq_ctx_chain);
656         spin_unlock(&ctx->cc_lock);
657 }
658
659 /**
660  * Refresh the context of \a req if it is not up to date.
661  * \param timeout
662  * - < 0: don't wait
663  * - = 0: wait until success or a fatal error occurs
664  * - > 0: timeout value (in seconds)
665  *
666  * The status of the context could be changed by other threads at any time.
667  * We allow this race, but once we return 0, the caller will assume it is up
668  * to date and keep using it until the owning RPC is done.
669  *
670  * \retval 0 only if the context is up to date.
671  * \retval -ve error number.
672  */
673 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
674 {
675         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
676         struct ptlrpc_sec      *sec;
677         struct l_wait_info      lwi;
678         int                     rc;
679         ENTRY;
680
681         LASSERT(ctx);
682
683         if (req->rq_ctx_init || req->rq_ctx_fini)
684                 RETURN(0);
685
686         /*
687          * during the process a request's context might even change type
688          * (e.g. from a gss ctx to a null ctx), so on each loop we need to re-check
689          * everything
690          */
691 again:
692         rc = import_sec_validate_get(req->rq_import, &sec);
693         if (rc)
694                 RETURN(rc);
695
696         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
697                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
698                       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
699                 req_off_ctx_list(req, ctx);
700                 sptlrpc_req_replace_dead_ctx(req);
701                 ctx = req->rq_cli_ctx;
702         }
703         sptlrpc_sec_put(sec);
704
705         if (cli_ctx_is_eternal(ctx))
706                 RETURN(0);
707
708         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
709                 LASSERT(ctx->cc_ops->refresh);
710                 ctx->cc_ops->refresh(ctx);
711         }
712         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
713
714         LASSERT(ctx->cc_ops->validate);
715         if (ctx->cc_ops->validate(ctx) == 0) {
716                 req_off_ctx_list(req, ctx);
717                 RETURN(0);
718         }
719
720         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
721                 spin_lock(&req->rq_lock);
722                 req->rq_err = 1;
723                 spin_unlock(&req->rq_lock);
724                 req_off_ctx_list(req, ctx);
725                 RETURN(-EPERM);
726         }
727
728         /*
729          * There's a subtle issue for resending RPCs, suppose following
730          * situation:
731          *  1. the request was sent to the server.
732          *  2. recovery was kicked off; after it finished, the request was
733          *     marked as resent.
734          *  3. resend the request.
735          *  4. old reply from server received, we accept and verify the reply.
736          *     this has to succeed, otherwise the error would be visible
737          *     to the application.
738          *  5. new reply from server received, dropped by LNet.
739          *
740          * Note the xid of old & new request is the same. We can't simply
741          * change xid for the resent request because the server replies on
742          * change the xid for the resent request because the server relies on
743          *
744          * Commonly the original context should still be uptodate because we
745          * have a nice expiry time margin; the server will keep its context
746          * because we at least hold a ref on the old context, which prevents
747          * the context-destroying RPC from being sent. So the server can still
748          * accept the request and finish the RPC. But if that's not the case:
749          *  1. If the server-side context has been trimmed, a NO_CONTEXT will
750          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
751          *     new context by force.
752          *  2. The current context was never refreshed, then we are fine: we
753          *     never really sent a request with the old context before.
754          */
755         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
756             unlikely(req->rq_reqmsg) &&
757             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
758                 req_off_ctx_list(req, ctx);
759                 RETURN(0);
760         }
761
762         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
763                 req_off_ctx_list(req, ctx);
764                 /*
765                  * don't switch ctx if import was deactivated
766                  */
767                 if (req->rq_import->imp_deactive) {
768                         spin_lock(&req->rq_lock);
769                         req->rq_err = 1;
770                         spin_unlock(&req->rq_lock);
771                         RETURN(-EINTR);
772                 }
773
774                 rc = sptlrpc_req_replace_dead_ctx(req);
775                 if (rc) {
776                         LASSERT(ctx == req->rq_cli_ctx);
777                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
778                                req, ctx, rc);
779                         spin_lock(&req->rq_lock);
780                         req->rq_err = 1;
781                         spin_unlock(&req->rq_lock);
782                         RETURN(rc);
783                 }
784
785                 ctx = req->rq_cli_ctx;
786                 goto again;
787         }
788
789         /*
790          * Now we're sure this context is in the middle of an upcall; add
791          * ourselves to the waiting list
792          */
793         spin_lock(&ctx->cc_lock);
794         if (list_empty(&req->rq_ctx_chain))
795                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
796         spin_unlock(&ctx->cc_lock);
797
798         if (timeout < 0)
799                 RETURN(-EWOULDBLOCK);
800
801         /* Clear any flags that may be present from previous sends */
802         LASSERT(req->rq_receiving_reply == 0);
803         spin_lock(&req->rq_lock);
804         req->rq_err = 0;
805         req->rq_timedout = 0;
806         req->rq_resend = 0;
807         req->rq_restart = 0;
808         spin_unlock(&req->rq_lock);
809
810         lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
811                                ctx_refresh_timeout,
812                                ctx_refresh_interrupt, req);
813         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
814
815         /*
816          * the following cases could lead us here:
817          * - successfully refreshed;
818          * - interrupted;
819          * - timed out, and we don't want to recover from the failure;
820          * - timed out, and woken up after recovery finished;
821          * - someone else marked this ctx dead by force;
822          * - someone invalidated the req and called ptlrpc_client_wake_req(),
823          *   e.g. ptlrpc_abort_inflight();
824          */
825         if (!cli_ctx_is_refreshed(ctx)) {
826                 /* timed out or interrupted */
827                 req_off_ctx_list(req, ctx);
828
829                 LASSERT(rc != 0);
830                 RETURN(rc);
831         }
832
833         goto again;
834 }
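
/*
 * Illustrative sketch (hypothetical caller, not part of the original file)
 * of the timeout convention documented above:
 *
 *	rc = sptlrpc_req_refresh_ctx(req, -1);	don't wait; -EWOULDBLOCK if not ready
 *	rc = sptlrpc_req_refresh_ctx(req, 0);	wait until refreshed or fatal error
 *	rc = sptlrpc_req_refresh_ctx(req, 20);	wait at most 20 seconds
 */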
835
836 /**
837  * Initialize flavor settings for \a req, according to \a opcode.
838  *
839  * \note this could be called in two situations:
840  * - new request from ptlrpc_prep_req(), with proper @opcode
841  * - old request which changed ctx in the middle, with @opcode == 0
842  */
843 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
844 {
845         struct ptlrpc_sec *sec;
846
847         LASSERT(req->rq_import);
848         LASSERT(req->rq_cli_ctx);
849         LASSERT(req->rq_cli_ctx->cc_sec);
850         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
851
852         /* special security flags according to opcode */
853         switch (opcode) {
854         case OST_READ:
855         case MDS_READPAGE:
856         case MGS_CONFIG_READ:
857         case OBD_IDX_READ:
858                 req->rq_bulk_read = 1;
859                 break;
860         case OST_WRITE:
861         case MDS_WRITEPAGE:
862                 req->rq_bulk_write = 1;
863                 break;
864         case SEC_CTX_INIT:
865                 req->rq_ctx_init = 1;
866                 break;
867         case SEC_CTX_FINI:
868                 req->rq_ctx_fini = 1;
869                 break;
870         case 0:
871                 /* init/fini rpc won't be resent, so can't be here */
872                 LASSERT(req->rq_ctx_init == 0);
873                 LASSERT(req->rq_ctx_fini == 0);
874
875                 /* cleanup flags, which should be recalculated */
876                 req->rq_pack_udesc = 0;
877                 req->rq_pack_bulk = 0;
878                 break;
879         }
880
881         sec = req->rq_cli_ctx->cc_sec;
882
883         spin_lock(&sec->ps_lock);
884         req->rq_flvr = sec->ps_flvr;
885         spin_unlock(&sec->ps_lock);
886
887         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
888          * destruction rpc */
889         if (unlikely(req->rq_ctx_init))
890                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
891         else if (unlikely(req->rq_ctx_fini))
892                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
893
894         /* user descriptor flag, null security can't do it anyway */
895         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
896             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
897                 req->rq_pack_udesc = 1;
898
899         /* bulk security flag */
900         if ((req->rq_bulk_read || req->rq_bulk_write) &&
901             sptlrpc_flavor_has_bulk(&req->rq_flvr))
902                 req->rq_pack_bulk = 1;
903 }
904
905 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
906 {
907         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
908                 return;
909
910         LASSERT(req->rq_clrbuf);
911         if (req->rq_pool || !req->rq_reqbuf)
912                 return;
913
914         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
915         req->rq_reqbuf = NULL;
916         req->rq_reqbuf_len = 0;
917 }
918
919 /**
920  * Given an import \a imp, check whether the current user has a valid context
921  * or not. We may create a new context and try to refresh it, retrying
922  * repeatedly in case of non-fatal errors. A return value of 0 means success.
923  */
924 int sptlrpc_import_check_ctx(struct obd_import *imp)
925 {
926         struct ptlrpc_sec     *sec;
927         struct ptlrpc_cli_ctx *ctx;
928         struct ptlrpc_request *req = NULL;
929         int rc;
930         ENTRY;
931
932         might_sleep();
933
934         sec = sptlrpc_import_sec_ref(imp);
935         ctx = get_my_ctx(sec);
936         sptlrpc_sec_put(sec);
937
938         if (!ctx)
939                 RETURN(-ENOMEM);
940
941         if (cli_ctx_is_eternal(ctx) ||
942             ctx->cc_ops->validate(ctx) == 0) {
943                 sptlrpc_cli_ctx_put(ctx, 1);
944                 RETURN(0);
945         }
946
947         if (cli_ctx_is_error(ctx)) {
948                 sptlrpc_cli_ctx_put(ctx, 1);
949                 RETURN(-EACCES);
950         }
951
952         req = ptlrpc_request_cache_alloc(GFP_NOFS);
953         if (!req)
954                 RETURN(-ENOMEM);
955
956         ptlrpc_cli_req_init(req);
957         atomic_set(&req->rq_refcount, 10000);
958
959         req->rq_import = imp;
960         req->rq_flvr = sec->ps_flvr;
961         req->rq_cli_ctx = ctx;
962
963         rc = sptlrpc_req_refresh_ctx(req, 0);
964         LASSERT(list_empty(&req->rq_ctx_chain));
965         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
966         ptlrpc_request_cache_free(req);
967
968         RETURN(rc);
969 }
970
971 /**
972  * Used by ptlrpc client, to perform the pre-defined security transformation
973  * upon the request message of \a req. After this function is called,
974  * req->rq_reqmsg is still accessible as clear text.
975  */
976 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
977 {
978         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
979         int rc = 0;
980         ENTRY;
981
982         LASSERT(ctx);
983         LASSERT(ctx->cc_sec);
984         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
985
986         /* we wrap bulk request here because now we can be sure
987          * the context is uptodate.
988          */
989         if (req->rq_bulk) {
990                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
991                 if (rc)
992                         RETURN(rc);
993         }
994
995         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
996         case SPTLRPC_SVC_NULL:
997         case SPTLRPC_SVC_AUTH:
998         case SPTLRPC_SVC_INTG:
999                 LASSERT(ctx->cc_ops->sign);
1000                 rc = ctx->cc_ops->sign(ctx, req);
1001                 break;
1002         case SPTLRPC_SVC_PRIV:
1003                 LASSERT(ctx->cc_ops->seal);
1004                 rc = ctx->cc_ops->seal(ctx, req);
1005                 break;
1006         default:
1007                 LBUG();
1008         }
1009
1010         if (rc == 0) {
1011                 LASSERT(req->rq_reqdata_len);
1012                 LASSERT(req->rq_reqdata_len % 8 == 0);
1013                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1014         }
1015
1016         RETURN(rc);
1017 }
1018
1019 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1020 {
1021         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1022         int                    rc;
1023         ENTRY;
1024
1025         LASSERT(ctx);
1026         LASSERT(ctx->cc_sec);
1027         LASSERT(req->rq_repbuf);
1028         LASSERT(req->rq_repdata);
1029         LASSERT(req->rq_repmsg == NULL);
1030
1031         req->rq_rep_swab_mask = 0;
1032
1033         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1034         switch (rc) {
1035         case 1:
1036                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
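                /* fall through */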
1037         case 0:
1038                 break;
1039         default:
1040                 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
1041                 RETURN(-EPROTO);
1042         }
1043
1044         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1045                 CERROR("replied data length %d too small\n",
1046                        req->rq_repdata_len);
1047                 RETURN(-EPROTO);
1048         }
1049
1050         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1051             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1052                 CERROR("reply policy %u doesn't match request policy %u\n",
1053                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1054                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1055                 RETURN(-EPROTO);
1056         }
1057
1058         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1059         case SPTLRPC_SVC_NULL:
1060         case SPTLRPC_SVC_AUTH:
1061         case SPTLRPC_SVC_INTG:
1062                 LASSERT(ctx->cc_ops->verify);
1063                 rc = ctx->cc_ops->verify(ctx, req);
1064                 break;
1065         case SPTLRPC_SVC_PRIV:
1066                 LASSERT(ctx->cc_ops->unseal);
1067                 rc = ctx->cc_ops->unseal(ctx, req);
1068                 break;
1069         default:
1070                 LBUG();
1071         }
1072         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1073
1074         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1075             !req->rq_ctx_init)
1076                 req->rq_rep_swab_mask = 0;
1077         RETURN(rc);
1078 }
1079
1080 /**
1081  * Used by ptlrpc client, to perform security transformation upon the reply
1082  * message of \a req. After returning successfully, req->rq_repmsg points to
1083  * the reply message in clear text.
1084  *
1085  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1086  * going to change.
1087  */
1088 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1089 {
1090         LASSERT(req->rq_repbuf);
1091         LASSERT(req->rq_repdata == NULL);
1092         LASSERT(req->rq_repmsg == NULL);
1093         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1094
1095         if (req->rq_reply_off == 0 &&
1096             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1097                 CERROR("real reply with offset 0\n");
1098                 return -EPROTO;
1099         }
1100
1101         if (req->rq_reply_off % 8 != 0) {
1102                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1103                 return -EPROTO;
1104         }
1105
1106         req->rq_repdata = (struct lustre_msg *)
1107                                 (req->rq_repbuf + req->rq_reply_off);
1108         req->rq_repdata_len = req->rq_nob_received;
1109
1110         return do_cli_unwrap_reply(req);
1111 }
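
/*
 * Illustrative sketch (not part of the original file) of where the wrap and
 * unwrap entry points above sit in a client RPC's life cycle; the ordering
 * is paraphrased from this file, not copied from the actual call sites:
 *
 *	sptlrpc_req_get_ctx(req);			attach a client context
 *	sptlrpc_req_set_flavor(req, opcode);		pick rpc/bulk flavor
 *	sptlrpc_cli_alloc_reqbuf(req, msgsize);		then pack rq_reqmsg
 *	sptlrpc_req_refresh_ctx(req, timeout);		make sure ctx is usable
 *	sptlrpc_cli_wrap_request(req);			sign or seal rq_reqmsg
 *	... send the request and wait for the reply ...
 *	sptlrpc_cli_unwrap_reply(req);			verify/unseal into rq_repmsg
 */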
1112
1113 /**
1114  * Used by ptlrpc client, to perform security transformation upon the early
1115  * reply message of \a req. We expect the rq_reply_off is 0, and
1116  * rq_nob_received is the early reply size.
1117  * 
1118  * Because the receive buffer might be still posted, the reply data might be
1119  * Because the receive buffer might still be posted, the reply data might
1120  * change at any time, whether or not we are holding rq_lock. For this reason
1121  * processing.
1122  *
1123  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1124  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1125  * \a *req_ret to release it.
1126  * \retval -ve error number, and \a req_ret will not be set.
1127  */
1128 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1129                                    struct ptlrpc_request **req_ret)
1130 {
1131         struct ptlrpc_request  *early_req;
1132         char                   *early_buf;
1133         int                     early_bufsz, early_size;
1134         int                     rc;
1135         ENTRY;
1136
1137         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1138         if (early_req == NULL)
1139                 RETURN(-ENOMEM);
1140
1141         ptlrpc_cli_req_init(early_req);
1142
1143         early_size = req->rq_nob_received;
1144         early_bufsz = size_roundup_power2(early_size);
1145         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1146         if (early_buf == NULL)
1147                 GOTO(err_req, rc = -ENOMEM);
1148
1149         /* sanity checks and copy data out; do it inside the spinlock */
1150         spin_lock(&req->rq_lock);
1151
1152         if (req->rq_replied) {
1153                 spin_unlock(&req->rq_lock);
1154                 GOTO(err_buf, rc = -EALREADY);
1155         }
1156
1157         LASSERT(req->rq_repbuf);
1158         LASSERT(req->rq_repdata == NULL);
1159         LASSERT(req->rq_repmsg == NULL);
1160
1161         if (req->rq_reply_off != 0) {
1162                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1163                 spin_unlock(&req->rq_lock);
1164                 GOTO(err_buf, rc = -EPROTO);
1165         }
1166
1167         if (req->rq_nob_received != early_size) {
1168                 /* even if another early reply arrived, the size should be the same */
1169                 CERROR("data size has changed from %u to %u\n",
1170                        early_size, req->rq_nob_received);
1171                 spin_unlock(&req->rq_lock);
1172                 GOTO(err_buf, rc = -EINVAL);
1173         }
1174
1175         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1176                 CERROR("early reply length %d too small\n",
1177                        req->rq_nob_received);
1178                 spin_unlock(&req->rq_lock);
1179                 GOTO(err_buf, rc = -EALREADY);
1180         }
1181
1182         memcpy(early_buf, req->rq_repbuf, early_size);
1183         spin_unlock(&req->rq_lock);
1184
1185         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1186         early_req->rq_flvr = req->rq_flvr;
1187         early_req->rq_repbuf = early_buf;
1188         early_req->rq_repbuf_len = early_bufsz;
1189         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1190         early_req->rq_repdata_len = early_size;
1191         early_req->rq_early = 1;
1192         early_req->rq_reqmsg = req->rq_reqmsg;
1193
1194         rc = do_cli_unwrap_reply(early_req);
1195         if (rc) {
1196                 DEBUG_REQ(D_ADAPTTO, early_req,
1197                           "error %d unwrap early reply", rc);
1198                 GOTO(err_ctx, rc);
1199         }
1200
1201         LASSERT(early_req->rq_repmsg);
1202         *req_ret = early_req;
1203         RETURN(0);
1204
1205 err_ctx:
1206         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1207 err_buf:
1208         OBD_FREE_LARGE(early_buf, early_bufsz);
1209 err_req:
1210         ptlrpc_request_cache_free(early_req);
1211         RETURN(rc);
1212 }
1213
1214 /**
1215  * Used by ptlrpc client, to release a processed early reply \a early_req.
1216  *
1217  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1218  */
1219 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1220 {
1221         LASSERT(early_req->rq_repbuf);
1222         LASSERT(early_req->rq_repdata);
1223         LASSERT(early_req->rq_repmsg);
1224
1225         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1226         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1227         ptlrpc_request_cache_free(early_req);
1228 }
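
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * the unwrap/finish pairing for early replies required by the comments above.
 *
 *	struct ptlrpc_request *early_req;
 *	int rc;
 *
 *	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *	if (rc == 0) {
 *		... read what is needed from early_req->rq_repmsg ...
 *		sptlrpc_cli_finish_early_reply(early_req);
 *	}
 */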
1229
1230 /**************************************************
1231  * sec ID                                         *
1232  **************************************************/
1233
1234 /*
1235  * "fixed" sec (e.g. null) uses sec_id < 0
1236  */
1237 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1238
1239 int sptlrpc_get_next_secid(void)
1240 {
1241         return atomic_inc_return(&sptlrpc_sec_id);
1242 }
1243 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1244
1245 /**************************************************
1246  * client side high-level security APIs           *
1247  **************************************************/
1248
1249 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1250                                    int grace, int force)
1251 {
1252         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1253
1254         LASSERT(policy->sp_cops);
1255         LASSERT(policy->sp_cops->flush_ctx_cache);
1256
1257         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1258 }
1259
1260 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1261 {
1262         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1263
1264         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1265         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1266         LASSERT(policy->sp_cops->destroy_sec);
1267
1268         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1269
1270         policy->sp_cops->destroy_sec(sec);
1271         sptlrpc_policy_put(policy);
1272 }
1273
1274 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1275 {
1276         sec_cop_destroy_sec(sec);
1277 }
1278 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1279
1280 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1281 {
1282         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1283
1284         if (sec->ps_policy->sp_cops->kill_sec) {
1285                 sec->ps_policy->sp_cops->kill_sec(sec);
1286
1287                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1288         }
1289 }
1290
1291 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1292 {
1293         if (sec)
1294                 atomic_inc(&sec->ps_refcount);
1295
1296         return sec;
1297 }
1298 EXPORT_SYMBOL(sptlrpc_sec_get);
1299
1300 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1301 {
1302         if (sec) {
1303                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1304
1305                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1306                         sptlrpc_gc_del_sec(sec);
1307                         sec_cop_destroy_sec(sec);
1308                 }
1309         }
1310 }
1311 EXPORT_SYMBOL(sptlrpc_sec_put);
1312
1313 /*
1314  * the policy module is responsible for taking a reference on the import
1315  */
1316 static
1317 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1318                                        struct ptlrpc_svc_ctx *svc_ctx,
1319                                        struct sptlrpc_flavor *sf,
1320                                        enum lustre_sec_part sp)
1321 {
1322         struct ptlrpc_sec_policy *policy;
1323         struct ptlrpc_sec        *sec;
1324         char                      str[32];
1325         ENTRY;
1326
1327         if (svc_ctx) {
1328                 LASSERT(imp->imp_dlm_fake == 1);
1329
1330                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1331                        imp->imp_obd->obd_type->typ_name,
1332                        imp->imp_obd->obd_name,
1333                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1334
1335                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1336                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1337         } else {
1338                 LASSERT(imp->imp_dlm_fake == 0);
1339
1340                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1341                        imp->imp_obd->obd_type->typ_name,
1342                        imp->imp_obd->obd_name,
1343                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1344
1345                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1346                 if (!policy) {
1347                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1348                         RETURN(NULL);
1349                 }
1350         }
1351
1352         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1353         if (sec) {
1354                 atomic_inc(&sec->ps_refcount);
1355
1356                 sec->ps_part = sp;
1357
1358                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1359                         sptlrpc_gc_add_sec(sec);
1360         } else {
1361                 sptlrpc_policy_put(policy);
1362         }
1363
1364         RETURN(sec);
1365 }
1366
1367 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1368 {
1369         struct ptlrpc_sec *sec;
1370
1371         spin_lock(&imp->imp_lock);
1372         sec = sptlrpc_sec_get(imp->imp_sec);
1373         spin_unlock(&imp->imp_lock);
1374
1375         return sec;
1376 }
1377 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
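
/*
 * Illustrative sketch (not part of the original file): the ref/put pattern
 * for an import's security, as used by import_flush_ctx_common() below.
 *
 *	struct ptlrpc_sec *sec;
 *
 *	sec = sptlrpc_import_sec_ref(imp);
 *	if (sec != NULL) {
 *		... use sec, e.g. sec->ps_flvr ...
 *		sptlrpc_sec_put(sec);
 *	}
 */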
1378
1379 static void sptlrpc_import_sec_install(struct obd_import *imp,
1380                                        struct ptlrpc_sec *sec)
1381 {
1382         struct ptlrpc_sec *old_sec;
1383
1384         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1385
1386         spin_lock(&imp->imp_lock);
1387         old_sec = imp->imp_sec;
1388         imp->imp_sec = sec;
1389         spin_unlock(&imp->imp_lock);
1390
1391         if (old_sec) {
1392                 sptlrpc_sec_kill(old_sec);
1393
1394                 /* balance the ref taken by this import */
1395                 sptlrpc_sec_put(old_sec);
1396         }
1397 }
1398
1399 static inline
1400 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1401 {
1402         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1403 }
1404
1405 static inline
1406 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1407 {
1408         *dst = *src;
1409 }
1410
1411 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1412                                              struct ptlrpc_sec *sec,
1413                                              struct sptlrpc_flavor *sf)
1414 {
1415         char    str1[32], str2[32];
1416
1417         if (sec->ps_flvr.sf_flags != sf->sf_flags)
1418                 CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
1419                        sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1420                                             str1, sizeof(str1)),
1421                        sptlrpc_secflags2str(sf->sf_flags,
1422                                             str2, sizeof(str2)));
1423
1424         spin_lock(&sec->ps_lock);
1425         flavor_copy(&sec->ps_flvr, sf);
1426         spin_unlock(&sec->ps_lock);
1427 }
1428
1429 /**
1430  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1431  * configuration. When called, imp->imp_sec may or may not be NULL.
1432  *
1433  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1434  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1435  */
1436 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1437                              struct ptlrpc_svc_ctx *svc_ctx,
1438                              struct sptlrpc_flavor *flvr)
1439 {
1440         struct ptlrpc_connection   *conn;
1441         struct sptlrpc_flavor       sf;
1442         struct ptlrpc_sec          *sec, *newsec;
1443         enum lustre_sec_part        sp;
1444         char                        str[24];
1445         int                         rc = 0;
1446         ENTRY;
1447
1448         might_sleep();
1449
1450         if (imp == NULL)
1451                 RETURN(0);
1452
1453         conn = imp->imp_connection;
1454
1455         if (svc_ctx == NULL) {
1456                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1457                 /*
1458                  * normal import: determine the flavor from the rule set,
1459                  * except for the MGC, whose flavor is predetermined.
1460                  */
1461                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1462                         sf = cliobd->cl_flvr_mgc;
1463                 else 
1464                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1465                                                    cliobd->cl_sp_to,
1466                                                    &cliobd->cl_target_uuid,
1467                                                    conn->c_self, &sf);
1468
1469                 sp = imp->imp_obd->u.cli.cl_sp_me;
1470         } else {
1471                 /* reverse import: determine the flavor from the incoming request */
1472                 sf = *flvr;
1473
1474                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1475                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1476                                       PTLRPC_SEC_FL_ROOTONLY;
1477
1478                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1479         }
1480
1481         sec = sptlrpc_import_sec_ref(imp);
1482         if (sec) {
1483                 char    str2[24];
1484
1485                 if (flavor_equal(&sf, &sec->ps_flvr))
1486                         GOTO(out, rc);
1487
1488                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1489                        imp->imp_obd->obd_name,
1490                        obd_uuid2str(&conn->c_remote_uuid),
1491                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1492                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1493
1494                 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1495                     SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1496                     SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1497                     SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1498                         sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1499                         GOTO(out, rc);
1500                 }
1501         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1502                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1503                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1504                        imp->imp_obd->obd_name,
1505                        obd_uuid2str(&conn->c_remote_uuid),
1506                        LNET_NIDNET(conn->c_self),
1507                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1508         }
1509
1510         mutex_lock(&imp->imp_sec_mutex);
1511
1512         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1513         if (newsec) {
1514                 sptlrpc_import_sec_install(imp, newsec);
1515         } else {
1516                 CERROR("import %s->%s: failed to create new sec\n",
1517                        imp->imp_obd->obd_name,
1518                        obd_uuid2str(&conn->c_remote_uuid));
1519                 rc = -EPERM;
1520         }
1521
1522         mutex_unlock(&imp->imp_sec_mutex);
1523 out:
1524         sptlrpc_sec_put(sec);
1525         RETURN(rc);
1526 }
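
/*
 * Illustrative sketch (not part of the original file): the two ways this
 * function is driven. The first form appears in import_sec_check_expire()
 * above; the reverse-import form is paraphrased from the comment above and
 * its call site is not in this file.
 *
 *	rc = sptlrpc_import_sec_adapt(imp, NULL, NULL);
 *
 *	rc = sptlrpc_import_sec_adapt(imp, svc_ctx, &flvr);
 */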
1527
1528 void sptlrpc_import_sec_put(struct obd_import *imp)
1529 {
1530         if (imp->imp_sec) {
1531                 sptlrpc_sec_kill(imp->imp_sec);
1532
1533                 sptlrpc_sec_put(imp->imp_sec);
1534                 imp->imp_sec = NULL;
1535         }
1536 }
1537
1538 static void import_flush_ctx_common(struct obd_import *imp,
1539                                     uid_t uid, int grace, int force)
1540 {
1541         struct ptlrpc_sec *sec;
1542
1543         if (imp == NULL)
1544                 return;
1545
1546         sec = sptlrpc_import_sec_ref(imp);
1547         if (sec == NULL)
1548                 return;
1549
1550         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1551         sptlrpc_sec_put(sec);
1552 }
1553
1554 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1555 {
1556         /* it's important to use grace mode, see the explanation in
1557          * sptlrpc_req_refresh_ctx() */
1558         import_flush_ctx_common(imp, 0, 1, 1);
1559 }
1560
1561 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1562 {
1563         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1564                                 1, 1);
1565 }
1566 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1567
1568 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1569 {
1570         import_flush_ctx_common(imp, -1, 1, 1);
1571 }
1572 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1573
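/*
 * Example (illustrative sketch only, not part of the actual client code):
 * flushing security contexts on a client import "imp" the caller holds a
 * reference on.  The uid passed down to import_flush_ctx_common() selects
 * whose contexts are flushed: 0 for root only, -1 for everyone, anything
 * else for that particular user.
 *
 *      sptlrpc_import_flush_my_ctx(imp);       // drop the calling user's ctx
 *      sptlrpc_import_flush_all_ctx(imp);      // drop every cached ctx
 */
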
1574 /**
1575  * Used by ptlrpc client to allocate the request buffer of \a req. Upon
1576  * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
1577  */
1578 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1579 {
1580         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1581         struct ptlrpc_sec_policy *policy;
1582         int rc;
1583
1584         LASSERT(ctx);
1585         LASSERT(ctx->cc_sec);
1586         LASSERT(ctx->cc_sec->ps_policy);
1587         LASSERT(req->rq_reqmsg == NULL);
1588         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1589
1590         policy = ctx->cc_sec->ps_policy;
1591         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1592         if (!rc) {
1593                 LASSERT(req->rq_reqmsg);
1594                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1595
1596                 /* zeroing preallocated buffer */
1597                 if (req->rq_pool)
1598                         memset(req->rq_reqmsg, 0, msgsize);
1599         }
1600
1601         return rc;
1602 }
1603
1604 /**
1605  * Used by ptlrpc client to free the request buffer of \a req. After this,
1606  * req->rq_reqmsg is set to NULL and must not be accessed anymore.
1607  */
1608 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1609 {
1610         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1611         struct ptlrpc_sec_policy *policy;
1612
1613         LASSERT(ctx);
1614         LASSERT(ctx->cc_sec);
1615         LASSERT(ctx->cc_sec->ps_policy);
1616         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1617
1618         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1619                 return;
1620
1621         policy = ctx->cc_sec->ps_policy;
1622         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1623         req->rq_reqmsg = NULL;
1624 }
1625
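/*
 * Example (sketch only, error handling trimmed): the usual pairing of
 * sptlrpc_cli_alloc_reqbuf() and sptlrpc_cli_free_reqbuf() around filling in
 * a request.  "req" is assumed to already carry a valid rq_cli_ctx and
 * "msgsize" is a hypothetical message size.
 *
 *      rc = sptlrpc_cli_alloc_reqbuf(req, msgsize);
 *      if (rc == 0) {
 *              // ... fill req->rq_reqmsg ...
 *              sptlrpc_cli_free_reqbuf(req);   // rq_reqmsg becomes NULL
 *      }
 */
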
1626 /*
1627  * NOTE caller must guarantee the buffer size is enough for the enlargement
1628  */
1629 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1630                                   int segment, int newsize)
1631 {
1632         void   *src, *dst;
1633         int     oldsize, oldmsg_size, movesize;
1634
1635         LASSERT(segment < msg->lm_bufcount);
1636         LASSERT(msg->lm_buflens[segment] <= newsize);
1637
1638         if (msg->lm_buflens[segment] == newsize)
1639                 return;
1640
1641         /* no data needs to be moved if we are enlarging the last segment */
1642         if (segment == msg->lm_bufcount - 1) {
1643                 msg->lm_buflens[segment] = newsize;
1644                 return;
1645         }
1646
1647         oldsize = msg->lm_buflens[segment];
1648
1649         src = lustre_msg_buf(msg, segment + 1, 0);
1650         msg->lm_buflens[segment] = newsize;
1651         dst = lustre_msg_buf(msg, segment + 1, 0);
1652         msg->lm_buflens[segment] = oldsize;
1653
1654         /* move from segment + 1 to end segment */
1655         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1656         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1657         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1658         LASSERT(movesize >= 0);
1659
1660         if (movesize)
1661                 memmove(dst, src, movesize);
1662
1663         /* note we don't clear the area where the old data lived; it is not secret */
1664
1665         /* finally set new segment size */
1666         msg->lm_buflens[segment] = newsize;
1667 }
1668 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1669
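/*
 * Worked example (assumed values, for illustration only): for a message with
 * lm_bufcount = 3 and lm_buflens = {128, 64, 32},
 *
 *      _sptlrpc_enlarge_msg_inplace(msg, 1, 96);
 *
 * computes src/dst as the old and new positions of segment 2, memmove()s
 * segment 2's data 32 bytes forward (64 and 96 are both already 8-byte
 * aligned, so buffer rounding does not change the arithmetic), and finally
 * sets lm_buflens[1] = 96.  The caller must have allocated the underlying
 * buffer large enough beforehand, as the NOTE above requires.
 */
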
1670 /**
1671  * Used by ptlrpc client to enlarge the \a segment of the request message
1672  * pointed to by req->rq_reqmsg to size \a newsize. All previously filled-in
1673  * data is preserved after the enlargement. This must be called after the
1674  * original request buffer has been allocated.
1675  *
1676  * \note After this is called, rq_reqmsg and rq_reqlen might have changed,
1677  * so the caller should refresh its local pointers if needed.
1678  */
1679 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1680                                int segment, int newsize)
1681 {
1682         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1683         struct ptlrpc_sec_cops   *cops;
1684         struct lustre_msg        *msg = req->rq_reqmsg;
1685
1686         LASSERT(ctx);
1687         LASSERT(msg);
1688         LASSERT(msg->lm_bufcount > segment);
1689         LASSERT(msg->lm_buflens[segment] <= newsize);
1690
1691         if (msg->lm_buflens[segment] == newsize)
1692                 return 0;
1693
1694         cops = ctx->cc_sec->ps_policy->sp_cops;
1695         LASSERT(cops->enlarge_reqbuf);
1696         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1697 }
1698 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1699
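/*
 * Example (sketch only): enlarging a segment and refreshing local pointers
 * afterwards, since rq_reqmsg may have been reallocated by the policy.
 * "MY_SEGMENT", "newsize" and "body" are hypothetical caller-side names.
 *
 *      rc = sptlrpc_cli_enlarge_reqbuf(req, MY_SEGMENT, newsize);
 *      if (rc == 0)
 *              body = lustre_msg_buf(req->rq_reqmsg, MY_SEGMENT, 0);
 */
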
1700 /**
1701  * Used by ptlrpc client to allocate reply buffer of \a req.
1702  *
1703  * \note After this, req->rq_repmsg is still not accessible.
1704  */
1705 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1706 {
1707         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1708         struct ptlrpc_sec_policy *policy;
1709         ENTRY;
1710
1711         LASSERT(ctx);
1712         LASSERT(ctx->cc_sec);
1713         LASSERT(ctx->cc_sec->ps_policy);
1714
1715         if (req->rq_repbuf)
1716                 RETURN(0);
1717
1718         policy = ctx->cc_sec->ps_policy;
1719         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1720 }
1721
1722 /**
1723  * Used by ptlrpc client to free the reply buffer of \a req. After this,
1724  * req->rq_repmsg is set to NULL and must not be accessed anymore.
1725  */
1726 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1727 {
1728         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1729         struct ptlrpc_sec_policy *policy;
1730         ENTRY;
1731
1732         LASSERT(ctx);
1733         LASSERT(ctx->cc_sec);
1734         LASSERT(ctx->cc_sec->ps_policy);
1735         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1736
1737         if (req->rq_repbuf == NULL)
1738                 return;
1739         LASSERT(req->rq_repbuf_len);
1740
1741         policy = ctx->cc_sec->ps_policy;
1742         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1743         req->rq_repmsg = NULL;
1744         EXIT;
1745 }
1746
1747 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1748                                 struct ptlrpc_cli_ctx *ctx)
1749 {
1750         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1751
1752         if (!policy->sp_cops->install_rctx)
1753                 return 0;
1754         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1755 }
1756
1757 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1758                                 struct ptlrpc_svc_ctx *ctx)
1759 {
1760         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1761
1762         if (!policy->sp_sops->install_rctx)
1763                 return 0;
1764         return policy->sp_sops->install_rctx(imp, ctx);
1765 }
1766
1767 /****************************************
1768  * server side security                 *
1769  ****************************************/
1770
1771 static int flavor_allowed(struct sptlrpc_flavor *exp,
1772                           struct ptlrpc_request *req)
1773 {
1774         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1775
1776         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1777                 return 1;
1778
1779         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1780             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1781             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1782             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1783                 return 1;
1784
1785         return 0;
1786 }
1787
1788 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1789
1790 /**
1791  * Given an export \a exp, check whether the flavor of the incoming \a req
1792  * is allowed by the export \a exp. The main logic is about coping with
1793  * changing configurations. A return value of 0 means success.
1794  */
1795 int sptlrpc_target_export_check(struct obd_export *exp,
1796                                 struct ptlrpc_request *req)
1797 {
1798         struct sptlrpc_flavor   flavor;
1799
1800         if (exp == NULL)
1801                 return 0;
1802
1803         /* client side export has no imp_reverse, skip
1804          * FIXME maybe we should check the flavor here as well? */
1805         if (exp->exp_imp_reverse == NULL)
1806                 return 0;
1807
1808         /* don't care about ctx fini rpc */
1809         if (req->rq_ctx_fini)
1810                 return 0;
1811
1812         spin_lock(&exp->exp_lock);
1813
1814         /* if the flavor just changed (exp->exp_flvr_changed != 0), we wait
1815          * for the first req with the new flavor, then treat it as the current
1816          * flavor and adapt the reverse sec accordingly.
1817          * note the first rpc with the new flavor might not carry a root ctx,
1818          * in which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1819         if (unlikely(exp->exp_flvr_changed) &&
1820             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1821                 /* make the new flavor the "current" one, and the old ones
1822                  * about-to-expire */
1823                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1824                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1825                 flavor = exp->exp_flvr_old[1];
1826                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1827                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1828                 exp->exp_flvr_old[0] = exp->exp_flvr;
1829                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1830                                           EXP_FLVR_UPDATE_EXPIRE;
1831                 exp->exp_flvr = flavor;
1832
1833                 /* flavor change finished */
1834                 exp->exp_flvr_changed = 0;
1835                 LASSERT(exp->exp_flvr_adapt == 1);
1836
1837                 /* if it's gss, we are only interested in root ctx init */
1838                 if (req->rq_auth_gss &&
1839                     !(req->rq_ctx_init &&
1840                       (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1841                        req->rq_auth_usr_ost))) {
1842                         spin_unlock(&exp->exp_lock);
1843                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1844                                req->rq_auth_gss, req->rq_ctx_init,
1845                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1846                                req->rq_auth_usr_ost);
1847                         return 0;
1848                 }
1849
1850                 exp->exp_flvr_adapt = 0;
1851                 spin_unlock(&exp->exp_lock);
1852
1853                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1854                                                 req->rq_svc_ctx, &flavor);
1855         }
1856
1857         /* if it equals the current flavor, we accept it, but still need
1858          * to deal with the reverse sec/ctx */
1859         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1860                 /* most cases should return here; we are only interested
1861                  * in gss root ctx init */
1862                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1863                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1864                      !req->rq_auth_usr_ost)) {
1865                         spin_unlock(&exp->exp_lock);
1866                         return 0;
1867                 }
1868
1869                 /* if the flavor just changed, we should not proceed; just
1870                  * leave it, the current flavor will be discovered and
1871                  * replaced shortly, and let _this_ rpc pass through */
1872                 if (exp->exp_flvr_changed) {
1873                         LASSERT(exp->exp_flvr_adapt);
1874                         spin_unlock(&exp->exp_lock);
1875                         return 0;
1876                 }
1877
1878                 if (exp->exp_flvr_adapt) {
1879                         exp->exp_flvr_adapt = 0;
1880                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1881                                exp, exp->exp_flvr.sf_rpc,
1882                                exp->exp_flvr_old[0].sf_rpc,
1883                                exp->exp_flvr_old[1].sf_rpc);
1884                         flavor = exp->exp_flvr;
1885                         spin_unlock(&exp->exp_lock);
1886
1887                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1888                                                         req->rq_svc_ctx,
1889                                                         &flavor);
1890                 } else {
1891                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1892                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1893                                exp->exp_flvr_old[0].sf_rpc,
1894                                exp->exp_flvr_old[1].sf_rpc);
1895                         spin_unlock(&exp->exp_lock);
1896
1897                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1898                                                            req->rq_svc_ctx);
1899                 }
1900         }
1901
1902         if (exp->exp_flvr_expire[0]) {
1903                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1904                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1905                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1906                                        "middle one ("CFS_DURATION_T")\n", exp,
1907                                        exp->exp_flvr.sf_rpc,
1908                                        exp->exp_flvr_old[0].sf_rpc,
1909                                        exp->exp_flvr_old[1].sf_rpc,
1910                                        exp->exp_flvr_expire[0] -
1911                                                 cfs_time_current_sec());
1912                                 spin_unlock(&exp->exp_lock);
1913                                 return 0;
1914                         }
1915                 } else {
1916                         CDEBUG(D_SEC, "mark middle expired\n");
1917                         exp->exp_flvr_expire[0] = 0;
1918                 }
1919                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x does not match middle\n", exp,
1920                        exp->exp_flvr.sf_rpc,
1921                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1922                        req->rq_flvr.sf_rpc);
1923         }
1924
1925         /* now that it doesn't match the current flavor, the only way we
1926          * can accept it is by matching an old flavor which has not expired. */
1927         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1928                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1929                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1930                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1931                                        "oldest one ("CFS_DURATION_T")\n", exp,
1932                                        exp->exp_flvr.sf_rpc,
1933                                        exp->exp_flvr_old[0].sf_rpc,
1934                                        exp->exp_flvr_old[1].sf_rpc,
1935                                        exp->exp_flvr_expire[1] -
1936                                                 cfs_time_current_sec());
1937                                 spin_unlock(&exp->exp_lock);
1938                                 return 0;
1939                         }
1940                 } else {
1941                         CDEBUG(D_SEC, "mark oldest expired\n");
1942                         exp->exp_flvr_expire[1] = 0;
1943                 }
1944                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x does not match oldest\n",
1945                        exp, exp->exp_flvr.sf_rpc,
1946                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1947                        req->rq_flvr.sf_rpc);
1948         } else {
1949                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1950                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1951                        exp->exp_flvr_old[1].sf_rpc);
1952         }
1953
1954         spin_unlock(&exp->exp_lock);
1955
1956         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
1957               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1958               exp, exp->exp_obd->obd_name,
1959               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1960               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1961               req->rq_flvr.sf_rpc,
1962               exp->exp_flvr.sf_rpc,
1963               exp->exp_flvr_old[0].sf_rpc,
1964               exp->exp_flvr_expire[0] ?
1965               (unsigned long) (exp->exp_flvr_expire[0] -
1966                                cfs_time_current_sec()) : 0,
1967               exp->exp_flvr_old[1].sf_rpc,
1968               exp->exp_flvr_expire[1] ?
1969               (unsigned long) (exp->exp_flvr_expire[1] -
1970                                cfs_time_current_sec()) : 0);
1971         return -EACCES;
1972 }
1973 EXPORT_SYMBOL(sptlrpc_target_export_check);
1974
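/*
 * Example (sketch only, not the actual target code): how a request handler
 * might consume the check above once the export of the incoming request has
 * been looked up.
 *
 *      rc = sptlrpc_target_export_check(req->rq_export, req);
 *      if (rc != 0)            // flavor not authorized (-EACCES)
 *              return rc;      // reject the request
 */
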
1975 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1976                                       struct sptlrpc_rule_set *rset)
1977 {
1978         struct obd_export       *exp;
1979         struct sptlrpc_flavor    new_flvr;
1980
1981         LASSERT(obd);
1982
1983         spin_lock(&obd->obd_dev_lock);
1984
1985         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1986                 if (exp->exp_connection == NULL)
1987                         continue;
1988
1989                 /* note if this export's flavor was just updated
1990                  * (exp_flvr_changed == 1), this will override the
1991                  * previous update. */
1992                 spin_lock(&exp->exp_lock);
1993                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1994                                              exp->exp_connection->c_peer.nid,
1995                                              &new_flvr);
1996                 if (exp->exp_flvr_changed ||
1997                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1998                         exp->exp_flvr_old[1] = new_flvr;
1999                         exp->exp_flvr_expire[1] = 0;
2000                         exp->exp_flvr_changed = 1;
2001                         exp->exp_flvr_adapt = 1;
2002
2003                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
2004                                exp, sptlrpc_part2name(exp->exp_sp_peer),
2005                                exp->exp_flvr.sf_rpc,
2006                                exp->exp_flvr_old[1].sf_rpc);
2007                 }
2008                 spin_unlock(&exp->exp_lock);
2009         }
2010
2011         spin_unlock(&obd->obd_dev_lock);
2012 }
2013 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
2014
2015 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2016 {
2017         /* peer's claim is unreliable unless gss is being used */
2018         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2019                 return svc_rc;
2020
2021         switch (req->rq_sp_from) {
2022         case LUSTRE_SP_CLI:
2023                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2024                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2025                         svc_rc = SECSVC_DROP;
2026                 }
2027                 break;
2028         case LUSTRE_SP_MDT:
2029                 if (!req->rq_auth_usr_mdt) {
2030                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2031                         svc_rc = SECSVC_DROP;
2032                 }
2033                 break;
2034         case LUSTRE_SP_OST:
2035                 if (!req->rq_auth_usr_ost) {
2036                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2037                         svc_rc = SECSVC_DROP;
2038                 }
2039                 break;
2040         case LUSTRE_SP_MGS:
2041         case LUSTRE_SP_MGC:
2042                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2043                     !req->rq_auth_usr_ost) {
2044                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2045                         svc_rc = SECSVC_DROP;
2046                 }
2047                 break;
2048         case LUSTRE_SP_ANY:
2049         default:
2050                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2051                 svc_rc = SECSVC_DROP;
2052         }
2053
2054         return svc_rc;
2055 }
2056
2057 /**
2058  * Used by ptlrpc server, to perform transformation upon the request message
2059  * of incoming \a req. This must be the first thing done to an incoming
2060  * request in the ptlrpc layer.
2061  *
2062  * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
2063  * in clear text, of size req->rq_reqlen; also req->rq_svc_ctx is set.
2064  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2065  * reply message has been prepared.
2066  * \retval SECSVC_DROP failed, this request should be dropped.
2067  */
2068 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2069 {
2070         struct ptlrpc_sec_policy *policy;
2071         struct lustre_msg        *msg = req->rq_reqbuf;
2072         int                       rc;
2073         ENTRY;
2074
2075         LASSERT(msg);
2076         LASSERT(req->rq_reqmsg == NULL);
2077         LASSERT(req->rq_repmsg == NULL);
2078         LASSERT(req->rq_svc_ctx == NULL);
2079
2080         req->rq_req_swab_mask = 0;
2081
2082         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2083         switch (rc) {
2084         case 1:
2085                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
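                /* fall through: a swabbed header still unpacked successfully */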
2086         case 0:
2087                 break;
2088         default:
2089                 CERROR("error unpacking request from %s x"LPU64"\n",
2090                        libcfs_id2str(req->rq_peer), req->rq_xid);
2091                 RETURN(SECSVC_DROP);
2092         }
2093
2094         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2095         req->rq_sp_from = LUSTRE_SP_ANY;
2096         req->rq_auth_uid = -1;          /* set to INVALID_UID */
2097         req->rq_auth_mapped_uid = -1;
2098
2099         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2100         if (!policy) {
2101                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2102                 RETURN(SECSVC_DROP);
2103         }
2104
2105         LASSERT(policy->sp_sops->accept);
2106         rc = policy->sp_sops->accept(req);
2107         sptlrpc_policy_put(policy);
2108         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2109         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2110
2111         /*
2112          * if it's not the null flavor (which means the real message is
2113          * embedded), reset the swab mask for the coming inner msg unpacking.
2114          */
2115         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2116                 req->rq_req_swab_mask = 0;
2117
2118         /* sanity check for the request source */
2119         rc = sptlrpc_svc_check_from(req, rc);
2120         RETURN(rc);
2121 }
2122
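/*
 * Example (sketch only): the expected shape of the server request-in path
 * around sptlrpc_svc_unwrap_request(); the real service code does more, this
 * only shows how the three return values are meant to be handled.
 *
 *      switch (sptlrpc_svc_unwrap_request(req)) {
 *      case SECSVC_OK:         // rq_reqmsg/rq_reqlen now hold clear text
 *              break;          // go on and handle the request
 *      case SECSVC_COMPLETE:   // reply already prepared by the sec layer
 *              break;          // just send the reply
 *      case SECSVC_DROP:       // drop the request, send nothing
 *              break;
 *      }
 */
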
2123 /**
2124  * Used by ptlrpc server, to allocate the reply buffer for \a req. On success,
2125  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2126  * a buffer of \a msglen size.
2127  */
2128 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2129 {
2130         struct ptlrpc_sec_policy *policy;
2131         struct ptlrpc_reply_state *rs;
2132         int rc;
2133         ENTRY;
2134
2135         LASSERT(req->rq_svc_ctx);
2136         LASSERT(req->rq_svc_ctx->sc_policy);
2137
2138         policy = req->rq_svc_ctx->sc_policy;
2139         LASSERT(policy->sp_sops->alloc_rs);
2140
2141         rc = policy->sp_sops->alloc_rs(req, msglen);
2142         if (unlikely(rc == -ENOMEM)) {
2143                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2144                 if (svcpt->scp_service->srv_max_reply_size <
2145                    msglen + sizeof(struct ptlrpc_reply_state)) {
2146                         /* Just return failure if the size is too big */
2147                         CERROR("size of message is too big (%zd), %d allowed\n",
2148                                 msglen + sizeof(struct ptlrpc_reply_state),
2149                                 svcpt->scp_service->srv_max_reply_size);
2150                         RETURN(-ENOMEM);
2151                 }
2152
2153                 /* failed alloc, try emergency pool */
2154                 rs = lustre_get_emerg_rs(svcpt);
2155                 if (rs == NULL)
2156                         RETURN(-ENOMEM);
2157
2158                 req->rq_reply_state = rs;
2159                 rc = policy->sp_sops->alloc_rs(req, msglen);
2160                 if (rc) {
2161                         lustre_put_emerg_rs(rs);
2162                         req->rq_reply_state = NULL;
2163                 }
2164         }
2165
2166         LASSERT(rc != 0 ||
2167                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2168
2169         RETURN(rc);
2170 }
2171
2172 /**
2173  * Used by ptlrpc server, to perform transformation upon reply message.
2174  *
2175  * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
2176  * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2177  */
2178 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2179 {
2180         struct ptlrpc_sec_policy *policy;
2181         int rc;
2182         ENTRY;
2183
2184         LASSERT(req->rq_svc_ctx);
2185         LASSERT(req->rq_svc_ctx->sc_policy);
2186
2187         policy = req->rq_svc_ctx->sc_policy;
2188         LASSERT(policy->sp_sops->authorize);
2189
2190         rc = policy->sp_sops->authorize(req);
2191         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2192
2193         RETURN(rc);
2194 }
2195
2196 /**
2197  * Used by ptlrpc server, to free reply_state.
2198  */
2199 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2200 {
2201         struct ptlrpc_sec_policy *policy;
2202         unsigned int prealloc;
2203         ENTRY;
2204
2205         LASSERT(rs->rs_svc_ctx);
2206         LASSERT(rs->rs_svc_ctx->sc_policy);
2207
2208         policy = rs->rs_svc_ctx->sc_policy;
2209         LASSERT(policy->sp_sops->free_rs);
2210
2211         prealloc = rs->rs_prealloc;
2212         policy->sp_sops->free_rs(rs);
2213
2214         if (prealloc)
2215                 lustre_put_emerg_rs(rs);
2216         EXIT;
2217 }
2218
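/*
 * Example (sketch only): the server-side reply lifecycle around the helpers
 * above.  "msglen" is a hypothetical reply size; the real service code is
 * more involved, this only shows the ordering.
 *
 *      rc = sptlrpc_svc_alloc_rs(req, msglen);  // sets rq_reply_state
 *      // ... fill req->rq_reply_state->rs_msg ...
 *      rc = sptlrpc_svc_wrap_reply(req);        // transform for the wire
 *      // ... later, once the reply state is no longer needed:
 *      sptlrpc_svc_free_rs(req->rq_reply_state);
 */
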
2219 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2220 {
2221         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2222
2223         if (ctx != NULL)
2224                 atomic_inc(&ctx->sc_refcount);
2225 }
2226
2227 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2228 {
2229         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2230
2231         if (ctx == NULL)
2232                 return;
2233
2234         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2235         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2236                 if (ctx->sc_policy->sp_sops->free_ctx)
2237                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2238         }
2239         req->rq_svc_ctx = NULL;
2240 }
2241
2242 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2243 {
2244         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2245
2246         if (ctx == NULL)
2247                 return;
2248
2249         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2250         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2251                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2252 }
2253 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2254
2255 /****************************************
2256  * bulk security                        *
2257  ****************************************/
2258
2259 /**
2260  * Perform transformation upon bulk data pointed by \a desc. This is called
2261  * before transforming the request message.
2262  */
2263 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2264                           struct ptlrpc_bulk_desc *desc)
2265 {
2266         struct ptlrpc_cli_ctx *ctx;
2267
2268         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2269
2270         if (!req->rq_pack_bulk)
2271                 return 0;
2272
2273         ctx = req->rq_cli_ctx;
2274         if (ctx->cc_ops->wrap_bulk)
2275                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2276         return 0;
2277 }
2278 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2279
2280 /**
2281  * This is called after unwrapping the reply message.
2282  * Returns the number of bytes of plain text actually received, or an error code.
2283  */
2284 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2285                                  struct ptlrpc_bulk_desc *desc,
2286                                  int nob)
2287 {
2288         struct ptlrpc_cli_ctx  *ctx;
2289         int                     rc;
2290
2291         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2292
2293         if (!req->rq_pack_bulk)
2294                 return desc->bd_nob_transferred;
2295
2296         ctx = req->rq_cli_ctx;
2297         if (ctx->cc_ops->unwrap_bulk) {
2298                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2299                 if (rc < 0)
2300                         return rc;
2301         }
2302         return desc->bd_nob_transferred;
2303 }
2304 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2305
2306 /**
2307  * This is called after unwrapping the reply message.
2308  * Returns 0 on success or an error code.
2309  */
2310 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2311                                   struct ptlrpc_bulk_desc *desc)
2312 {
2313         struct ptlrpc_cli_ctx  *ctx;
2314         int                     rc;
2315
2316         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2317
2318         if (!req->rq_pack_bulk)
2319                 return 0;
2320
2321         ctx = req->rq_cli_ctx;
2322         if (ctx->cc_ops->unwrap_bulk) {
2323                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2324                 if (rc < 0)
2325                         return rc;
2326         }
2327
2328         /*
2329          * if everything is going right, nob should equal nob_transferred.
2330          * in case of privacy mode, nob_transferred needs to be adjusted.
2331          */
2332         if (desc->bd_nob != desc->bd_nob_transferred) {
2333                 CERROR("nob %d doesn't match transferred nob %d\n",
2334                        desc->bd_nob, desc->bd_nob_transferred);
2335                 return -EPROTO;
2336         }
2337
2338         return 0;
2339 }
2340 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2341
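/*
 * Example (sketch only): client-side checkpoints for secured bulk I/O.
 * Bulk data is wrapped before the request message itself is transformed and
 * unwrapped after the reply has been unwrapped; all of these are no-ops
 * unless req->rq_pack_bulk is set.  "desc" is the ptlrpc_bulk_desc attached
 * to the request.
 *
 *      rc = sptlrpc_cli_wrap_bulk(req, desc);          // before sending
 *      // ... RPC round trip and bulk transfer ...
 *      rc = sptlrpc_cli_unwrap_bulk_write(req, desc);  // for a bulk write
 *      // or, for a bulk read:
 *      // nob = sptlrpc_cli_unwrap_bulk_read(req, desc, nob);
 */
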
2342 #ifdef HAVE_SERVER_SUPPORT
2343 /**
2344  * Perform transformation upon outgoing bulk read.
2345  */
2346 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2347                           struct ptlrpc_bulk_desc *desc)
2348 {
2349         struct ptlrpc_svc_ctx *ctx;
2350
2351         LASSERT(req->rq_bulk_read);
2352
2353         if (!req->rq_pack_bulk)
2354                 return 0;
2355
2356         ctx = req->rq_svc_ctx;
2357         if (ctx->sc_policy->sp_sops->wrap_bulk)
2358                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2359
2360         return 0;
2361 }
2362 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2363
2364 /**
2365  * Perform transformation upon incoming bulk write.
2366  */
2367 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2368                             struct ptlrpc_bulk_desc *desc)
2369 {
2370         struct ptlrpc_svc_ctx *ctx;
2371         int                    rc;
2372
2373         LASSERT(req->rq_bulk_write);
2374
2375         /*
2376          * if it's in privacy mode, transferred should >= expected; otherwise
2377          * transferred should == expected.
2378          */
2379         if (desc->bd_nob_transferred < desc->bd_nob ||
2380             (desc->bd_nob_transferred > desc->bd_nob &&
2381              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2382              SPTLRPC_BULK_SVC_PRIV)) {
2383                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2384                           desc->bd_nob_transferred, desc->bd_nob);
2385                 return -ETIMEDOUT;
2386         }
2387
2388         if (!req->rq_pack_bulk)
2389                 return 0;
2390
2391         ctx = req->rq_svc_ctx;
2392         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2393                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2394                 if (rc)
2395                         CERROR("error unwrapping bulk: %d\n", rc);
2396         }
2397
2398         /* return 0 to allow the reply to be sent */
2399         return 0;
2400 }
2401 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2402
2403 /**
2404  * Prepare buffers for incoming bulk write.
2405  */
2406 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2407                           struct ptlrpc_bulk_desc *desc)
2408 {
2409         struct ptlrpc_svc_ctx *ctx;
2410
2411         LASSERT(req->rq_bulk_write);
2412
2413         if (!req->rq_pack_bulk)
2414                 return 0;
2415
2416         ctx = req->rq_svc_ctx;
2417         if (ctx->sc_policy->sp_sops->prep_bulk)
2418                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2419
2420         return 0;
2421 }
2422 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2423
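/*
 * Example (sketch only): server-side checkpoints for an incoming bulk write.
 * Buffers are prepared before the bulk transfer is started and the received
 * data is verified/decrypted once it completes; both are no-ops unless
 * req->rq_pack_bulk is set.  For an outgoing bulk read the data is instead
 * transformed with sptlrpc_svc_wrap_bulk() before it is sent back.
 *
 *      rc = sptlrpc_svc_prep_bulk(req, desc);   // before starting transfer
 *      // ... bulk transfer completes ...
 *      rc = sptlrpc_svc_unwrap_bulk(req, desc); // verify / decrypt data
 */
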
2424 #endif /* HAVE_SERVER_SUPPORT */
2425
2426 /****************************************
2427  * user descriptor helpers              *
2428  ****************************************/
2429
2430 int sptlrpc_current_user_desc_size(void)
2431 {
2432         int ngroups;
2433
2434         ngroups = current_ngroups;
2435
2436         if (ngroups > LUSTRE_MAX_GROUPS)
2437                 ngroups = LUSTRE_MAX_GROUPS;
2438         return sptlrpc_user_desc_size(ngroups);
2439 }
2440 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2441
2442 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2443 {
2444         struct ptlrpc_user_desc *pud;
2445
2446         pud = lustre_msg_buf(msg, offset, 0);
2447
2448         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2449         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2450         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2451         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2452         pud->pud_cap = cfs_curproc_cap_pack();
2453         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2454
2455         task_lock(current);
2456         if (pud->pud_ngroups > current_ngroups)
2457                 pud->pud_ngroups = current_ngroups;
2458         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2459                pud->pud_ngroups * sizeof(__u32));
2460         task_unlock(current);
2461
2462         return 0;
2463 }
2464 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2465
2466 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2467 {
2468         struct ptlrpc_user_desc *pud;
2469         int                      i;
2470
2471         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2472         if (!pud)
2473                 return -EINVAL;
2474
2475         if (swabbed) {
2476                 __swab32s(&pud->pud_uid);
2477                 __swab32s(&pud->pud_gid);
2478                 __swab32s(&pud->pud_fsuid);
2479                 __swab32s(&pud->pud_fsgid);
2480                 __swab32s(&pud->pud_cap);
2481                 __swab32s(&pud->pud_ngroups);
2482         }
2483
2484         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2485                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2486                 return -EINVAL;
2487         }
2488
2489         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2490             msg->lm_buflens[offset]) {
2491                 CERROR("%u groups are claimed but bufsize only %u\n",
2492                        pud->pud_ngroups, msg->lm_buflens[offset]);
2493                 return -EINVAL;
2494         }
2495
2496         if (swabbed) {
2497                 for (i = 0; i < pud->pud_ngroups; i++)
2498                         __swab32s(&pud->pud_groups[i]);
2499         }
2500
2501         return 0;
2502 }
2503 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2504
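/*
 * Example (sketch only): sizing, packing and unpacking a user descriptor
 * segment.  "MY_USRDESC_OFF" and "buflens" are hypothetical; the segment
 * must be sized with sptlrpc_current_user_desc_size() when the message
 * layout is computed, because sptlrpc_pack_user_desc() derives pud_ngroups
 * from the segment length.
 *
 *      buflens[MY_USRDESC_OFF] = sptlrpc_current_user_desc_size();
 *      // ... pack the lustre_msg with these buflens ...
 *      rc = sptlrpc_pack_user_desc(msg, MY_USRDESC_OFF);
 *
 * On the receiving side, sptlrpc_unpack_user_desc(msg, MY_USRDESC_OFF,
 * swabbed) validates the claimed group count against the segment size and
 * byte-swaps the fields when needed.
 */
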
2505 /****************************************
2506  * misc helpers                         *
2507  ****************************************/
2508
2509 const char *sec2target_str(struct ptlrpc_sec *sec)
2510 {
2511         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2512                 return "*";
2513         if (sec_is_reverse(sec))
2514                 return "c";
2515         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2516 }
2517 EXPORT_SYMBOL(sec2target_str);
2518
2519 /*
2520  * return true if the bulk data is protected
2521  */
2522 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2523 {
2524         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2525         case SPTLRPC_BULK_SVC_INTG:
2526         case SPTLRPC_BULK_SVC_PRIV:
2527                 return 1;
2528         default:
2529                 return 0;
2530         }
2531 }
2532 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2533
2534 /****************************************
2535  * crypto API helper/alloc blkcipher    *
2536  ****************************************/
2537
2538 /****************************************
2539  * initialize/finalize                  *
2540  ****************************************/
2541
2542 int sptlrpc_init(void)
2543 {
2544         int rc;
2545
2546         rwlock_init(&policy_lock);
2547
2548         rc = sptlrpc_gc_init();
2549         if (rc)
2550                 goto out;
2551
2552         rc = sptlrpc_conf_init();
2553         if (rc)
2554                 goto out_gc;
2555
2556         rc = sptlrpc_enc_pool_init();
2557         if (rc)
2558                 goto out_conf;
2559
2560         rc = sptlrpc_null_init();
2561         if (rc)
2562                 goto out_pool;
2563
2564         rc = sptlrpc_plain_init();
2565         if (rc)
2566                 goto out_null;
2567
2568         rc = sptlrpc_lproc_init();
2569         if (rc)
2570                 goto out_plain;
2571
2572         return 0;
2573
2574 out_plain:
2575         sptlrpc_plain_fini();
2576 out_null:
2577         sptlrpc_null_fini();
2578 out_pool:
2579         sptlrpc_enc_pool_fini();
2580 out_conf:
2581         sptlrpc_conf_fini();
2582 out_gc:
2583         sptlrpc_gc_fini();
2584 out:
2585         return rc;
2586 }
2587
2588 void sptlrpc_fini(void)
2589 {
2590         sptlrpc_lproc_fini();
2591         sptlrpc_plain_fini();
2592         sptlrpc_null_fini();
2593         sptlrpc_enc_pool_fini();
2594         sptlrpc_conf_fini();
2595         sptlrpc_gc_fini();
2596 }