1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ptlrpc/sec.c
33  *
34  * Author: Eric Mei <ericm@clusterfs.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_SEC
38
39 #include <linux/user_namespace.h>
40 #include <linux/uidgid.h>
41 #include <linux/crypto.h>
42 #include <linux/key.h>
43
44 #include <libcfs/libcfs.h>
45 #include <obd.h>
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_net.h>
49 #include <lustre_import.h>
50 #include <lustre_dlm.h>
51 #include <lustre_sec.h>
52
53 #include "ptlrpc_internal.h"
54
55 static int send_sepol;
56 module_param(send_sepol, int, 0644);
57 MODULE_PARM_DESC(send_sepol, "Client sends SELinux policy status");
58
59 /*
60  * policy registers
61  */
62
63 static rwlock_t policy_lock;
64 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
65         NULL,
66 };
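/*
 * policy_lock protects the policies[] table above. It has no static
 * initializer here, so it is presumably set up with rwlock_init() elsewhere
 * during sptlrpc start-up. The table is indexed by the SPTLRPC_POLICY_*
 * number carried in each wire flavor (see SPTLRPC_FLVR_POLICY() below).
 */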
67
68 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
69 {
70         __u16 number = policy->sp_policy;
71
72         LASSERT(policy->sp_name);
73         LASSERT(policy->sp_cops);
74         LASSERT(policy->sp_sops);
75
76         if (number >= SPTLRPC_POLICY_MAX)
77                 return -EINVAL;
78
79         write_lock(&policy_lock);
80         if (unlikely(policies[number])) {
81                 write_unlock(&policy_lock);
82                 return -EALREADY;
83         }
84         policies[number] = policy;
85         write_unlock(&policy_lock);
86
87         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
88         return 0;
89 }
90 EXPORT_SYMBOL(sptlrpc_register_policy);
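/*
 * Illustration only (a sketch, not code from any real policy module): a
 * policy typically registers a statically defined descriptor from its
 * module init function, roughly:
 *
 *	static struct ptlrpc_sec_policy example_policy = {
 *		.sp_name   = "example",
 *		.sp_policy = SPTLRPC_POLICY_GSS,
 *		.sp_owner  = THIS_MODULE,
 *		.sp_cops   = &example_cli_ops,
 *		.sp_sops   = &example_svc_ops,
 *	};
 *
 *	rc = sptlrpc_register_policy(&example_policy);
 *
 * The "example" identifiers are hypothetical; the assertions above only
 * require sp_name, sp_cops, sp_sops and a valid sp_policy number, while
 * sp_owner is what sptlrpc_wireflavor2policy() pins with try_module_get().
 */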
91
92 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
93 {
94         __u16 number = policy->sp_policy;
95
96         LASSERT(number < SPTLRPC_POLICY_MAX);
97
98         write_lock(&policy_lock);
99         if (unlikely(policies[number] == NULL)) {
100                 write_unlock(&policy_lock);
101                 CERROR("%s: already unregistered\n", policy->sp_name);
102                 return -EINVAL;
103         }
104
105         LASSERT(policies[number] == policy);
106         policies[number] = NULL;
107         write_unlock(&policy_lock);
108
109         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
110         return 0;
111 }
112 EXPORT_SYMBOL(sptlrpc_unregister_policy);
113
114 static
115 struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
116 {
117         static DEFINE_MUTEX(load_mutex);
118         static atomic_t           loaded = ATOMIC_INIT(0);
119         struct ptlrpc_sec_policy *policy;
120         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
121         __u16                     flag = 0;
122
123         if (number >= SPTLRPC_POLICY_MAX)
124                 return NULL;
125
126         while (1) {
127                 read_lock(&policy_lock);
128                 policy = policies[number];
129                 if (policy && !try_module_get(policy->sp_owner))
130                         policy = NULL;
131                 if (policy == NULL)
132                         flag = atomic_read(&loaded);
133                 read_unlock(&policy_lock);
134
135                 if (policy != NULL || flag != 0 ||
136                     number != SPTLRPC_POLICY_GSS)
137                         break;
138
139                 /* try to load gss module, once */
140                 mutex_lock(&load_mutex);
141                 if (atomic_read(&loaded) == 0) {
142                         if (request_module("ptlrpc_gss") == 0)
143                                 CDEBUG(D_SEC,
144                                        "module ptlrpc_gss loaded on demand\n");
145                         else
146                                 CERROR("Unable to load module ptlrpc_gss\n");
147
148                         atomic_set(&loaded, 1);
149                 }
150                 mutex_unlock(&load_mutex);
151         }
152
153         return policy;
154 }
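/*
 * Note on the loop above: only a GSS flavor can trigger module loading, and
 * request_module("ptlrpc_gss") is attempted at most once per module lifetime
 * ("loaded" latches to 1 whether or not the load succeeded); after the
 * attempt, the policy table is simply re-checked one more time.
 */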
155
156 __u32 sptlrpc_name2flavor_base(const char *name)
157 {
158         if (!strcmp(name, "null"))
159                 return SPTLRPC_FLVR_NULL;
160         if (!strcmp(name, "plain"))
161                 return SPTLRPC_FLVR_PLAIN;
162         if (!strcmp(name, "gssnull"))
163                 return SPTLRPC_FLVR_GSSNULL;
164         if (!strcmp(name, "krb5n"))
165                 return SPTLRPC_FLVR_KRB5N;
166         if (!strcmp(name, "krb5a"))
167                 return SPTLRPC_FLVR_KRB5A;
168         if (!strcmp(name, "krb5i"))
169                 return SPTLRPC_FLVR_KRB5I;
170         if (!strcmp(name, "krb5p"))
171                 return SPTLRPC_FLVR_KRB5P;
172         if (!strcmp(name, "skn"))
173                 return SPTLRPC_FLVR_SKN;
174         if (!strcmp(name, "ska"))
175                 return SPTLRPC_FLVR_SKA;
176         if (!strcmp(name, "ski"))
177                 return SPTLRPC_FLVR_SKI;
178         if (!strcmp(name, "skpi"))
179                 return SPTLRPC_FLVR_SKPI;
180
181         return SPTLRPC_FLVR_INVALID;
182 }
183 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
184
185 const char *sptlrpc_flavor2name_base(__u32 flvr)
186 {
187         __u32   base = SPTLRPC_FLVR_BASE(flvr);
188
189         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
190                 return "null";
191         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
192                 return "plain";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
194                 return "gssnull";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
196                 return "krb5n";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
198                 return "krb5a";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
200                 return "krb5i";
201         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
202                 return "krb5p";
203         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
204                 return "skn";
205         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
206                 return "ska";
207         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
208                 return "ski";
209         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
210                 return "skpi";
211
212         CERROR("invalid wire flavor 0x%x\n", flvr);
213         return "invalid";
214 }
215 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
216
217 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
218                                char *buf, int bufsize)
219 {
220         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
221                 snprintf(buf, bufsize, "hash:%s",
222                         sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
223         else
224                 snprintf(buf, bufsize, "%s",
225                         sptlrpc_flavor2name_base(sf->sf_rpc));
226
227         buf[bufsize - 1] = '\0';
228         return buf;
229 }
230 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
231
232 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
233 {
234         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
235
236         /*
237          * currently we don't support customized bulk specification for
238          * flavors other than plain
239          */
240         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
241                 char bspec[16];
242
243                 bspec[0] = '-';
244                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
245                 strncat(buf, bspec, bufsize);
246         }
247
248         buf[bufsize - 1] = '\0';
249         return buf;
250 }
251 EXPORT_SYMBOL(sptlrpc_flavor2name);
252
253 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
254 {
255         buf[0] = '\0';
256
257         if (flags & PTLRPC_SEC_FL_REVERSE)
258                 strlcat(buf, "reverse,", bufsize);
259         if (flags & PTLRPC_SEC_FL_ROOTONLY)
260                 strlcat(buf, "rootonly,", bufsize);
261         if (flags & PTLRPC_SEC_FL_UDESC)
262                 strlcat(buf, "udesc,", bufsize);
263         if (flags & PTLRPC_SEC_FL_BULK)
264                 strlcat(buf, "bulk,", bufsize);
265         if (buf[0] == '\0')
266                 strlcat(buf, "-,", bufsize);
267
268         return buf;
269 }
270 EXPORT_SYMBOL(sptlrpc_secflags2str);
271
272 /*
273  * client context APIs
274  */
275
276 static
277 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
278 {
279         struct vfs_cred vcred;
280         int create = 1, remove_dead = 1;
281
282         LASSERT(sec);
283         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
284
285         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
286                                      PTLRPC_SEC_FL_ROOTONLY)) {
287                 vcred.vc_uid = 0;
288                 vcred.vc_gid = 0;
289                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
290                         create = 0;
291                         remove_dead = 0;
292                 }
293         } else {
294                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
295                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
296         }
297
298         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
299                                                    remove_dead);
300 }
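/*
 * Note: for reverse or root-only secs the lookup above always uses root
 * credentials, and a reverse sec additionally neither creates a new context
 * nor reaps dead ones (create = remove_dead = 0).
 */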
301
302 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
303 {
304         atomic_inc(&ctx->cc_refcount);
305         return ctx;
306 }
307 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
308
309 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
310 {
311         struct ptlrpc_sec *sec = ctx->cc_sec;
312
313         LASSERT(sec);
314         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
315
316         if (!atomic_dec_and_test(&ctx->cc_refcount))
317                 return;
318
319         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
320 }
321 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
322
323 /**
324  * Expire the client context immediately.
325  *
326  * \pre Caller must hold at least 1 reference on the \a ctx.
327  */
328 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
329 {
330         LASSERT(ctx->cc_ops->die);
331         ctx->cc_ops->die(ctx, 0);
332 }
333 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
334
335 /**
336  * Wake up the threads that are waiting on this client context. Called
337  * after some status change has happened on \a ctx.
338  */
339 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
340 {
341         struct ptlrpc_request *req, *next;
342
343         spin_lock(&ctx->cc_lock);
344         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
345                                      rq_ctx_chain) {
346                 list_del_init(&req->rq_ctx_chain);
347                 ptlrpc_client_wake_req(req);
348         }
349         spin_unlock(&ctx->cc_lock);
350 }
351 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
352
353 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
354 {
355         LASSERT(ctx->cc_ops);
356
357         if (ctx->cc_ops->display == NULL)
358                 return 0;
359
360         return ctx->cc_ops->display(ctx, buf, bufsize);
361 }
362
363 static int import_sec_check_expire(struct obd_import *imp)
364 {
365         int adapt = 0;
366
367         write_lock(&imp->imp_sec_lock);
368         if (imp->imp_sec_expire &&
369             imp->imp_sec_expire < ktime_get_real_seconds()) {
370                 adapt = 1;
371                 imp->imp_sec_expire = 0;
372         }
373         write_unlock(&imp->imp_sec_lock);
374
375         if (!adapt)
376                 return 0;
377
378         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
379         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
380 }
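/*
 * imp_sec_expire holds the deadline of a delayed sec adapt (presumably armed
 * when a flavor change could not be applied immediately); once the deadline
 * has passed, the pending sptlrpc_import_sec_adapt() is carried out here on
 * the next access.
 */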
381
382 /**
383  * Get and validate the client side ptlrpc security facilities from
384  * \a imp. There is a race condition on client reconnect when the import is
385  * being destroyed while there are outstanding client-bound requests. In
386  * this case do not output any error messages if the import security is not
387  * found.
388  *
389  * \param[in] imp obd import associated with client
390  * \param[out] sec client side ptlrpc security
391  *
392  * \retval 0 if security retrieved successfully
393  * \retval -ve errno if there was a problem
394  */
395 static int import_sec_validate_get(struct obd_import *imp,
396                                    struct ptlrpc_sec **sec)
397 {
398         int rc;
399
400         if (unlikely(imp->imp_sec_expire)) {
401                 rc = import_sec_check_expire(imp);
402                 if (rc)
403                         return rc;
404         }
405
406         *sec = sptlrpc_import_sec_ref(imp);
407         if (*sec == NULL) {
408                 CERROR("import %p (%s) with no sec\n",
409                         imp, ptlrpc_import_state_name(imp->imp_state));
410                 return -EACCES;
411         }
412
413         if (unlikely((*sec)->ps_dying)) {
414                 CERROR("attempt to use dying sec %p\n", sec);
415                 sptlrpc_sec_put(*sec);
416                 return -EACCES;
417         }
418
419         return 0;
420 }
421
422 /**
423  * Given a \a req, find or allocate an appropriate context for it.
424  * \pre req->rq_cli_ctx == NULL.
425  *
426  * \retval 0 success, and req->rq_cli_ctx is set.
427  * \retval -ve error number, and req->rq_cli_ctx == NULL.
428  */
429 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
430 {
431         struct obd_import *imp = req->rq_import;
432         struct ptlrpc_sec *sec;
433         int rc;
434
435         ENTRY;
436
437         LASSERT(!req->rq_cli_ctx);
438         LASSERT(imp);
439
440         rc = import_sec_validate_get(imp, &sec);
441         if (rc)
442                 RETURN(rc);
443
444         req->rq_cli_ctx = get_my_ctx(sec);
445
446         sptlrpc_sec_put(sec);
447
448         if (!req->rq_cli_ctx) {
449                 CERROR("req %p: fail to get context\n", req);
450                 RETURN(-ECONNREFUSED);
451         }
452
453         RETURN(0);
454 }
455
456 /**
457  * Drop the context for \a req.
458  * \pre req->rq_cli_ctx != NULL.
459  * \post req->rq_cli_ctx == NULL.
460  *
461  * If \a sync == 0, this function should return quickly without sleep;
462  * otherwise it might trigger and wait for the whole process of sending
463  * a context-destroying RPC to the server.
464  */
465 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
466 {
467         ENTRY;
468
469         LASSERT(req);
470         LASSERT(req->rq_cli_ctx);
471
472         /*
473          * the request might be asked to release its context early while
474          * it is still on the context's waiting list.
475          */
476         if (!list_empty(&req->rq_ctx_chain)) {
477                 spin_lock(&req->rq_cli_ctx->cc_lock);
478                 list_del_init(&req->rq_ctx_chain);
479                 spin_unlock(&req->rq_cli_ctx->cc_lock);
480         }
481
482         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
483         req->rq_cli_ctx = NULL;
484         EXIT;
485 }
486
487 static
488 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
489                            struct ptlrpc_cli_ctx *oldctx,
490                            struct ptlrpc_cli_ctx *newctx)
491 {
492         struct sptlrpc_flavor   old_flvr;
493         char *reqmsg = NULL; /* to workaround old gcc */
494         int reqmsg_size;
495         int rc = 0;
496
497         LASSERT(req->rq_reqmsg);
498         LASSERT(req->rq_reqlen);
499         LASSERT(req->rq_replen);
500
501         CDEBUG(D_SEC,
502                "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
503                req, oldctx, oldctx->cc_vcred.vc_uid,
504                sec2target_str(oldctx->cc_sec), newctx, newctx->cc_vcred.vc_uid,
505                sec2target_str(newctx->cc_sec), oldctx->cc_sec,
506                oldctx->cc_sec->ps_policy->sp_name, newctx->cc_sec,
507                newctx->cc_sec->ps_policy->sp_name);
508
509         /* save flavor */
510         old_flvr = req->rq_flvr;
511
512         /* save request message */
513         reqmsg_size = req->rq_reqlen;
514         if (reqmsg_size != 0) {
515                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
516                 if (reqmsg == NULL)
517                         return -ENOMEM;
518                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
519         }
520
521         /* release old req/rep buf */
522         req->rq_cli_ctx = oldctx;
523         sptlrpc_cli_free_reqbuf(req);
524         sptlrpc_cli_free_repbuf(req);
525         req->rq_cli_ctx = newctx;
526
527         /* recalculate the flavor */
528         sptlrpc_req_set_flavor(req, 0);
529
530         /*
531          * alloc new request buffer
532          * we don't need to alloc the reply buffer here; leave it to the
533          * rest of the ptlrpc code
534          */
535         if (reqmsg_size != 0) {
536                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
537                 if (!rc) {
538                         LASSERT(req->rq_reqmsg);
539                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
540                 } else {
541                         CWARN("failed to alloc reqbuf: %d\n", rc);
542                         req->rq_flvr = old_flvr;
543                 }
544
545                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
546         }
547         return rc;
548 }
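/*
 * Summary of the switch above: the original flavor and request message are
 * saved, the req/rep buffers are freed while still attached to the old
 * context, the flavor is recomputed under the new context, and a new request
 * buffer is allocated and refilled from the saved copy. On allocation
 * failure the old flavor is restored so the caller can fall back to the old
 * context.
 */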
549
550 /**
551  * If the current context of \a req is dead somehow, e.g. we just switched
552  * flavor and thus marked the original contexts dead, we'll find a new context
553  * for it. If no switch is needed, \a req will end up with the same context.
554  *
555  * \note a request must have a context to keep other parts of the code happy.
556  * In case of any failure during the switch, we must restore the old one.
557  */
558 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
559 {
560         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
561         struct ptlrpc_cli_ctx *newctx;
562         int rc;
563
564         ENTRY;
565
566         LASSERT(oldctx);
567
568         sptlrpc_cli_ctx_get(oldctx);
569         sptlrpc_req_put_ctx(req, 0);
570
571         rc = sptlrpc_req_get_ctx(req);
572         if (unlikely(rc)) {
573                 LASSERT(!req->rq_cli_ctx);
574
575                 /* restore old ctx */
576                 req->rq_cli_ctx = oldctx;
577                 RETURN(rc);
578         }
579
580         newctx = req->rq_cli_ctx;
581         LASSERT(newctx);
582
583         if (unlikely(newctx == oldctx &&
584                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
585                 /*
586                  * still got the old dead ctx; usually this means the system is too busy
587                  */
588                 CDEBUG(D_SEC,
589                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
590                        newctx, newctx->cc_flags);
591
592                 schedule_timeout_interruptible(cfs_time_seconds(1));
593         } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
594                             == 0)) {
595                 /*
596                  * new ctx not up to date yet
597                  */
598                 CDEBUG(D_SEC,
599                        "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
600                        newctx, newctx->cc_flags);
601         } else {
602                 /*
603                  * it's possible newctx == oldctx if we're switching
604                  * subflavor with the same sec.
605                  */
606                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
607                 if (rc) {
608                         /* restore old ctx */
609                         sptlrpc_req_put_ctx(req, 0);
610                         req->rq_cli_ctx = oldctx;
611                         RETURN(rc);
612                 }
613
614                 LASSERT(req->rq_cli_ctx == newctx);
615         }
616
617         sptlrpc_cli_ctx_put(oldctx, 1);
618         RETURN(0);
619 }
620 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
621
622 static
623 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
624 {
625         if (cli_ctx_is_refreshed(ctx))
626                 return 1;
627         return 0;
628 }
629
630 static
631 void ctx_refresh_interrupt(struct ptlrpc_request *req)
632 {
633
634         spin_lock(&req->rq_lock);
635         req->rq_intr = 1;
636         spin_unlock(&req->rq_lock);
637 }
638
639 static
640 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
641 {
642         spin_lock(&ctx->cc_lock);
643         if (!list_empty(&req->rq_ctx_chain))
644                 list_del_init(&req->rq_ctx_chain);
645         spin_unlock(&ctx->cc_lock);
646 }
647
648 /**
649  * Refresh the context of \a req, if it's not up to date.
650  * \param timeout
651  * - == 0: do not wait
652  * - == MAX_SCHEDULE_TIMEOUT: wait indefinitely
653  * - > 0: not supported
654  *
655  * The status of the context may be changed by other threads at any time.
656  * We allow this race, but once we return 0, the caller will assume the
657  * context is up to date and keep using it until the owning RPC is done.
658  *
659  * \retval 0 only if the context is up to date.
660  * \retval -ve error number.
661  */
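/*
 * Illustrative usage (sketch only): a caller that must not block passes 0
 * and treats -EWOULDBLOCK as "refresh still in progress, try again later",
 * while a synchronous caller such as sptlrpc_import_check_ctx() below
 * blocks until the refresh completes:
 *
 *	rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);
 */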
662 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
663 {
664         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
665         struct ptlrpc_sec *sec;
666         int rc;
667
668         ENTRY;
669
670         LASSERT(ctx);
671
672         if (req->rq_ctx_init || req->rq_ctx_fini)
673                 RETURN(0);
674
675         if (timeout != 0 && timeout != MAX_SCHEDULE_TIMEOUT) {
676                 CERROR("req %p: invalid timeout %lu\n", req, timeout);
677                 RETURN(-EINVAL);
678         }
679
680         /*
681          * during the process a request's context might even change type
682          * (e.g. from a gss ctx to a null ctx), so on each loop we need to
683          * re-check everything
684          */
685 again:
686         rc = import_sec_validate_get(req->rq_import, &sec);
687         if (rc)
688                 RETURN(rc);
689
690         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
691                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
692                        req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
693                 req_off_ctx_list(req, ctx);
694                 sptlrpc_req_replace_dead_ctx(req);
695                 ctx = req->rq_cli_ctx;
696         }
697         sptlrpc_sec_put(sec);
698
699         if (cli_ctx_is_eternal(ctx))
700                 RETURN(0);
701
702         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
703                 LASSERT(ctx->cc_ops->refresh);
704                 ctx->cc_ops->refresh(ctx);
705         }
706         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
707
708         LASSERT(ctx->cc_ops->validate);
709         if (ctx->cc_ops->validate(ctx) == 0) {
710                 req_off_ctx_list(req, ctx);
711                 RETURN(0);
712         }
713
714         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
715                 spin_lock(&req->rq_lock);
716                 req->rq_err = 1;
717                 spin_unlock(&req->rq_lock);
718                 req_off_ctx_list(req, ctx);
719                 RETURN(-EPERM);
720         }
721
722         /*
723          * There's a subtle issue with resending RPCs. Consider the
724          * following situation:
725          *  1. the request was sent to the server.
726          *  2. recovery was started; after it finished, the request was
727          *     marked as resent.
728          *  3. the request is resent.
729          *  4. the old reply from the server is received; we accept and
730          *     verify it. This has to succeed, otherwise the error would
731          *     be seen by the application.
732          *  5. the new reply from the server is received and dropped by LNet.
733          *
734          * Note the xid of the old & new request is the same. We can't simply
735          * change the xid of the resent request because the server relies on
736          * it for reply reconstruction.
737          *
738          * Commonly the original context should be uptodate because there is
739          * still some time before it expires; the server will keep its context
740          * because we still hold a reference on the old context, which prevents
741          * the context-destroying RPC from being sent. So the server can still
742          * accept the request and finish the RPC. But if that's not the case:
743          *  1. If the server side context has been trimmed, a NO_CONTEXT will
744          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
745          *     new context by force.
746          *  2. If the current context was never refreshed, then we are fine:
747          *     we never really sent a request with the old context before.
748          */
749         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
750             unlikely(req->rq_reqmsg) &&
751             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
752                 req_off_ctx_list(req, ctx);
753                 RETURN(0);
754         }
755
756         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
757                 req_off_ctx_list(req, ctx);
758                 /*
759                  * don't switch ctx if import was deactivated
760                  */
761                 if (req->rq_import->imp_deactive) {
762                         spin_lock(&req->rq_lock);
763                         req->rq_err = 1;
764                         spin_unlock(&req->rq_lock);
765                         RETURN(-EINTR);
766                 }
767
768                 rc = sptlrpc_req_replace_dead_ctx(req);
769                 if (rc) {
770                         LASSERT(ctx == req->rq_cli_ctx);
771                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
772                                req, ctx, rc);
773                         spin_lock(&req->rq_lock);
774                         req->rq_err = 1;
775                         spin_unlock(&req->rq_lock);
776                         RETURN(rc);
777                 }
778
779                 ctx = req->rq_cli_ctx;
780                 goto again;
781         }
782
783         /*
784          * Now we're sure this context is in the middle of an upcall; add
785          * ourselves to the waiting list
786          */
787         spin_lock(&ctx->cc_lock);
788         if (list_empty(&req->rq_ctx_chain))
789                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
790         spin_unlock(&ctx->cc_lock);
791
792         if (timeout == 0)
793                 RETURN(-EWOULDBLOCK);
794
795         /* Clear any flags that may be present from previous sends */
796         LASSERT(req->rq_receiving_reply == 0);
797         spin_lock(&req->rq_lock);
798         req->rq_err = 0;
799         req->rq_timedout = 0;
800         req->rq_resend = 0;
801         req->rq_restart = 0;
802         spin_unlock(&req->rq_lock);
803
804         /* by now we know that timeout value is MAX_SCHEDULE_TIMEOUT,
805          * so wait indefinitely with non-fatal signals blocked
806          */
807         if (l_wait_event_abortable(req->rq_reply_waitq,
808                                    ctx_check_refresh(ctx)) == -ERESTARTSYS) {
809                 rc = -EINTR;
810                 ctx_refresh_interrupt(req);
811         }
812
813         /*
814          * the following cases could lead us here:
815          * - successfully refreshed;
816          * - interrupted;
817          * - timed out, and we don't want to recover from the failure;
818          * - timed out, and woken up when recovery finished;
819          * - someone else marked this ctx dead by force;
820          * - someone invalidated the req and called ptlrpc_client_wake_req(),
821          *   e.g. ptlrpc_abort_inflight();
822          */
823         if (!cli_ctx_is_refreshed(ctx)) {
824                 /* timed out or interrupted */
825                 req_off_ctx_list(req, ctx);
826
827                 LASSERT(rc != 0);
828                 RETURN(rc);
829         }
830
831         goto again;
832 }
833
834 /**
835  * Initialize flavor settings for \a req, according to \a opcode.
836  *
837  * \note this could be called in two situations:
838  * - new request from ptlrpc_pre_req(), with proper @opcode
839  * - old request which changed ctx in the middle, with @opcode == 0
840  */
841 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
842 {
843         struct ptlrpc_sec *sec;
844
845         LASSERT(req->rq_import);
846         LASSERT(req->rq_cli_ctx);
847         LASSERT(req->rq_cli_ctx->cc_sec);
848         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
849
850         /* special security flags according to opcode */
851         switch (opcode) {
852         case OST_READ:
853         case MDS_READPAGE:
854         case MGS_CONFIG_READ:
855         case OBD_IDX_READ:
856                 req->rq_bulk_read = 1;
857                 break;
858         case OST_WRITE:
859         case MDS_WRITEPAGE:
860                 req->rq_bulk_write = 1;
861                 break;
862         case SEC_CTX_INIT:
863                 req->rq_ctx_init = 1;
864                 break;
865         case SEC_CTX_FINI:
866                 req->rq_ctx_fini = 1;
867                 break;
868         case 0:
869                 /* init/fini rpc won't be resent, so it can't be here */
870                 LASSERT(req->rq_ctx_init == 0);
871                 LASSERT(req->rq_ctx_fini == 0);
872
873                 /* cleanup flags, which should be recalculated */
874                 req->rq_pack_udesc = 0;
875                 req->rq_pack_bulk = 0;
876                 break;
877         }
878
879         sec = req->rq_cli_ctx->cc_sec;
880
881         spin_lock(&sec->ps_lock);
882         req->rq_flvr = sec->ps_flvr;
883         spin_unlock(&sec->ps_lock);
884
885         /*
886          * force SVC_NULL for context initiation rpc, SVC_INTG for context
887          * destruction rpc
888          */
889         if (unlikely(req->rq_ctx_init))
890                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
891         else if (unlikely(req->rq_ctx_fini))
892                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
893
894         /* user descriptor flag, null security can't do it anyway */
895         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
896             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
897                 req->rq_pack_udesc = 1;
898
899         /* bulk security flag */
900         if ((req->rq_bulk_read || req->rq_bulk_write) &&
901             sptlrpc_flavor_has_bulk(&req->rq_flvr))
902                 req->rq_pack_bulk = 1;
903 }
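/*
 * Example of the rules above: an OST_READ request gets rq_bulk_read = 1 and,
 * if the flavor carries a bulk specification (sptlrpc_flavor_has_bulk()),
 * rq_pack_bulk = 1 as well; a SEC_CTX_INIT request is always forced to
 * SPTLRPC_SVC_NULL regardless of the configured flavor.
 */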
904
905 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
906 {
907         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
908                 return;
909
910         LASSERT(req->rq_clrbuf);
911         if (req->rq_pool || !req->rq_reqbuf)
912                 return;
913
914         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
915         req->rq_reqbuf = NULL;
916         req->rq_reqbuf_len = 0;
917 }
918
919 /**
920  * Given an import \a imp, check whether the current user has a valid context
921  * or not. We may create a new context and try to refresh it, retrying
922  * repeatedly in case of non-fatal errors. Returning 0 means success.
923  */
924 int sptlrpc_import_check_ctx(struct obd_import *imp)
925 {
926         struct ptlrpc_sec     *sec;
927         struct ptlrpc_cli_ctx *ctx;
928         struct ptlrpc_request *req = NULL;
929         int rc;
930
931         ENTRY;
932
933         might_sleep();
934
935         sec = sptlrpc_import_sec_ref(imp);
936         ctx = get_my_ctx(sec);
937         sptlrpc_sec_put(sec);
938
939         if (!ctx)
940                 RETURN(-ENOMEM);
941
942         if (cli_ctx_is_eternal(ctx) ||
943             ctx->cc_ops->validate(ctx) == 0) {
944                 sptlrpc_cli_ctx_put(ctx, 1);
945                 RETURN(0);
946         }
947
948         if (cli_ctx_is_error(ctx)) {
949                 sptlrpc_cli_ctx_put(ctx, 1);
950                 RETURN(-EACCES);
951         }
952
953         req = ptlrpc_request_cache_alloc(GFP_NOFS);
954         if (!req)
955                 RETURN(-ENOMEM);
956
957         ptlrpc_cli_req_init(req);
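        /*
         * This is a stub request used only to drive the context refresh; the
         * artificially large refcount presumably keeps it from being freed by
         * any normal request completion path.
         */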
958         atomic_set(&req->rq_refcount, 10000);
959
960         req->rq_import = imp;
961         req->rq_flvr = sec->ps_flvr;
962         req->rq_cli_ctx = ctx;
963
964         rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);
965         LASSERT(list_empty(&req->rq_ctx_chain));
966         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
967         ptlrpc_request_cache_free(req);
968
969         RETURN(rc);
970 }
971
972 /**
973  * Used by ptlrpc client, to perform the pre-defined security transformation
974  * upon the request message of \a req. After this function is called,
975  * req->rq_reqmsg is still accessible as clear text.
976  */
977 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
978 {
979         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
980         int rc = 0;
981
982         ENTRY;
983
984         LASSERT(ctx);
985         LASSERT(ctx->cc_sec);
986         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
987
988         /*
989          * we wrap bulk request here because now we can be sure
990          * the context is uptodate.
991          */
992         if (req->rq_bulk) {
993                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
994                 if (rc)
995                         RETURN(rc);
996         }
997
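        /*
         * NULL/AUTH/INTG flavors sign the request (cc_ops->sign) while PRIV
         * flavors encrypt ("seal") it (cc_ops->seal); either way rq_reqmsg
         * remains accessible as clear text afterwards, as noted in the
         * function comment above.
         */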
998         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
999         case SPTLRPC_SVC_NULL:
1000         case SPTLRPC_SVC_AUTH:
1001         case SPTLRPC_SVC_INTG:
1002                 LASSERT(ctx->cc_ops->sign);
1003                 rc = ctx->cc_ops->sign(ctx, req);
1004                 break;
1005         case SPTLRPC_SVC_PRIV:
1006                 LASSERT(ctx->cc_ops->seal);
1007                 rc = ctx->cc_ops->seal(ctx, req);
1008                 break;
1009         default:
1010                 LBUG();
1011         }
1012
1013         if (rc == 0) {
1014                 LASSERT(req->rq_reqdata_len);
1015                 LASSERT(req->rq_reqdata_len % 8 == 0);
1016                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1017         }
1018
1019         RETURN(rc);
1020 }
1021
1022 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1023 {
1024         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1025         int rc;
1026
1027         ENTRY;
1028
1029         LASSERT(ctx);
1030         LASSERT(ctx->cc_sec);
1031         LASSERT(req->rq_repbuf);
1032         LASSERT(req->rq_repdata);
1033         LASSERT(req->rq_repmsg == NULL);
1034
1035         req->rq_rep_swab_mask = 0;
1036
1037         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1038         switch (rc) {
1039         case 1:
1040                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
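                /* fall through: rc == 1 only means the reply needed swabbing */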
1041         case 0:
1042                 break;
1043         default:
1044                 CERROR("failed unpack reply: x%llu\n", req->rq_xid);
1045                 RETURN(-EPROTO);
1046         }
1047
1048         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1049                 CERROR("replied data length %d too small\n",
1050                        req->rq_repdata_len);
1051                 RETURN(-EPROTO);
1052         }
1053
1054         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1055             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1056                 CERROR("reply policy %u doesn't match request policy %u\n",
1057                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1058                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1059                 RETURN(-EPROTO);
1060         }
1061
1062         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1063         case SPTLRPC_SVC_NULL:
1064         case SPTLRPC_SVC_AUTH:
1065         case SPTLRPC_SVC_INTG:
1066                 LASSERT(ctx->cc_ops->verify);
1067                 rc = ctx->cc_ops->verify(ctx, req);
1068                 break;
1069         case SPTLRPC_SVC_PRIV:
1070                 LASSERT(ctx->cc_ops->unseal);
1071                 rc = ctx->cc_ops->unseal(ctx, req);
1072                 break;
1073         default:
1074                 LBUG();
1075         }
1076         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1077
1078         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1079             !req->rq_ctx_init)
1080                 req->rq_rep_swab_mask = 0;
1081         RETURN(rc);
1082 }
1083
1084 /**
1085  * Used by ptlrpc client, to perform security transformation upon the reply
1086  * message of \a req. After return successfully, req->rq_repmsg points to
1087  * the reply message in clear text.
1088  *
1089  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1090  * going to change.
1091  */
1092 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1093 {
1094         LASSERT(req->rq_repbuf);
1095         LASSERT(req->rq_repdata == NULL);
1096         LASSERT(req->rq_repmsg == NULL);
1097         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1098
1099         if (req->rq_reply_off == 0 &&
1100             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1101                 CERROR("real reply with offset 0\n");
1102                 return -EPROTO;
1103         }
1104
1105         if (req->rq_reply_off % 8 != 0) {
1106                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1107                 return -EPROTO;
1108         }
1109
1110         req->rq_repdata = (struct lustre_msg *)
1111                                 (req->rq_repbuf + req->rq_reply_off);
1112         req->rq_repdata_len = req->rq_nob_received;
1113
1114         return do_cli_unwrap_reply(req);
1115 }
1116
1117 /**
1118  * Used by ptlrpc client, to perform security transformation upon the early
1119  * reply message of \a req. We expect the rq_reply_off is 0, and
1120  * rq_nob_received is the early reply size.
1121  *
1122  * Because the receive buffer might be still posted, the reply data might be
1123  * changed at any time, whether or not we're holding rq_lock. For this reason
1124  * we allocate a separate ptlrpc_request and reply buffer for early reply
1125  * processing.
1126  *
1127  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1128  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1129  * \a *req_ret to release it.
1130  * \retval -ve error number, and \a req_ret will not be set.
1131  */
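/*
 * Illustrative usage (sketch only), pairing unwrap with finish:
 *
 *	struct ptlrpc_request *early_req;
 *
 *	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *	if (rc == 0) {
 *		... inspect early_req->rq_repdata / rq_repmsg ...
 *		sptlrpc_cli_finish_early_reply(early_req);
 *	}
 */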
1132 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1133                                    struct ptlrpc_request **req_ret)
1134 {
1135         struct ptlrpc_request *early_req;
1136         char *early_buf;
1137         int early_bufsz, early_size;
1138         int rc;
1139
1140         ENTRY;
1141
1142         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1143         if (early_req == NULL)
1144                 RETURN(-ENOMEM);
1145
1146         ptlrpc_cli_req_init(early_req);
1147
1148         early_size = req->rq_nob_received;
1149         early_bufsz = size_roundup_power2(early_size);
1150         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1151         if (early_buf == NULL)
1152                 GOTO(err_req, rc = -ENOMEM);
1153
1154         /* sanity checks and copy the data out; do it inside the spinlock */
1155         spin_lock(&req->rq_lock);
1156
1157         if (req->rq_replied) {
1158                 spin_unlock(&req->rq_lock);
1159                 GOTO(err_buf, rc = -EALREADY);
1160         }
1161
1162         LASSERT(req->rq_repbuf);
1163         LASSERT(req->rq_repdata == NULL);
1164         LASSERT(req->rq_repmsg == NULL);
1165
1166         if (req->rq_reply_off != 0) {
1167                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1168                 spin_unlock(&req->rq_lock);
1169                 GOTO(err_buf, rc = -EPROTO);
1170         }
1171
1172         if (req->rq_nob_received != early_size) {
1173                 /* even if another early reply arrived, the size should be the same */
1174                 CERROR("data size has changed from %u to %u\n",
1175                        early_size, req->rq_nob_received);
1176                 spin_unlock(&req->rq_lock);
1177                 GOTO(err_buf, rc = -EINVAL);
1178         }
1179
1180         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1181                 CERROR("early reply length %d too small\n",
1182                        req->rq_nob_received);
1183                 spin_unlock(&req->rq_lock);
1184                 GOTO(err_buf, rc = -EALREADY);
1185         }
1186
1187         memcpy(early_buf, req->rq_repbuf, early_size);
1188         spin_unlock(&req->rq_lock);
1189
1190         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1191         early_req->rq_flvr = req->rq_flvr;
1192         early_req->rq_repbuf = early_buf;
1193         early_req->rq_repbuf_len = early_bufsz;
1194         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1195         early_req->rq_repdata_len = early_size;
1196         early_req->rq_early = 1;
1197         early_req->rq_reqmsg = req->rq_reqmsg;
1198
1199         rc = do_cli_unwrap_reply(early_req);
1200         if (rc) {
1201                 DEBUG_REQ(D_ADAPTTO, early_req,
1202                           "unwrap early reply: rc = %d", rc);
1203                 GOTO(err_ctx, rc);
1204         }
1205
1206         LASSERT(early_req->rq_repmsg);
1207         *req_ret = early_req;
1208         RETURN(0);
1209
1210 err_ctx:
1211         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1212 err_buf:
1213         OBD_FREE_LARGE(early_buf, early_bufsz);
1214 err_req:
1215         ptlrpc_request_cache_free(early_req);
1216         RETURN(rc);
1217 }
1218
1219 /**
1220  * Used by ptlrpc client, to release a processed early reply \a early_req.
1221  *
1222  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1223  */
1224 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1225 {
1226         LASSERT(early_req->rq_repbuf);
1227         LASSERT(early_req->rq_repdata);
1228         LASSERT(early_req->rq_repmsg);
1229
1230         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1231         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1232         ptlrpc_request_cache_free(early_req);
1233 }
1234
1235 /**************************************************
1236  * sec ID                                         *
1237  **************************************************/
1238
1239 /*
1240  * "fixed" sec (e.g. null) use sec_id < 0
1241  */
1242 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1243
1244 int sptlrpc_get_next_secid(void)
1245 {
1246         return atomic_inc_return(&sptlrpc_sec_id);
1247 }
1248 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1249
1250 /*
1251  * client side high-level security APIs
1252  */
1253
1254 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1255                                    int grace, int force)
1256 {
1257         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1258
1259         LASSERT(policy->sp_cops);
1260         LASSERT(policy->sp_cops->flush_ctx_cache);
1261
1262         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1263 }
1264
1265 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1266 {
1267         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1268
1269         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1270         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1271         LASSERT(policy->sp_cops->destroy_sec);
1272
1273         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1274
1275         policy->sp_cops->destroy_sec(sec);
1276         sptlrpc_policy_put(policy);
1277 }
1278
1279 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1280 {
1281         sec_cop_destroy_sec(sec);
1282 }
1283 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1284
1285 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1286 {
1287         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1288
1289         if (sec->ps_policy->sp_cops->kill_sec) {
1290                 sec->ps_policy->sp_cops->kill_sec(sec);
1291
1292                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1293         }
1294 }
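/*
 * Note: if the policy implements kill_sec, it is notified and every cached
 * context is then force-flushed (uid -1, grace, force). The sec itself is
 * only freed later, when its last reference is dropped via sptlrpc_sec_put().
 */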
1295
1296 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1297 {
1298         if (sec)
1299                 atomic_inc(&sec->ps_refcount);
1300
1301         return sec;
1302 }
1303 EXPORT_SYMBOL(sptlrpc_sec_get);
1304
1305 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1306 {
1307         if (sec) {
1308                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1309
1310                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1311                         sptlrpc_gc_del_sec(sec);
1312                         sec_cop_destroy_sec(sec);
1313                 }
1314         }
1315 }
1316 EXPORT_SYMBOL(sptlrpc_sec_put);
1317
1318 /*
1319  * the policy module is responsible for taking a reference on the import
1320  */
1321 static
1322 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1323                                        struct ptlrpc_svc_ctx *svc_ctx,
1324                                        struct sptlrpc_flavor *sf,
1325                                        enum lustre_sec_part sp)
1326 {
1327         struct ptlrpc_sec_policy *policy;
1328         struct ptlrpc_sec *sec;
1329         char str[32];
1330
1331         ENTRY;
1332
1333         if (svc_ctx) {
1334                 LASSERT(imp->imp_dlm_fake == 1);
1335
1336                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1337                        imp->imp_obd->obd_type->typ_name,
1338                        imp->imp_obd->obd_name,
1339                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1340
1341                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1342                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1343         } else {
1344                 LASSERT(imp->imp_dlm_fake == 0);
1345
1346                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1347                        imp->imp_obd->obd_type->typ_name,
1348                        imp->imp_obd->obd_name,
1349                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1350
1351                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1352                 if (!policy) {
1353                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1354                         RETURN(NULL);
1355                 }
1356         }
1357
1358         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1359         if (sec) {
1360                 atomic_inc(&sec->ps_refcount);
1361
1362                 sec->ps_part = sp;
1363
1364                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1365                         sptlrpc_gc_add_sec(sec);
1366         } else {
1367                 sptlrpc_policy_put(policy);
1368         }
1369
1370         RETURN(sec);
1371 }
1372
1373 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1374 {
1375         struct ptlrpc_sec *sec;
1376
1377         read_lock(&imp->imp_sec_lock);
1378         sec = sptlrpc_sec_get(imp->imp_sec);
1379         read_unlock(&imp->imp_sec_lock);
1380
1381         return sec;
1382 }
1383 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1384
1385 static void sptlrpc_import_sec_install(struct obd_import *imp,
1386                                        struct ptlrpc_sec *sec)
1387 {
1388         struct ptlrpc_sec *old_sec;
1389
1390         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1391
1392         write_lock(&imp->imp_sec_lock);
1393         old_sec = imp->imp_sec;
1394         imp->imp_sec = sec;
1395         write_unlock(&imp->imp_sec_lock);
1396
1397         if (old_sec) {
1398                 sptlrpc_sec_kill(old_sec);
1399
1400                 /* balance the ref taken by this import */
1401                 sptlrpc_sec_put(old_sec);
1402         }
1403 }
1404
1405 static inline
1406 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1407 {
1408         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1409 }
1410
1411 static inline
1412 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1413 {
1414         *dst = *src;
1415 }
1416
1417 /**
1418  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1419  * configuration. Upon called, imp->imp_sec may or may not be NULL.
1420  *
1421  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1422  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1423  */
1424 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1425                              struct ptlrpc_svc_ctx *svc_ctx,
1426                              struct sptlrpc_flavor *flvr)
1427 {
1428         struct ptlrpc_connection *conn;
1429         struct sptlrpc_flavor sf;
1430         struct ptlrpc_sec *sec, *newsec;
1431         enum lustre_sec_part sp;
1432         char str[24];
1433         int rc = 0;
1434
1435         ENTRY;
1436
1437         might_sleep();
1438
1439         if (imp == NULL)
1440                 RETURN(0);
1441
1442         conn = imp->imp_connection;
1443
1444         if (svc_ctx == NULL) {
1445                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1446                 /*
1447                  * normal import, determine flavor from rule set, except
1448                  * normal import: determine the flavor from the rule set,
1449                  * except for the MGC, whose flavor is predetermined.
1450                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1451                         sf = cliobd->cl_flvr_mgc;
1452                 else
1453                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1454                                                    cliobd->cl_sp_to,
1455                                                    &cliobd->cl_target_uuid,
1456                                                    conn->c_self, &sf);
1457
1458                 sp = imp->imp_obd->u.cli.cl_sp_me;
1459         } else {
1460                 /* reverse import: determine the flavor from the incoming request */
1461                 sf = *flvr;
1462
1463                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1464                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1465                                       PTLRPC_SEC_FL_ROOTONLY;
1466
1467                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1468         }
1469
1470         sec = sptlrpc_import_sec_ref(imp);
1471         if (sec) {
1472                 char str2[24];
1473
1474                 if (flavor_equal(&sf, &sec->ps_flvr))
1475                         GOTO(out, rc);
1476
1477                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1478                        imp->imp_obd->obd_name,
1479                        obd_uuid2str(&conn->c_remote_uuid),
1480                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1481                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1482         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1483                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1484                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1485                        imp->imp_obd->obd_name,
1486                        obd_uuid2str(&conn->c_remote_uuid),
1487                        LNET_NIDNET(conn->c_self),
1488                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1489         }
1490
1491         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1492         if (newsec) {
1493                 sptlrpc_import_sec_install(imp, newsec);
1494         } else {
1495                 CERROR("import %s->%s: failed to create new sec\n",
1496                        imp->imp_obd->obd_name,
1497                        obd_uuid2str(&conn->c_remote_uuid));
1498                 rc = -EPERM;
1499         }
1500
1501 out:
1502         sptlrpc_sec_put(sec);
1503         RETURN(rc);
1504 }
1505
1506 void sptlrpc_import_sec_put(struct obd_import *imp)
1507 {
1508         if (imp->imp_sec) {
1509                 sptlrpc_sec_kill(imp->imp_sec);
1510
1511                 sptlrpc_sec_put(imp->imp_sec);
1512                 imp->imp_sec = NULL;
1513         }
1514 }
1515
1516 static void import_flush_ctx_common(struct obd_import *imp,
1517                                     uid_t uid, int grace, int force)
1518 {
1519         struct ptlrpc_sec *sec;
1520
1521         if (imp == NULL)
1522                 return;
1523
1524         sec = sptlrpc_import_sec_ref(imp);
1525         if (sec == NULL)
1526                 return;
1527
1528         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1529         sptlrpc_sec_put(sec);
1530 }
1531
1532 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1533 {
1534         /*
1535          * it's important to use grace mode, see the explanation in
1536          * sptlrpc_req_refresh_ctx()
1537          */
1538         import_flush_ctx_common(imp, 0, 1, 1);
1539 }
1540
1541 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1542 {
1543         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1544                                 1, 1);
1545 }
1546 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1547
1548 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1549 {
1550         import_flush_ctx_common(imp, -1, 1, 1);
1551 }
1552 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
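/*
 * The three flush helpers above differ only in the uid they pass to
 * sec_cop_flush_ctx_cache(): 0 flushes root's contexts, the current uid
 * flushes the caller's own contexts, and -1 flushes them all; each uses
 * grace mode and force.
 */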
1553
1554 /**
1555  * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1556  * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1557  */
1558 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1559 {
1560         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1561         struct ptlrpc_sec_policy *policy;
1562         int rc;
1563
1564         LASSERT(ctx);
1565         LASSERT(ctx->cc_sec);
1566         LASSERT(ctx->cc_sec->ps_policy);
1567         LASSERT(req->rq_reqmsg == NULL);
1568         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1569
1570         policy = ctx->cc_sec->ps_policy;
1571         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1572         if (!rc) {
1573                 LASSERT(req->rq_reqmsg);
1574                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1575
1576                 /* zeroing preallocated buffer */
1577                 if (req->rq_pool)
1578                         memset(req->rq_reqmsg, 0, msgsize);
1579         }
1580
1581         return rc;
1582 }
1583
1584 /**
1585  * Used by ptlrpc client to free request buffer of \a req. After this
1586  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1587  */
1588 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1589 {
1590         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1591         struct ptlrpc_sec_policy *policy;
1592
1593         LASSERT(ctx);
1594         LASSERT(ctx->cc_sec);
1595         LASSERT(ctx->cc_sec->ps_policy);
1596         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1597
1598         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1599                 return;
1600
1601         policy = ctx->cc_sec->ps_policy;
1602         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1603         req->rq_reqmsg = NULL;
1604 }
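
/*
 * A minimal usage sketch (not taken from this file): the two helpers above
 * bracket a request's lifetime symmetrically, roughly
 *
 *	rc = sptlrpc_cli_alloc_reqbuf(req, msgsize);
 *	... fill req->rq_reqmsg and send the request ...
 *	sptlrpc_cli_free_reqbuf(req);
 *
 * with msgsize computed by the caller from the intended message layout.
 */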
1605
1606 /*
1607  * NOTE caller must guarantee the buffer size is enough for the enlargement
1608  */
1609 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1610                                   int segment, int newsize)
1611 {
1612         void *src, *dst;
1613         int oldsize, oldmsg_size, movesize;
1614
1615         LASSERT(segment < msg->lm_bufcount);
1616         LASSERT(msg->lm_buflens[segment] <= newsize);
1617
1618         if (msg->lm_buflens[segment] == newsize)
1619                 return;
1620
1621         /* no data to move if we are enlarging the last segment */
1622         if (segment == msg->lm_bufcount - 1) {
1623                 msg->lm_buflens[segment] = newsize;
1624                 return;
1625         }
1626
1627         oldsize = msg->lm_buflens[segment];
1628
1629         src = lustre_msg_buf(msg, segment + 1, 0);
1630         msg->lm_buflens[segment] = newsize;
1631         dst = lustre_msg_buf(msg, segment + 1, 0);
1632         msg->lm_buflens[segment] = oldsize;
1633
1634         /* move from segment + 1 to end segment */
1635         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1636         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1637         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1638         LASSERT(movesize >= 0);
1639
1640         if (movesize)
1641                 memmove(dst, src, movesize);
1642
1643         /* note we don't clear the area where the old data lived, not secret */
1644
1645         /* finally set new segment size */
1646         msg->lm_buflens[segment] = newsize;
1647 }
1648 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1649
1650 /**
1651  * Used by the ptlrpc client to enlarge the \a segment of the request message
1652  * pointed to by req->rq_reqmsg to size \a newsize. All previously filled-in
1653  * data is preserved after the enlargement. This must be called after the
1654  * original request buffer has been allocated.
1655  *
1656  * \note after this is called, rq_reqmsg and rq_reqlen might have changed,
1657  * so the caller should refresh its local pointers if needed.
1658  */
1659 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1660                                const struct req_msg_field *field,
1661                                int newsize)
1662 {
1663         struct req_capsule *pill = &req->rq_pill;
1664         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1665         struct ptlrpc_sec_cops *cops;
1666         struct lustre_msg *msg = req->rq_reqmsg;
1667         int segment = __req_capsule_offset(pill, field, RCL_CLIENT);
1668
1669         LASSERT(ctx);
1670         LASSERT(msg);
1671         LASSERT(msg->lm_bufcount > segment);
1672         LASSERT(msg->lm_buflens[segment] <= newsize);
1673
1674         if (msg->lm_buflens[segment] == newsize)
1675                 return 0;
1676
1677         cops = ctx->cc_sec->ps_policy->sp_cops;
1678         LASSERT(cops->enlarge_reqbuf);
1679         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1680 }
1681 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
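
/*
 * A minimal sketch, assuming RMF_EADATA and newsize only as illustrative
 * placeholders: a caller enlarging one segment should re-fetch any cached
 * pointers afterwards, since rq_reqmsg may have been reallocated:
 *
 *	rc = sptlrpc_cli_enlarge_reqbuf(req, &RMF_EADATA, newsize);
 *	if (rc)
 *		return rc;
 *	buf = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
 */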
1682
1683 /**
1684  * Used by ptlrpc client to allocate reply buffer of \a req.
1685  *
1686  * \note After this, req->rq_repmsg is still not accessible.
1687  */
1688 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1689 {
1690         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1691         struct ptlrpc_sec_policy *policy;
1692
1693         ENTRY;
1694
1695         LASSERT(ctx);
1696         LASSERT(ctx->cc_sec);
1697         LASSERT(ctx->cc_sec->ps_policy);
1698
1699         if (req->rq_repbuf)
1700                 RETURN(0);
1701
1702         policy = ctx->cc_sec->ps_policy;
1703         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1704 }
1705
1706 /**
1707  * Used by ptlrpc client to free reply buffer of \a req. After this
1708  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1709  */
1710 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1711 {
1712         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1713         struct ptlrpc_sec_policy *policy;
1714
1715         ENTRY;
1716
1717         LASSERT(ctx);
1718         LASSERT(ctx->cc_sec);
1719         LASSERT(ctx->cc_sec->ps_policy);
1720         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1721
1722         if (req->rq_repbuf == NULL)
1723                 return;
1724         LASSERT(req->rq_repbuf_len);
1725
1726         policy = ctx->cc_sec->ps_policy;
1727         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1728         req->rq_repmsg = NULL;
1729         EXIT;
1730 }
1731 EXPORT_SYMBOL(sptlrpc_cli_free_repbuf);
1732
1733 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1734                                 struct ptlrpc_cli_ctx *ctx)
1735 {
1736         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1737
1738         if (!policy->sp_cops->install_rctx)
1739                 return 0;
1740         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1741 }
1742
1743 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1744                                 struct ptlrpc_svc_ctx *ctx)
1745 {
1746         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1747
1748         if (!policy->sp_sops->install_rctx)
1749                 return 0;
1750         return policy->sp_sops->install_rctx(imp, ctx);
1751 }
1752
1753 /* Get SELinux policy info from userspace */
1754 static int sepol_helper(struct obd_import *imp)
1755 {
1756         char mtime_str[21] = { 0 }, mode_str[2] = { 0 };
1757         char *argv[] = {
1758                 [0] = "/usr/sbin/l_getsepol",
1759                 [1] = "-o",
1760                 [2] = NULL,         /* obd type */
1761                 [3] = "-n",
1762                 [4] = NULL,         /* obd name */
1763                 [5] = "-t",
1764                 [6] = mtime_str,    /* policy mtime */
1765                 [7] = "-m",
1766                 [8] = mode_str,     /* enforcing mode */
1767                 [9] = NULL
1768         };
1769         char *envp[] = {
1770                 [0] = "HOME=/",
1771                 [1] = "PATH=/sbin:/usr/sbin",
1772                 [2] = NULL
1773         };
1774         signed short ret;
1775         int rc = 0;
1776
1777         if (imp == NULL || imp->imp_obd == NULL ||
1778             imp->imp_obd->obd_type == NULL) {
1779                 rc = -EINVAL;
1780         } else {
1781                 argv[2] = (char *)imp->imp_obd->obd_type->typ_name;
1782                 argv[4] = imp->imp_obd->obd_name;
1783                 spin_lock(&imp->imp_sec->ps_lock);
1784                 if (imp->imp_sec->ps_sepol_mtime == 0 &&
1785                     imp->imp_sec->ps_sepol[0] == '\0') {
1786                         /* ps_sepol has not been initialized */
1787                         argv[5] = NULL;
1788                         argv[7] = NULL;
1789                 } else {
1790                         snprintf(mtime_str, sizeof(mtime_str), "%lu",
1791                                  imp->imp_sec->ps_sepol_mtime);
1792                         mode_str[0] = imp->imp_sec->ps_sepol[0];
1793                 }
1794                 spin_unlock(&imp->imp_sec->ps_lock);
1795                 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
1796                 rc = ret >> 8;
1797         }
1798
1799         return rc;
1800 }
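
/*
 * For reference, the usermode helper call above is equivalent to running
 *
 *	/usr/sbin/l_getsepol -o <obd type> -n <obd name> \
 *		-t <policy mtime> -m <enforcing mode>
 *
 * with the -t/-m pair omitted on the very first call, before ps_sepol has
 * been initialized.
 */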
1801
1802 static inline int sptlrpc_sepol_needs_check(struct ptlrpc_sec *imp_sec)
1803 {
1804         ktime_t checknext;
1805
1806         if (send_sepol == 0 || !selinux_is_enabled())
1807                 return 0;
1808
1809         if (send_sepol == -1)
1810                 /* send_sepol == -1 means fetch sepol status every time */
1811                 return 1;
1812
1813         spin_lock(&imp_sec->ps_lock);
1814         checknext = imp_sec->ps_sepol_checknext;
1815         spin_unlock(&imp_sec->ps_lock);
1816
1817         /* next check is scheduled too far in the future, update it */
1818         if (ktime_after(checknext,
1819                         ktime_add(ktime_get(), ktime_set(send_sepol, 0))))
1820                 goto setnext;
1821
1822         if (ktime_before(ktime_get(), checknext))
1823                 /* too early to fetch sepol status */
1824                 return 0;
1825
1826 setnext:
1827         /* define new sepol_checknext time */
1828         spin_lock(&imp_sec->ps_lock);
1829         imp_sec->ps_sepol_checknext = ktime_add(ktime_get(),
1830                                                 ktime_set(send_sepol, 0));
1831         spin_unlock(&imp_sec->ps_lock);
1832
1833         return 1;
1834 }
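
/*
 * A hedged tuning note, assuming the usual sysfs path for module parameters
 * (not stated in this file): since send_sepol is a 0644 module parameter, the
 * interval can normally be changed at runtime with something like
 *
 *	echo 60 > /sys/module/ptlrpc/parameters/send_sepol
 *
 * where 0 disables reporting, -1 refreshes the SELinux status on every
 * request, and a positive value N rechecks at most once every N seconds.
 */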
1835
1836 int sptlrpc_get_sepol(struct ptlrpc_request *req)
1837 {
1838         struct ptlrpc_sec *imp_sec = req->rq_import->imp_sec;
1839         int rc = 0;
1840
1841         ENTRY;
1842
1843         req->rq_sepol[0] = '\0';
1844
1845 #ifndef HAVE_SELINUX
1846         if (unlikely(send_sepol != 0))
1847                 CDEBUG(D_SEC,
1848                        "Client cannot report SELinux status, it was not built against libselinux.\n");
1849         RETURN(0);
1850 #endif
1851
1852         if (send_sepol == 0 || !selinux_is_enabled())
1853                 RETURN(0);
1854
1855         if (imp_sec == NULL)
1856                 RETURN(-EINVAL);
1857
1858         /* Retrieve SELinux status info */
1859         if (sptlrpc_sepol_needs_check(imp_sec))
1860                 rc = sepol_helper(req->rq_import);
1861         if (likely(rc == 0)) {
1862                 spin_lock(&imp_sec->ps_lock);
1863                 memcpy(req->rq_sepol, imp_sec->ps_sepol,
1864                        sizeof(req->rq_sepol));
1865                 spin_unlock(&imp_sec->ps_lock);
1866         }
1867
1868         RETURN(rc);
1869 }
1870 EXPORT_SYMBOL(sptlrpc_get_sepol);
1871
1872 /*
1873  * server side security
1874  */
1875
1876 static int flavor_allowed(struct sptlrpc_flavor *exp,
1877                           struct ptlrpc_request *req)
1878 {
1879         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1880
1881         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1882                 return 1;
1883
1884         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1885             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1886             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1887             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1888                 return 1;
1889
1890         return 0;
1891 }
1892
1893 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
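
/*
 * A sketch of the flavor bookkeeping used below, as reconstructed from the
 * code rather than quoted from it:
 *
 *	exp_flvr        - flavor currently expected from the peer
 *	exp_flvr_old[0] - previous flavor, accepted until exp_flvr_expire[0]
 *	                  (the "middle" one)
 *	exp_flvr_old[1] - the one before that, accepted until
 *	                  exp_flvr_expire[1] (the "oldest" one)
 *
 * When a flavor change is committed, the old current flavor is pushed into
 * slot [0] and given an expiry EXP_FLVR_UPDATE_EXPIRE seconds in the future.
 */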
1894
1895 /**
1896  * Given an export \a exp, check whether the flavor of incoming \a req
1897  * is allowed by the export \a exp. The main logic is about handling
1898  * configuration changes. Returns 0 on success.
1899  */
1900 int sptlrpc_target_export_check(struct obd_export *exp,
1901                                 struct ptlrpc_request *req)
1902 {
1903         struct sptlrpc_flavor   flavor;
1904
1905         if (exp == NULL)
1906                 return 0;
1907
1908         /*
1909          * client side export has no imp_reverse, skip
1910          * FIXME maybe we should check the flavor here as well???
1911          */
1912         if (exp->exp_imp_reverse == NULL)
1913                 return 0;
1914
1915         /* don't care about ctx fini rpc */
1916         if (req->rq_ctx_fini)
1917                 return 0;
1918
1919         spin_lock(&exp->exp_lock);
1920
1921         /*
1922          * if flavor just changed (exp->exp_flvr_changed != 0), we wait for
1923          * the first req with the new flavor, then treat it as the current flavor
1924          * and adapt the reverse sec accordingly.
1925          * note the first rpc with the new flavor might not be with a root ctx, in
1926          * which case delay the sec_adapt by leaving exp_flvr_adapt == 1.
1927          */
1928         if (unlikely(exp->exp_flvr_changed) &&
1929             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1930                 /*
1931                  * make the new flavor the "current" one, and the old ones
1932                  * about-to-expire
1933                  */
1934                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1935                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1936                 flavor = exp->exp_flvr_old[1];
1937                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1938                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1939                 exp->exp_flvr_old[0] = exp->exp_flvr;
1940                 exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
1941                                           EXP_FLVR_UPDATE_EXPIRE;
1942                 exp->exp_flvr = flavor;
1943
1944                 /* flavor change finished */
1945                 exp->exp_flvr_changed = 0;
1946                 LASSERT(exp->exp_flvr_adapt == 1);
1947
1948                 /* if it's gss, we are only interested in root ctx init */
1949                 if (req->rq_auth_gss &&
1950                     !(req->rq_ctx_init &&
1951                     (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1952                     req->rq_auth_usr_ost))) {
1953                         spin_unlock(&exp->exp_lock);
1954                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1955                                req->rq_auth_gss, req->rq_ctx_init,
1956                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1957                                req->rq_auth_usr_ost);
1958                         return 0;
1959                 }
1960
1961                 exp->exp_flvr_adapt = 0;
1962                 spin_unlock(&exp->exp_lock);
1963
1964                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1965                                                 req->rq_svc_ctx, &flavor);
1966         }
1967
1968         /*
1969          * if it equals the current flavor, we accept it, but we still need
1970          * to deal with the reverse sec/ctx
1971          */
1972         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1973                 /*
1974                  * most cases should return here, we are only interested in
1975                  * gss root ctx init
1976                  */
1977                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1978                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1979                      !req->rq_auth_usr_ost)) {
1980                         spin_unlock(&exp->exp_lock);
1981                         return 0;
1982                 }
1983
1984                 /*
1985                  * if the flavor just changed, we should not proceed; just leave
1986                  * it, the current flavor will be discovered and replaced
1987                  * shortly, and let _this_ rpc pass through
1988                  */
1989                 if (exp->exp_flvr_changed) {
1990                         LASSERT(exp->exp_flvr_adapt);
1991                         spin_unlock(&exp->exp_lock);
1992                         return 0;
1993                 }
1994
1995                 if (exp->exp_flvr_adapt) {
1996                         exp->exp_flvr_adapt = 0;
1997                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1998                                exp, exp->exp_flvr.sf_rpc,
1999                                exp->exp_flvr_old[0].sf_rpc,
2000                                exp->exp_flvr_old[1].sf_rpc);
2001                         flavor = exp->exp_flvr;
2002                         spin_unlock(&exp->exp_lock);
2003
2004                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
2005                                                         req->rq_svc_ctx,
2006                                                         &flavor);
2007                 } else {
2008                         CDEBUG(D_SEC,
2009                                "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
2010                                exp, exp->exp_flvr.sf_rpc,
2011                                exp->exp_flvr_old[0].sf_rpc,
2012                                exp->exp_flvr_old[1].sf_rpc);
2013                         spin_unlock(&exp->exp_lock);
2014
2015                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
2016                                                            req->rq_svc_ctx);
2017                 }
2018         }
2019
2020         if (exp->exp_flvr_expire[0]) {
2021                 if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
2022                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
2023                                 CDEBUG(D_SEC,
2024                                        "exp %p (%x|%x|%x): match the middle one (%lld)\n",
2025                                        exp, exp->exp_flvr.sf_rpc,
2026                                        exp->exp_flvr_old[0].sf_rpc,
2027                                        exp->exp_flvr_old[1].sf_rpc,
2028                                        (s64)(exp->exp_flvr_expire[0] -
2029                                              ktime_get_real_seconds()));
2030                                 spin_unlock(&exp->exp_lock);
2031                                 return 0;
2032                         }
2033                 } else {
2034                         CDEBUG(D_SEC, "mark middle expired\n");
2035                         exp->exp_flvr_expire[0] = 0;
2036                 }
2037                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
2038                        exp->exp_flvr.sf_rpc,
2039                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2040                        req->rq_flvr.sf_rpc);
2041         }
2042
2043         /*
2044          * now it doesn't match the current flavor, the only chance to
2045          * accept it is to match an old flavor which has not expired.
2046          */
2047         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
2048                 if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
2049                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
2050                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
2051                                        exp,
2052                                        exp->exp_flvr.sf_rpc,
2053                                        exp->exp_flvr_old[0].sf_rpc,
2054                                        exp->exp_flvr_old[1].sf_rpc,
2055                                        (s64)(exp->exp_flvr_expire[1] -
2056                                        ktime_get_real_seconds()));
2057                                 spin_unlock(&exp->exp_lock);
2058                                 return 0;
2059                         }
2060                 } else {
2061                         CDEBUG(D_SEC, "mark oldest expired\n");
2062                         exp->exp_flvr_expire[1] = 0;
2063                 }
2064                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
2065                        exp, exp->exp_flvr.sf_rpc,
2066                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2067                        req->rq_flvr.sf_rpc);
2068         } else {
2069                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
2070                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
2071                        exp->exp_flvr_old[1].sf_rpc);
2072         }
2073
2074         spin_unlock(&exp->exp_lock);
2075
2076         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
2077               exp, exp->exp_obd->obd_name,
2078               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
2079               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
2080               req->rq_flvr.sf_rpc,
2081               exp->exp_flvr.sf_rpc,
2082               exp->exp_flvr_old[0].sf_rpc,
2083               exp->exp_flvr_expire[0] ?
2084               (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
2085               exp->exp_flvr_old[1].sf_rpc,
2086               exp->exp_flvr_expire[1] ?
2087               (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
2088         return -EACCES;
2089 }
2090 EXPORT_SYMBOL(sptlrpc_target_export_check);
2091
2092 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
2093                                       struct sptlrpc_rule_set *rset)
2094 {
2095         struct obd_export *exp;
2096         struct sptlrpc_flavor new_flvr;
2097
2098         LASSERT(obd);
2099
2100         spin_lock(&obd->obd_dev_lock);
2101
2102         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2103                 if (exp->exp_connection == NULL)
2104                         continue;
2105
2106                 /*
2107                  * note if this export's flavor had just been updated
2108                  * (exp_flvr_changed == 1), this will override the
2109                  * previous one.
2110                  */
2111                 spin_lock(&exp->exp_lock);
2112                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
2113                                              exp->exp_connection->c_peer.nid,
2114                                              &new_flvr);
2115                 if (exp->exp_flvr_changed ||
2116                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
2117                         exp->exp_flvr_old[1] = new_flvr;
2118                         exp->exp_flvr_expire[1] = 0;
2119                         exp->exp_flvr_changed = 1;
2120                         exp->exp_flvr_adapt = 1;
2121
2122                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
2123                                exp, sptlrpc_part2name(exp->exp_sp_peer),
2124                                exp->exp_flvr.sf_rpc,
2125                                exp->exp_flvr_old[1].sf_rpc);
2126                 }
2127                 spin_unlock(&exp->exp_lock);
2128         }
2129
2130         spin_unlock(&obd->obd_dev_lock);
2131 }
2132 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
2133
2134 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2135 {
2136         /* peer's claim is unreliable unless gss is being used */
2137         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2138                 return svc_rc;
2139
2140         switch (req->rq_sp_from) {
2141         case LUSTRE_SP_CLI:
2142                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2143                         /* The below message is checked in sanity-sec test_33 */
2144                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2145                         svc_rc = SECSVC_DROP;
2146                 }
2147                 break;
2148         case LUSTRE_SP_MDT:
2149                 if (!req->rq_auth_usr_mdt) {
2150                         /* The below message is checked in sanity-sec test_33 */
2151                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2152                         svc_rc = SECSVC_DROP;
2153                 }
2154                 break;
2155         case LUSTRE_SP_OST:
2156                 if (!req->rq_auth_usr_ost) {
2157                         /* The below message is checked in sanity-sec test_33 */
2158                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2159                         svc_rc = SECSVC_DROP;
2160                 }
2161                 break;
2162         case LUSTRE_SP_MGS:
2163         case LUSTRE_SP_MGC:
2164                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2165                     !req->rq_auth_usr_ost) {
2166                         /* The below message is checked in sanity-sec test_33 */
2167                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2168                         svc_rc = SECSVC_DROP;
2169                 }
2170                 break;
2171         case LUSTRE_SP_ANY:
2172         default:
2173                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2174                 svc_rc = SECSVC_DROP;
2175         }
2176
2177         return svc_rc;
2178 }
2179
2180 /**
2181  * Used by the ptlrpc server to perform a transformation upon the request
2182  * message of incoming \a req. This must be the first thing done with an
2183  * incoming request in the ptlrpc layer.
2184  *
2185  * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
2186  * in clear text, of size req->rq_reqlen; also req->rq_svc_ctx is set.
2187  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2188  * reply message has been prepared.
2189  * \retval SECSVC_DROP failed, this request should be dropped.
2190  */
2191 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2192 {
2193         struct ptlrpc_sec_policy *policy;
2194         struct lustre_msg *msg = req->rq_reqbuf;
2195         int rc;
2196
2197         ENTRY;
2198
2199         LASSERT(msg);
2200         LASSERT(req->rq_reqmsg == NULL);
2201         LASSERT(req->rq_repmsg == NULL);
2202         LASSERT(req->rq_svc_ctx == NULL);
2203
2204         req->rq_req_swab_mask = 0;
2205
2206         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2207         switch (rc) {
2208         case 1:
2209                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); /* fall through */
2210         case 0:
2211                 break;
2212         default:
2213                 CERROR("error unpacking request from %s x%llu\n",
2214                        libcfs_id2str(req->rq_peer), req->rq_xid);
2215                 RETURN(SECSVC_DROP);
2216         }
2217
2218         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2219         req->rq_sp_from = LUSTRE_SP_ANY;
2220         req->rq_auth_uid = -1; /* set to INVALID_UID */
2221         req->rq_auth_mapped_uid = -1;
2222
2223         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2224         if (!policy) {
2225                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2226                 RETURN(SECSVC_DROP);
2227         }
2228
2229         LASSERT(policy->sp_sops->accept);
2230         rc = policy->sp_sops->accept(req);
2231         sptlrpc_policy_put(policy);
2232         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2233         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2234
2235         /*
2236          * if it's not the null flavor (which means an embedded packed msg),
2237          * reset the swab mask for the coming inner msg unpacking.
2238          */
2239         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2240                 req->rq_req_swab_mask = 0;
2241
2242         /* sanity check for the request source */
2243         rc = sptlrpc_svc_check_from(req, rc);
2244         RETURN(rc);
2245 }
2246
2247 /**
2248  * Used by the ptlrpc server to allocate the reply buffer for \a req. On
2249  * success, req->rq_reply_state is set, and req->rq_reply_state->rs_msg points
2250  * to a buffer of \a msglen size.
2251  */
2252 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2253 {
2254         struct ptlrpc_sec_policy *policy;
2255         struct ptlrpc_reply_state *rs;
2256         int rc;
2257
2258         ENTRY;
2259
2260         LASSERT(req->rq_svc_ctx);
2261         LASSERT(req->rq_svc_ctx->sc_policy);
2262
2263         policy = req->rq_svc_ctx->sc_policy;
2264         LASSERT(policy->sp_sops->alloc_rs);
2265
2266         rc = policy->sp_sops->alloc_rs(req, msglen);
2267         if (unlikely(rc == -ENOMEM)) {
2268                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2269
2270                 if (svcpt->scp_service->srv_max_reply_size <
2271                    msglen + sizeof(struct ptlrpc_reply_state)) {
2272                         /* Just return failure if the size is too big */
2273                         CERROR("size of message is too big (%zd), %d allowed\n",
2274                                 msglen + sizeof(struct ptlrpc_reply_state),
2275                                 svcpt->scp_service->srv_max_reply_size);
2276                         RETURN(-ENOMEM);
2277                 }
2278
2279                 /* failed alloc, try emergency pool */
2280                 rs = lustre_get_emerg_rs(svcpt);
2281                 if (rs == NULL)
2282                         RETURN(-ENOMEM);
2283
2284                 req->rq_reply_state = rs;
2285                 rc = policy->sp_sops->alloc_rs(req, msglen);
2286                 if (rc) {
2287                         lustre_put_emerg_rs(rs);
2288                         req->rq_reply_state = NULL;
2289                 }
2290         }
2291
2292         LASSERT(rc != 0 ||
2293                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2294
2295         RETURN(rc);
2296 }
2297
2298 /**
2299  * Used by the ptlrpc server to perform a transformation upon the reply message.
2300  *
2301  * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
2302  * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2303  */
2304 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2305 {
2306         struct ptlrpc_sec_policy *policy;
2307         int rc;
2308
2309         ENTRY;
2310
2311         LASSERT(req->rq_svc_ctx);
2312         LASSERT(req->rq_svc_ctx->sc_policy);
2313
2314         policy = req->rq_svc_ctx->sc_policy;
2315         LASSERT(policy->sp_sops->authorize);
2316
2317         rc = policy->sp_sops->authorize(req);
2318         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2319
2320         RETURN(rc);
2321 }
2322
2323 /**
2324  * Used by the ptlrpc server to free the reply_state.
2325  */
2326 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2327 {
2328         struct ptlrpc_sec_policy *policy;
2329         unsigned int prealloc;
2330
2331         ENTRY;
2332
2333         LASSERT(rs->rs_svc_ctx);
2334         LASSERT(rs->rs_svc_ctx->sc_policy);
2335
2336         policy = rs->rs_svc_ctx->sc_policy;
2337         LASSERT(policy->sp_sops->free_rs);
2338
2339         prealloc = rs->rs_prealloc;
2340         policy->sp_sops->free_rs(rs);
2341
2342         if (prealloc)
2343                 lustre_put_emerg_rs(rs);
2344         EXIT;
2345 }
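
/*
 * A rough sketch of the expected server-side call order, as suggested by the
 * helpers above rather than copied from the service code:
 *
 *	rc = sptlrpc_svc_unwrap_request(req);       first thing on arrival
 *	... handle the request ...
 *	rc = sptlrpc_svc_alloc_rs(req, msglen);     before packing the reply
 *	... pack req->rq_reply_state->rs_msg ...
 *	rc = sptlrpc_svc_wrap_reply(req);           just before sending
 *	... later ...
 *	sptlrpc_svc_free_rs(req->rq_reply_state);   when the reply is done
 */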
2346
2347 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2348 {
2349         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2350
2351         if (ctx != NULL)
2352                 atomic_inc(&ctx->sc_refcount);
2353 }
2354
2355 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2356 {
2357         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2358
2359         if (ctx == NULL)
2360                 return;
2361
2362         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2363         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2364                 if (ctx->sc_policy->sp_sops->free_ctx)
2365                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2366         }
2367         req->rq_svc_ctx = NULL;
2368 }
2369
2370 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2371 {
2372         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2373
2374         if (ctx == NULL)
2375                 return;
2376
2377         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2378         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2379                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2380 }
2381 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2382
2383 /*
2384  * bulk security
2385  */
2386
2387 /**
2388  * Perform a transformation upon the bulk data pointed to by \a desc. This is called
2389  * before transforming the request message.
2390  */
2391 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2392                           struct ptlrpc_bulk_desc *desc)
2393 {
2394         struct ptlrpc_cli_ctx *ctx;
2395
2396         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2397
2398         if (!req->rq_pack_bulk)
2399                 return 0;
2400
2401         ctx = req->rq_cli_ctx;
2402         if (ctx->cc_ops->wrap_bulk)
2403                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2404         return 0;
2405 }
2406 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2407
2408 /**
2409  * This is called after unwrapping the reply message.
2410  * Returns the number of bytes (nob) of plain text actually received, or an error code.
2411  */
2412 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2413                                  struct ptlrpc_bulk_desc *desc,
2414                                  int nob)
2415 {
2416         struct ptlrpc_cli_ctx *ctx;
2417         int rc;
2418
2419         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2420
2421         if (!req->rq_pack_bulk)
2422                 return desc->bd_nob_transferred;
2423
2424         ctx = req->rq_cli_ctx;
2425         if (ctx->cc_ops->unwrap_bulk) {
2426                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2427                 if (rc < 0)
2428                         return rc;
2429         }
2430         return desc->bd_nob_transferred;
2431 }
2432 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2433
2434 /**
2435  * This is called after unwrapping the reply message.
2436  * Returns 0 on success or an error code.
2437  */
2438 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2439                                   struct ptlrpc_bulk_desc *desc)
2440 {
2441         struct ptlrpc_cli_ctx *ctx;
2442         int rc;
2443
2444         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2445
2446         if (!req->rq_pack_bulk)
2447                 return 0;
2448
2449         ctx = req->rq_cli_ctx;
2450         if (ctx->cc_ops->unwrap_bulk) {
2451                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2452                 if (rc < 0)
2453                         return rc;
2454         }
2455
2456         /*
2457          * if everything is going right, nob should equal nob_transferred.
2458          * in case of privacy mode, nob_transferred needs to be adjusted.
2459          */
2460         if (desc->bd_nob != desc->bd_nob_transferred) {
2461                 CERROR("nob %d doesn't match transferred nob %d\n",
2462                        desc->bd_nob, desc->bd_nob_transferred);
2463                 return -EPROTO;
2464         }
2465
2466         return 0;
2467 }
2468 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
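
/*
 * A rough sketch of the client-side bulk call order, as suggested by the
 * helpers above rather than copied from the I/O path:
 *
 *	rc = sptlrpc_cli_wrap_bulk(req, desc);
 *	... send the request and wait for the reply ...
 *	rc = sptlrpc_cli_unwrap_bulk_write(req, desc);
 *
 * with sptlrpc_cli_unwrap_bulk_read() playing the same role for bulk reads
 * and returning the number of plain-text bytes received.
 */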
2469
2470 #ifdef HAVE_SERVER_SUPPORT
2471 /**
2472  * Perform a transformation upon an outgoing bulk read.
2473  */
2474 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2475                           struct ptlrpc_bulk_desc *desc)
2476 {
2477         struct ptlrpc_svc_ctx *ctx;
2478
2479         LASSERT(req->rq_bulk_read);
2480
2481         if (!req->rq_pack_bulk)
2482                 return 0;
2483
2484         ctx = req->rq_svc_ctx;
2485         if (ctx->sc_policy->sp_sops->wrap_bulk)
2486                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2487
2488         return 0;
2489 }
2490 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2491
2492 /**
2493  * Perform a transformation upon an incoming bulk write.
2494  */
2495 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2496                             struct ptlrpc_bulk_desc *desc)
2497 {
2498         struct ptlrpc_svc_ctx *ctx;
2499         int rc;
2500
2501         LASSERT(req->rq_bulk_write);
2502
2503         /*
2504          * in privacy mode, transferred should be >= expected; otherwise
2505          * transferred should be == expected.
2506          */
2507         if (desc->bd_nob_transferred < desc->bd_nob ||
2508             (desc->bd_nob_transferred > desc->bd_nob &&
2509              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2510              SPTLRPC_BULK_SVC_PRIV)) {
2511                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2512                           desc->bd_nob_transferred, desc->bd_nob);
2513                 return -ETIMEDOUT;
2514         }
2515
2516         if (!req->rq_pack_bulk)
2517                 return 0;
2518
2519         ctx = req->rq_svc_ctx;
2520         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2521                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2522                 if (rc)
2523                         CERROR("error unwrap bulk: %d\n", rc);
2524         }
2525
2526         /* return 0 to allow the reply to be sent */
2527         return 0;
2528 }
2529 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2530
2531 /**
2532  * Prepare buffers for incoming bulk write.
2533  */
2534 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2535                           struct ptlrpc_bulk_desc *desc)
2536 {
2537         struct ptlrpc_svc_ctx *ctx;
2538
2539         LASSERT(req->rq_bulk_write);
2540
2541         if (!req->rq_pack_bulk)
2542                 return 0;
2543
2544         ctx = req->rq_svc_ctx;
2545         if (ctx->sc_policy->sp_sops->prep_bulk)
2546                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2547
2548         return 0;
2549 }
2550 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2551
2552 #endif /* HAVE_SERVER_SUPPORT */
2553
2554 /*
2555  * user descriptor helpers
2556  */
2557
2558 int sptlrpc_current_user_desc_size(void)
2559 {
2560         int ngroups;
2561
2562         ngroups = current_cred()->group_info->ngroups;
2563
2564         if (ngroups > LUSTRE_MAX_GROUPS)
2565                 ngroups = LUSTRE_MAX_GROUPS;
2566         return sptlrpc_user_desc_size(ngroups);
2567 }
2568 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2569
2570 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2571 {
2572         struct ptlrpc_user_desc *pud;
2573         int ngroups;
2574
2575         pud = lustre_msg_buf(msg, offset, 0);
2576
2577         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2578         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2579         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2580         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2581         pud->pud_cap = cfs_curproc_cap_pack();
2582         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2583
2584         task_lock(current);
2585         ngroups = current_cred()->group_info->ngroups;
2586         if (pud->pud_ngroups > ngroups)
2587                 pud->pud_ngroups = ngroups;
2588 #ifdef HAVE_GROUP_INFO_GID
2589         memcpy(pud->pud_groups, current_cred()->group_info->gid,
2590                pud->pud_ngroups * sizeof(__u32));
2591 #else /* !HAVE_GROUP_INFO_GID */
2592         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2593                pud->pud_ngroups * sizeof(__u32));
2594 #endif /* HAVE_GROUP_INFO_GID */
2595         task_unlock(current);
2596
2597         return 0;
2598 }
2599 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2600
2601 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2602 {
2603         struct ptlrpc_user_desc *pud;
2604         int i;
2605
2606         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2607         if (!pud)
2608                 return -EINVAL;
2609
2610         if (swabbed) {
2611                 __swab32s(&pud->pud_uid);
2612                 __swab32s(&pud->pud_gid);
2613                 __swab32s(&pud->pud_fsuid);
2614                 __swab32s(&pud->pud_fsgid);
2615                 __swab32s(&pud->pud_cap);
2616                 __swab32s(&pud->pud_ngroups);
2617         }
2618
2619         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2620                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2621                 return -EINVAL;
2622         }
2623
2624         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2625             msg->lm_buflens[offset]) {
2626                 CERROR("%u groups are claimed but bufsize only %u\n",
2627                        pud->pud_ngroups, msg->lm_buflens[offset]);
2628                 return -EINVAL;
2629         }
2630
2631         if (swabbed) {
2632                 for (i = 0; i < pud->pud_ngroups; i++)
2633                         __swab32s(&pud->pud_groups[i]);
2634         }
2635
2636         return 0;
2637 }
2638 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
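
/*
 * A minimal sketch of how the user descriptor helpers pair up, with "offset"
 * and "swabbed" standing for values the caller already has (illustrative
 * only, not taken from this file):
 *
 *	sender:
 *		len = sptlrpc_current_user_desc_size();
 *		... size lm_buflens[offset] to len when packing the msg ...
 *		sptlrpc_pack_user_desc(msg, offset);
 *
 *	receiver:
 *		rc = sptlrpc_unpack_user_desc(msg, offset, swabbed);
 */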
2639
2640 /*
2641  * misc helpers
2642  */
2643
2644 const char *sec2target_str(struct ptlrpc_sec *sec)
2645 {
2646         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2647                 return "*";
2648         if (sec_is_reverse(sec))
2649                 return "c";
2650         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2651 }
2652 EXPORT_SYMBOL(sec2target_str);
2653
2654 /*
2655  * return true if the bulk data is protected
2656  */
2657 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2658 {
2659         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2660         case SPTLRPC_BULK_SVC_INTG:
2661         case SPTLRPC_BULK_SVC_PRIV:
2662                 return 1;
2663         default:
2664                 return 0;
2665         }
2666 }
2667 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2668
2669 /*
2670  * crypto API helper/alloc blkcipher
2671  */
2672
2673 /*
2674  * initialize/finalize
2675  */
2676
2677 int sptlrpc_init(void)
2678 {
2679         int rc;
2680
2681         rwlock_init(&policy_lock);
2682
2683         rc = sptlrpc_gc_init();
2684         if (rc)
2685                 goto out;
2686
2687         rc = sptlrpc_conf_init();
2688         if (rc)
2689                 goto out_gc;
2690
2691         rc = sptlrpc_enc_pool_init();
2692         if (rc)
2693                 goto out_conf;
2694
2695         rc = sptlrpc_null_init();
2696         if (rc)
2697                 goto out_pool;
2698
2699         rc = sptlrpc_plain_init();
2700         if (rc)
2701                 goto out_null;
2702
2703         rc = sptlrpc_lproc_init();
2704         if (rc)
2705                 goto out_plain;
2706
2707         return 0;
2708
2709 out_plain:
2710         sptlrpc_plain_fini();
2711 out_null:
2712         sptlrpc_null_fini();
2713 out_pool:
2714         sptlrpc_enc_pool_fini();
2715 out_conf:
2716         sptlrpc_conf_fini();
2717 out_gc:
2718         sptlrpc_gc_fini();
2719 out:
2720         return rc;
2721 }
2722
2723 void sptlrpc_fini(void)
2724 {
2725         sptlrpc_lproc_fini();
2726         sptlrpc_plain_fini();
2727         sptlrpc_null_fini();
2728         sptlrpc_enc_pool_fini();
2729         sptlrpc_conf_fini();
2730         sptlrpc_gc_fini();
2731 }