1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ptlrpc/sec.c
33  *
34  * Author: Eric Mei <ericm@clusterfs.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_SEC
38
39 #include <linux/user_namespace.h>
40 #include <linux/uidgid.h>
41 #include <linux/crypto.h>
42 #include <linux/key.h>
43
44 #include <libcfs/libcfs.h>
45 #include <obd.h>
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_net.h>
49 #include <lustre_import.h>
50 #include <lustre_dlm.h>
51 #include <lustre_sec.h>
52
53 #include "ptlrpc_internal.h"
54
55 static int send_sepol;
56 module_param(send_sepol, int, 0644);
57 MODULE_PARM_DESC(send_sepol, "Client sends SELinux policy status");
58
59 /*
60  * policy registers
61  */
62
63 static rwlock_t policy_lock;
64 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
65         NULL,
66 };
67
68 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
69 {
70         __u16 number = policy->sp_policy;
71
72         LASSERT(policy->sp_name);
73         LASSERT(policy->sp_cops);
74         LASSERT(policy->sp_sops);
75
76         if (number >= SPTLRPC_POLICY_MAX)
77                 return -EINVAL;
78
79         write_lock(&policy_lock);
80         if (unlikely(policies[number])) {
81                 write_unlock(&policy_lock);
82                 return -EALREADY;
83         }
84         policies[number] = policy;
85         write_unlock(&policy_lock);
86
87         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
88         return 0;
89 }
90 EXPORT_SYMBOL(sptlrpc_register_policy);
91
92 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
93 {
94         __u16 number = policy->sp_policy;
95
96         LASSERT(number < SPTLRPC_POLICY_MAX);
97
98         write_lock(&policy_lock);
99         if (unlikely(policies[number] == NULL)) {
100                 write_unlock(&policy_lock);
101                 CERROR("%s: already unregistered\n", policy->sp_name);
102                 return -EINVAL;
103         }
104
105         LASSERT(policies[number] == policy);
106         policies[number] = NULL;
107         write_unlock(&policy_lock);
108
109         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
110         return 0;
111 }
112 EXPORT_SYMBOL(sptlrpc_unregister_policy);
113
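/*
 * Illustrative sketch, not part of the original file: how a security policy
 * module would plug into the registry above.  The "example" names, the
 * placeholder policy slot and the empty cops/sops tables are all made up; a
 * real policy (e.g. ptlrpc_gss) uses its own SPTLRPC_POLICY_* number and
 * fills struct ptlrpc_sec_cops/ptlrpc_sec_sops with its callbacks.
 */
static struct ptlrpc_sec_cops example_sec_cops;
static struct ptlrpc_sec_sops example_sec_sops;

static struct ptlrpc_sec_policy example_sec_policy = {
        .sp_owner  = THIS_MODULE,
        .sp_name   = "sec.example",
        .sp_policy = SPTLRPC_POLICY_PLAIN,     /* placeholder slot */
        .sp_cops   = &example_sec_cops,
        .sp_sops   = &example_sec_sops,
};

static int __maybe_unused example_policy_register(void)
{
        /* fails with -EALREADY if the slot is already taken */
        return sptlrpc_register_policy(&example_sec_policy);
}

static void __maybe_unused example_policy_unregister(void)
{
        sptlrpc_unregister_policy(&example_sec_policy);
}
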
114 static
115 struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
116 {
117         static DEFINE_MUTEX(load_mutex);
118         static atomic_t           loaded = ATOMIC_INIT(0);
119         struct ptlrpc_sec_policy *policy;
120         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
121         __u16                     flag = 0;
122
123         if (number >= SPTLRPC_POLICY_MAX)
124                 return NULL;
125
126         while (1) {
127                 read_lock(&policy_lock);
128                 policy = policies[number];
129                 if (policy && !try_module_get(policy->sp_owner))
130                         policy = NULL;
131                 if (policy == NULL)
132                         flag = atomic_read(&loaded);
133                 read_unlock(&policy_lock);
134
135                 if (policy != NULL || flag != 0 ||
136                     number != SPTLRPC_POLICY_GSS)
137                         break;
138
139                 /* try to load gss module, once */
140                 mutex_lock(&load_mutex);
141                 if (atomic_read(&loaded) == 0) {
142                         if (request_module("ptlrpc_gss") == 0)
143                                 CDEBUG(D_SEC,
144                                        "module ptlrpc_gss loaded on demand\n");
145                         else
146                                 CERROR("Unable to load module ptlrpc_gss\n");
147
148                         atomic_set(&loaded, 1);
149                 }
150                 mutex_unlock(&load_mutex);
151         }
152
153         return policy;
154 }
155
156 __u32 sptlrpc_name2flavor_base(const char *name)
157 {
158         if (!strcmp(name, "null"))
159                 return SPTLRPC_FLVR_NULL;
160         if (!strcmp(name, "plain"))
161                 return SPTLRPC_FLVR_PLAIN;
162         if (!strcmp(name, "gssnull"))
163                 return SPTLRPC_FLVR_GSSNULL;
164         if (!strcmp(name, "krb5n"))
165                 return SPTLRPC_FLVR_KRB5N;
166         if (!strcmp(name, "krb5a"))
167                 return SPTLRPC_FLVR_KRB5A;
168         if (!strcmp(name, "krb5i"))
169                 return SPTLRPC_FLVR_KRB5I;
170         if (!strcmp(name, "krb5p"))
171                 return SPTLRPC_FLVR_KRB5P;
172         if (!strcmp(name, "skn"))
173                 return SPTLRPC_FLVR_SKN;
174         if (!strcmp(name, "ska"))
175                 return SPTLRPC_FLVR_SKA;
176         if (!strcmp(name, "ski"))
177                 return SPTLRPC_FLVR_SKI;
178         if (!strcmp(name, "skpi"))
179                 return SPTLRPC_FLVR_SKPI;
180
181         return SPTLRPC_FLVR_INVALID;
182 }
183 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
184
185 const char *sptlrpc_flavor2name_base(__u32 flvr)
186 {
187         __u32   base = SPTLRPC_FLVR_BASE(flvr);
188
189         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
190                 return "null";
191         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
192                 return "plain";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
194                 return "gssnull";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
196                 return "krb5n";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
198                 return "krb5a";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
200                 return "krb5i";
201         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
202                 return "krb5p";
203         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
204                 return "skn";
205         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
206                 return "ska";
207         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
208                 return "ski";
209         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
210                 return "skpi";
211
212         CERROR("invalid wire flavor 0x%x\n", flvr);
213         return "invalid";
214 }
215 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
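
/*
 * Illustrative sketch, not part of the original file: round-tripping a
 * flavor name through the two helpers above.  example_flavor_roundtrip()
 * and its debug messages are made up for this example.
 */
static void __maybe_unused example_flavor_roundtrip(const char *name)
{
        __u32 flvr = sptlrpc_name2flavor_base(name);

        if (flvr == SPTLRPC_FLVR_INVALID) {
                CDEBUG(D_SEC, "unknown flavor name '%s'\n", name);
                return;
        }

        /* prints the canonical name, e.g. "krb5i" for SPTLRPC_FLVR_KRB5I */
        CDEBUG(D_SEC, "flavor 0x%x is '%s'\n", flvr,
               sptlrpc_flavor2name_base(flvr));
}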
216
217 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
218                                char *buf, int bufsize)
219 {
220         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
221                 snprintf(buf, bufsize, "hash:%s",
222                         sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
223         else
224                 snprintf(buf, bufsize, "%s",
225                         sptlrpc_flavor2name_base(sf->sf_rpc));
226
227         buf[bufsize - 1] = '\0';
228         return buf;
229 }
230 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
231
232 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
233 {
234         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
235
236         /*
237          * currently we don't support customized bulk specification for
238          * flavors other than plain
239          */
240         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
241                 char bspec[16];
242
243                 bspec[0] = '-';
244                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
245                 strlcat(buf, bspec, bufsize);
246         }
247
248         buf[bufsize - 1] = '\0';
249         return buf;
250 }
251 EXPORT_SYMBOL(sptlrpc_flavor2name);
252
253 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
254 {
255         buf[0] = '\0';
256
257         if (flags & PTLRPC_SEC_FL_REVERSE)
258                 strlcat(buf, "reverse,", bufsize);
259         if (flags & PTLRPC_SEC_FL_ROOTONLY)
260                 strlcat(buf, "rootonly,", bufsize);
261         if (flags & PTLRPC_SEC_FL_UDESC)
262                 strlcat(buf, "udesc,", bufsize);
263         if (flags & PTLRPC_SEC_FL_BULK)
264                 strlcat(buf, "bulk,", bufsize);
265         if (buf[0] == '\0')
266                 strlcat(buf, "-,", bufsize);
267
268         return buf;
269 }
270 EXPORT_SYMBOL(sptlrpc_secflags2str);
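
/*
 * Illustrative sketch, not part of the original file: dumping the security
 * flags of a sec for debugging.  example_dump_sec_flags() and the 64-byte
 * buffer size are arbitrary choices for this example.
 */
static void __maybe_unused example_dump_sec_flags(struct ptlrpc_sec *sec)
{
        char buf[64];

        CDEBUG(D_SEC, "sec %p flags: %s\n", sec,
               sptlrpc_secflags2str(sec->ps_flvr.sf_flags, buf, sizeof(buf)));
}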
271
272 /*
273  * client context APIs
274  */
275
276 static
277 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
278 {
279         struct vfs_cred vcred;
280         int create = 1, remove_dead = 1;
281
282         LASSERT(sec);
283         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
284
285         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
286                                      PTLRPC_SEC_FL_ROOTONLY)) {
287                 vcred.vc_uid = 0;
288                 vcred.vc_gid = 0;
289                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
290                         create = 0;
291                         remove_dead = 0;
292                 }
293         } else {
294                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
295                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
296         }
297
298         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
299                                                    remove_dead);
300 }
301
302 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
303 {
304         atomic_inc(&ctx->cc_refcount);
305         return ctx;
306 }
307 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
308
309 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
310 {
311         struct ptlrpc_sec *sec = ctx->cc_sec;
312
313         LASSERT(sec);
314         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
315
316         if (!atomic_dec_and_test(&ctx->cc_refcount))
317                 return;
318
319         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
320 }
321 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
322
323 /**
324  * Expire the client context immediately.
325  *
326  * \pre Caller must hold at least 1 reference on the \a ctx.
327  */
328 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
329 {
330         LASSERT(ctx->cc_ops->die);
331         ctx->cc_ops->die(ctx, 0);
332 }
333 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
334
335 /**
336  * Wake up the threads that are waiting for this client context. Called
337  * after a status change has happened on \a ctx.
338  */
339 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
340 {
341         struct ptlrpc_request *req, *next;
342
343         spin_lock(&ctx->cc_lock);
344         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
345                                      rq_ctx_chain) {
346                 list_del_init(&req->rq_ctx_chain);
347                 ptlrpc_client_wake_req(req);
348         }
349         spin_unlock(&ctx->cc_lock);
350 }
351 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
352
353 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
354 {
355         LASSERT(ctx->cc_ops);
356
357         if (ctx->cc_ops->display == NULL)
358                 return 0;
359
360         return ctx->cc_ops->display(ctx, buf, bufsize);
361 }
362
363 static int import_sec_check_expire(struct obd_import *imp)
364 {
365         int adapt = 0;
366
367         write_lock(&imp->imp_sec_lock);
368         if (imp->imp_sec_expire &&
369             imp->imp_sec_expire < ktime_get_real_seconds()) {
370                 adapt = 1;
371                 imp->imp_sec_expire = 0;
372         }
373         write_unlock(&imp->imp_sec_lock);
374
375         if (!adapt)
376                 return 0;
377
378         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
379         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
380 }
381
382 /**
383  * Get and validate the client side ptlrpc security facilities from
384  * \a imp. There is a race condition on client reconnect when the import is
385  * being destroyed while there are outstanding client bound requests. In
386  * this case, do not output any error messages if import security is not
387  * found.
388  *
389  * \param[in] imp obd import associated with client
390  * \param[out] sec client side ptlrpc security
391  *
392  * \retval 0 if security retrieved successfully
393  * \retval -ve errno if there was a problem
394  */
395 static int import_sec_validate_get(struct obd_import *imp,
396                                    struct ptlrpc_sec **sec)
397 {
398         int rc;
399
400         if (unlikely(imp->imp_sec_expire)) {
401                 rc = import_sec_check_expire(imp);
402                 if (rc)
403                         return rc;
404         }
405
406         *sec = sptlrpc_import_sec_ref(imp);
407         if (*sec == NULL) {
408                 CERROR("import %p (%s) with no sec\n",
409                         imp, ptlrpc_import_state_name(imp->imp_state));
410                 return -EACCES;
411         }
412
413         if (unlikely((*sec)->ps_dying)) {
414                 CERROR("attempt to use dying sec %p\n", *sec);
415                 sptlrpc_sec_put(*sec);
416                 return -EACCES;
417         }
418
419         return 0;
420 }
421
422 /**
423  * Given a \a req, find or allocate an appropriate context for it.
424  * \pre req->rq_cli_ctx == NULL.
425  *
426  * \retval 0 success, and req->rq_cli_ctx is set.
427  * \retval -ve error number, and req->rq_cli_ctx == NULL.
428  */
429 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
430 {
431         struct obd_import *imp = req->rq_import;
432         struct ptlrpc_sec *sec;
433         int rc;
434
435         ENTRY;
436
437         LASSERT(!req->rq_cli_ctx);
438         LASSERT(imp);
439
440         rc = import_sec_validate_get(imp, &sec);
441         if (rc)
442                 RETURN(rc);
443
444         req->rq_cli_ctx = get_my_ctx(sec);
445
446         sptlrpc_sec_put(sec);
447
448         if (!req->rq_cli_ctx) {
449                 CERROR("req %p: failed to get context\n", req);
450                 RETURN(-ECONNREFUSED);
451         }
452
453         RETURN(0);
454 }
455
456 /**
457  * Drop the context for \a req.
458  * \pre req->rq_cli_ctx != NULL.
459  * \post req->rq_cli_ctx == NULL.
460  *
461  * If \a sync == 0, this function should return quickly without sleep;
462  * otherwise it might trigger and wait for the whole process of sending
463  * a context-destroying RPC to the server.
464  */
465 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
466 {
467         ENTRY;
468
469         LASSERT(req);
470         LASSERT(req->rq_cli_ctx);
471
472         /*
473          * the request might be asked to release its context early while
474          * it is still on the context waiting list.
475          */
476         if (!list_empty(&req->rq_ctx_chain)) {
477                 spin_lock(&req->rq_cli_ctx->cc_lock);
478                 list_del_init(&req->rq_ctx_chain);
479                 spin_unlock(&req->rq_cli_ctx->cc_lock);
480         }
481
482         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
483         req->rq_cli_ctx = NULL;
484         EXIT;
485 }
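
/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * sptlrpc_req_get_ctx()/sptlrpc_req_put_ctx() around a client request.
 * example_req_ctx_lifecycle() is made up; ptlrpc itself drives these calls
 * from the request alloc/free paths.
 */
static int __maybe_unused example_req_ctx_lifecycle(struct ptlrpc_request *req)
{
        int rc;

        /* attach a context; on failure req->rq_cli_ctx stays NULL */
        rc = sptlrpc_req_get_ctx(req);
        if (rc)
                return rc;

        /* ... pack and send the request here ... */

        /* drop the context; sync == 0 means do not sleep */
        sptlrpc_req_put_ctx(req, 0);
        return 0;
}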
486
487 static
488 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
489                            struct ptlrpc_cli_ctx *oldctx,
490                            struct ptlrpc_cli_ctx *newctx)
491 {
492         struct sptlrpc_flavor   old_flvr;
493         char *reqmsg = NULL; /* to work around old gcc */
494         int reqmsg_size;
495         int rc = 0;
496
497         LASSERT(req->rq_reqmsg);
498         LASSERT(req->rq_reqlen);
499         LASSERT(req->rq_replen);
500
501         CDEBUG(D_SEC,
502                "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
503                req, oldctx, oldctx->cc_vcred.vc_uid,
504                sec2target_str(oldctx->cc_sec), newctx, newctx->cc_vcred.vc_uid,
505                sec2target_str(newctx->cc_sec), oldctx->cc_sec,
506                oldctx->cc_sec->ps_policy->sp_name, newctx->cc_sec,
507                newctx->cc_sec->ps_policy->sp_name);
508
509         /* save flavor */
510         old_flvr = req->rq_flvr;
511
512         /* save request message */
513         reqmsg_size = req->rq_reqlen;
514         if (reqmsg_size != 0) {
515                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
516                 if (reqmsg == NULL)
517                         return -ENOMEM;
518                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
519         }
520
521         /* release old req/rep buf */
522         req->rq_cli_ctx = oldctx;
523         sptlrpc_cli_free_reqbuf(req);
524         sptlrpc_cli_free_repbuf(req);
525         req->rq_cli_ctx = newctx;
526
527         /* recalculate the flavor */
528         sptlrpc_req_set_flavor(req, 0);
529
530         /*
531          * alloc new request buffer
532          * we don't need to alloc the reply buffer here; leave it to the
533          * rest of the ptlrpc processing
534          */
535         if (reqmsg_size != 0) {
536                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
537                 if (!rc) {
538                         LASSERT(req->rq_reqmsg);
539                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
540                 } else {
541                         CWARN("failed to alloc reqbuf: %d\n", rc);
542                         req->rq_flvr = old_flvr;
543                 }
544
545                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
546         }
547         return rc;
548 }
549
550 /**
551  * If the current context of \a req is dead somehow, e.g. we just switched
552  * flavor and thus marked the original contexts dead, find a new context for
553  * it. If no switch is needed, \a req will end up with the same context.
554  *
555  * \note a request must have a context, to keep other parts of the code happy.
556  * On any failure during the switch, we must restore the old one.
557  */
558 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
559 {
560         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
561         struct ptlrpc_cli_ctx *newctx;
562         int rc;
563
564         ENTRY;
565
566         LASSERT(oldctx);
567
568         sptlrpc_cli_ctx_get(oldctx);
569         sptlrpc_req_put_ctx(req, 0);
570
571         rc = sptlrpc_req_get_ctx(req);
572         if (unlikely(rc)) {
573                 LASSERT(!req->rq_cli_ctx);
574
575                 /* restore old ctx */
576                 req->rq_cli_ctx = oldctx;
577                 RETURN(rc);
578         }
579
580         newctx = req->rq_cli_ctx;
581         LASSERT(newctx);
582
583         if (unlikely(newctx == oldctx &&
584                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
585                 /*
586                  * we still got the old dead ctx; usually this means the system is too busy
587                  */
588                 CDEBUG(D_SEC,
589                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
590                        newctx, newctx->cc_flags);
591
592                 schedule_timeout_interruptible(cfs_time_seconds(1));
593         } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
594                             == 0)) {
595                 /*
596                  * new ctx not up to date yet
597                  */
598                 CDEBUG(D_SEC,
599                        "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
600                        newctx, newctx->cc_flags);
601         } else {
602                 /*
603                  * it's possible newctx == oldctx if we're switching
604                  * subflavor with the same sec.
605                  */
606                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
607                 if (rc) {
608                         /* restore old ctx */
609                         sptlrpc_req_put_ctx(req, 0);
610                         req->rq_cli_ctx = oldctx;
611                         RETURN(rc);
612                 }
613
614                 LASSERT(req->rq_cli_ctx == newctx);
615         }
616
617         sptlrpc_cli_ctx_put(oldctx, 1);
618         RETURN(0);
619 }
620 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
621
622 static
623 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
624 {
625         if (cli_ctx_is_refreshed(ctx))
626                 return 1;
627         return 0;
628 }
629
630 static
631 void ctx_refresh_interrupt(struct ptlrpc_request *req)
632 {
633
634         spin_lock(&req->rq_lock);
635         req->rq_intr = 1;
636         spin_unlock(&req->rq_lock);
637 }
638
639 static
640 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
641 {
642         spin_lock(&ctx->cc_lock);
643         if (!list_empty(&req->rq_ctx_chain))
644                 list_del_init(&req->rq_ctx_chain);
645         spin_unlock(&ctx->cc_lock);
646 }
647
648 /**
649  * Refresh the context of \a req, if it's not up-to-date.
650  * \param timeout
651  * - == 0: do not wait
652  * - == MAX_SCHEDULE_TIMEOUT: wait indefinitely
653  * - > 0: not supported
654  *
655  * The status of the context could be changed by other threads at any time.
656  * We allow this race, but once we return with 0, the caller will assume it
657  * is up to date and keep using it until the owning RPC is done.
658  *
659  * \retval 0 only if the context is up to date.
660  * \retval -ve error number.
661  */
662 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
663 {
664         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
665         struct ptlrpc_sec *sec;
666         int rc;
667
668         ENTRY;
669
670         LASSERT(ctx);
671
672         if (req->rq_ctx_init || req->rq_ctx_fini)
673                 RETURN(0);
674
675         if (timeout != 0 && timeout != MAX_SCHEDULE_TIMEOUT) {
676                 CERROR("req %p: invalid timeout %lu\n", req, timeout);
677                 RETURN(-EINVAL);
678         }
679
680         /*
681          * during the process a request's context might even change type
682          * (e.g. from a gss ctx to a null ctx), so on each loop we need to
683          * re-check everything
684          */
685 again:
686         rc = import_sec_validate_get(req->rq_import, &sec);
687         if (rc)
688                 RETURN(rc);
689
690         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
691                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
692                        req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
693                 req_off_ctx_list(req, ctx);
694                 sptlrpc_req_replace_dead_ctx(req);
695                 ctx = req->rq_cli_ctx;
696         }
697         sptlrpc_sec_put(sec);
698
699         if (cli_ctx_is_eternal(ctx))
700                 RETURN(0);
701
702         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
703                 LASSERT(ctx->cc_ops->refresh);
704                 ctx->cc_ops->refresh(ctx);
705         }
706         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
707
708         LASSERT(ctx->cc_ops->validate);
709         if (ctx->cc_ops->validate(ctx) == 0) {
710                 req_off_ctx_list(req, ctx);
711                 RETURN(0);
712         }
713
714         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
715                 spin_lock(&req->rq_lock);
716                 req->rq_err = 1;
717                 spin_unlock(&req->rq_lock);
718                 req_off_ctx_list(req, ctx);
719                 RETURN(-EPERM);
720         }
721
722         /*
723          * There's a subtle issue when resending RPCs. Suppose the following
724          * situation:
725          *  1. the request was sent to the server.
726          *  2. recovery was kicked off; after it finished, the request was
727          *     marked as resent.
728          *  3. the request is resent.
729          *  4. the old reply from the server is received; we accept and verify
730          *     the reply. This has to succeed, otherwise the error becomes
731          *     visible to the application.
732          *  5. the new reply from the server is received and dropped by LNet.
733          *
734          * Note the xid of the old & new request is the same. We can't simply
735          * change the xid for the resent request because the server relies on
736          * it for reply reconstruction.
737          *
738          * Commonly the original context should be uptodate because we
739          * have a nice expiry time; the server will keep its context because
740          * we at least hold a ref on the old context, which prevents the
741          * context-destroying RPC from being sent. So the server can still
742          * accept the request and finish the RPC. But if that's not the case:
743          *  1. If the server-side context has been trimmed, a NO_CONTEXT will
744          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
745          *     new context by force.
746          *  2. If the current context was never refreshed, then we are fine:
747          *     we never really sent a request with the old context before.
748          */
749         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
750             unlikely(req->rq_reqmsg) &&
751             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
752                 req_off_ctx_list(req, ctx);
753                 RETURN(0);
754         }
755
756         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
757                 req_off_ctx_list(req, ctx);
758                 /*
759                  * don't switch ctx if import was deactivated
760                  */
761                 if (req->rq_import->imp_deactive) {
762                         spin_lock(&req->rq_lock);
763                         req->rq_err = 1;
764                         spin_unlock(&req->rq_lock);
765                         RETURN(-EINTR);
766                 }
767
768                 rc = sptlrpc_req_replace_dead_ctx(req);
769                 if (rc) {
770                         LASSERT(ctx == req->rq_cli_ctx);
771                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
772                                req, ctx, rc);
773                         spin_lock(&req->rq_lock);
774                         req->rq_err = 1;
775                         spin_unlock(&req->rq_lock);
776                         RETURN(rc);
777                 }
778
779                 ctx = req->rq_cli_ctx;
780                 goto again;
781         }
782
783         /*
784          * Now we're sure this context is in the middle of an upcall; add
785          * ourselves to the waiting list
786          */
787         spin_lock(&ctx->cc_lock);
788         if (list_empty(&req->rq_ctx_chain))
789                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
790         spin_unlock(&ctx->cc_lock);
791
792         if (timeout == 0)
793                 RETURN(-EWOULDBLOCK);
794
795         /* Clear any flags that may be present from previous sends */
796         LASSERT(req->rq_receiving_reply == 0);
797         spin_lock(&req->rq_lock);
798         req->rq_err = 0;
799         req->rq_timedout = 0;
800         req->rq_resend = 0;
801         req->rq_restart = 0;
802         spin_unlock(&req->rq_lock);
803
804         /* by now we know that timeout value is MAX_SCHEDULE_TIMEOUT,
805          * so wait indefinitely with non-fatal signals blocked
806          */
807         if (l_wait_event_abortable(req->rq_reply_waitq,
808                                    ctx_check_refresh(ctx)) == -ERESTARTSYS) {
809                 rc = -EINTR;
810                 ctx_refresh_interrupt(req);
811         }
812
813         /*
814          * following cases could lead us here:
815          * - successfully refreshed;
816          * - interrupted;
817          * - timed out, and we don't want to recover from the failure;
818          * - timed out, and woken up after recovery finished;
819          * - someone else marked this ctx dead by force;
820          * - someone invalidated the req and called ptlrpc_client_wake_req(),
821          *   e.g. ptlrpc_abort_inflight();
822          */
823         if (!cli_ctx_is_refreshed(ctx)) {
824                 /* timed out or interrupted */
825                 req_off_ctx_list(req, ctx);
826
827                 LASSERT(rc != 0);
828                 RETURN(rc);
829         }
830
831         goto again;
832 }
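
/*
 * Illustrative sketch, not part of the original file: the two supported
 * timeout values of sptlrpc_req_refresh_ctx().  example_refresh_ctx() and
 * its "nonblocking" parameter are made up for this example.
 */
static int __maybe_unused example_refresh_ctx(struct ptlrpc_request *req,
                                              int nonblocking)
{
        /*
         * timeout 0 returns -EWOULDBLOCK if an upcall would be needed;
         * MAX_SCHEDULE_TIMEOUT waits (abortable) until the ctx is refreshed
         * or marked dead/error.
         */
        return sptlrpc_req_refresh_ctx(req,
                                       nonblocking ? 0 : MAX_SCHEDULE_TIMEOUT);
}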
833
834 /**
835  * Initialize flavor settings for \a req, according to \a opcode.
836  *
837  * \note this could be called in two situations:
838  * - new request from ptlrpc_pre_req(), with proper @opcode
839  * - old request which changed ctx in the middle, with @opcode == 0
840  */
841 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
842 {
843         struct ptlrpc_sec *sec;
844
845         LASSERT(req->rq_import);
846         LASSERT(req->rq_cli_ctx);
847         LASSERT(req->rq_cli_ctx->cc_sec);
848         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
849
850         /* special security flags according to opcode */
851         switch (opcode) {
852         case OST_READ:
853         case MDS_READPAGE:
854         case MGS_CONFIG_READ:
855         case OBD_IDX_READ:
856                 req->rq_bulk_read = 1;
857                 break;
858         case OST_WRITE:
859         case MDS_WRITEPAGE:
860                 req->rq_bulk_write = 1;
861                 break;
862         case SEC_CTX_INIT:
863                 req->rq_ctx_init = 1;
864                 break;
865         case SEC_CTX_FINI:
866                 req->rq_ctx_fini = 1;
867                 break;
868         case 0:
869                 /* init/fini rpc won't be resent, so can't be here */
870                 LASSERT(req->rq_ctx_init == 0);
871                 LASSERT(req->rq_ctx_fini == 0);
872
873                 /* cleanup flags, which should be recalculated */
874                 req->rq_pack_udesc = 0;
875                 req->rq_pack_bulk = 0;
876                 break;
877         }
878
879         sec = req->rq_cli_ctx->cc_sec;
880
881         spin_lock(&sec->ps_lock);
882         req->rq_flvr = sec->ps_flvr;
883         spin_unlock(&sec->ps_lock);
884
885         /*
886          * force SVC_NULL for context initiation rpc, SVC_INTG for context
887          * destruction rpc
888          */
889         if (unlikely(req->rq_ctx_init))
890                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
891         else if (unlikely(req->rq_ctx_fini))
892                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
893
894         /* user descriptor flag, null security can't do it anyway */
895         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
896             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
897                 req->rq_pack_udesc = 1;
898
899         /* bulk security flag */
900         if ((req->rq_bulk_read || req->rq_bulk_write) &&
901             sptlrpc_flavor_has_bulk(&req->rq_flvr))
902                 req->rq_pack_bulk = 1;
903 }
904
905 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
906 {
907         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
908                 return;
909
910         LASSERT(req->rq_clrbuf);
911         if (req->rq_pool || !req->rq_reqbuf)
912                 return;
913
914         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
915         req->rq_reqbuf = NULL;
916         req->rq_reqbuf_len = 0;
917 }
918
919 /**
920  * Given an import \a imp, check whether the current user has a valid context
921  * or not. We may create a new context and try to refresh it, retrying
922  * repeatedly in case of non-fatal errors. A return of 0 means success.
923  */
924 int sptlrpc_import_check_ctx(struct obd_import *imp)
925 {
926         struct ptlrpc_sec     *sec;
927         struct ptlrpc_cli_ctx *ctx;
928         struct ptlrpc_request *req = NULL;
929         int rc;
930
931         ENTRY;
932
933         might_sleep();
934
935         sec = sptlrpc_import_sec_ref(imp);
936         ctx = get_my_ctx(sec);
937         sptlrpc_sec_put(sec);
938
939         if (!ctx)
940                 RETURN(-ENOMEM);
941
942         if (cli_ctx_is_eternal(ctx) ||
943             ctx->cc_ops->validate(ctx) == 0) {
944                 sptlrpc_cli_ctx_put(ctx, 1);
945                 RETURN(0);
946         }
947
948         if (cli_ctx_is_error(ctx)) {
949                 sptlrpc_cli_ctx_put(ctx, 1);
950                 RETURN(-EACCES);
951         }
952
953         req = ptlrpc_request_cache_alloc(GFP_NOFS);
954         if (!req)
955                 RETURN(-ENOMEM);
956
957         ptlrpc_cli_req_init(req);
958         atomic_set(&req->rq_refcount, 10000);
959
960         req->rq_import = imp;
961         req->rq_flvr = sec->ps_flvr;
962         req->rq_cli_ctx = ctx;
963
964         rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);
965         LASSERT(list_empty(&req->rq_ctx_chain));
966         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
967         ptlrpc_request_cache_free(req);
968
969         RETURN(rc);
970 }
971
972 /**
973  * Used by ptlrpc client, to perform the pre-defined security transformation
974  * upon the request message of \a req. After this function is called,
975  * req->rq_reqmsg is still accessible as clear text.
976  */
977 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
978 {
979         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
980         int rc = 0;
981
982         ENTRY;
983
984         LASSERT(ctx);
985         LASSERT(ctx->cc_sec);
986         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
987
988         /*
989          * we wrap the bulk request here because now we can be sure
990          * the context is uptodate.
991          */
992         if (req->rq_bulk) {
993                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
994                 if (rc)
995                         RETURN(rc);
996         }
997
998         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
999         case SPTLRPC_SVC_NULL:
1000         case SPTLRPC_SVC_AUTH:
1001         case SPTLRPC_SVC_INTG:
1002                 LASSERT(ctx->cc_ops->sign);
1003                 rc = ctx->cc_ops->sign(ctx, req);
1004                 break;
1005         case SPTLRPC_SVC_PRIV:
1006                 LASSERT(ctx->cc_ops->seal);
1007                 rc = ctx->cc_ops->seal(ctx, req);
1008                 break;
1009         default:
1010                 LBUG();
1011         }
1012
1013         if (rc == 0) {
1014                 LASSERT(req->rq_reqdata_len);
1015                 LASSERT(req->rq_reqdata_len % 8 == 0);
1016                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1017         }
1018
1019         RETURN(rc);
1020 }
1021
1022 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1023 {
1024         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1025         int rc;
1026
1027         ENTRY;
1028
1029         LASSERT(ctx);
1030         LASSERT(ctx->cc_sec);
1031         LASSERT(req->rq_repbuf);
1032         LASSERT(req->rq_repdata);
1033         LASSERT(req->rq_repmsg == NULL);
1034
1035         req->rq_rep_swab_mask = 0;
1036
1037         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1038         switch (rc) {
1039         case 1:
1040                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); /* fall through */
1041         case 0:
1042                 break;
1043         default:
1044                 CERROR("failed unpack reply: x%llu\n", req->rq_xid);
1045                 RETURN(-EPROTO);
1046         }
1047
1048         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1049                 CERROR("replied data length %d too small\n",
1050                        req->rq_repdata_len);
1051                 RETURN(-EPROTO);
1052         }
1053
1054         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1055             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1056                 CERROR("reply policy %u doesn't match request policy %u\n",
1057                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1058                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1059                 RETURN(-EPROTO);
1060         }
1061
1062         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1063         case SPTLRPC_SVC_NULL:
1064         case SPTLRPC_SVC_AUTH:
1065         case SPTLRPC_SVC_INTG:
1066                 LASSERT(ctx->cc_ops->verify);
1067                 rc = ctx->cc_ops->verify(ctx, req);
1068                 break;
1069         case SPTLRPC_SVC_PRIV:
1070                 LASSERT(ctx->cc_ops->unseal);
1071                 rc = ctx->cc_ops->unseal(ctx, req);
1072                 break;
1073         default:
1074                 LBUG();
1075         }
1076         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1077
1078         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1079             !req->rq_ctx_init)
1080                 req->rq_rep_swab_mask = 0;
1081         RETURN(rc);
1082 }
1083
1084 /**
1085  * Used by ptlrpc client, to perform security transformation upon the reply
1086  * message of \a req. After returning successfully, req->rq_repmsg points to
1087  * the reply message in clear text.
1088  *
1089  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1090  * going to change.
1091  */
1092 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1093 {
1094         LASSERT(req->rq_repbuf);
1095         LASSERT(req->rq_repdata == NULL);
1096         LASSERT(req->rq_repmsg == NULL);
1097         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1098
1099         if (req->rq_reply_off == 0 &&
1100             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1101                 CERROR("real reply with offset 0\n");
1102                 return -EPROTO;
1103         }
1104
1105         if (req->rq_reply_off % 8 != 0) {
1106                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1107                 return -EPROTO;
1108         }
1109
1110         req->rq_repdata = (struct lustre_msg *)
1111                                 (req->rq_repbuf + req->rq_reply_off);
1112         req->rq_repdata_len = req->rq_nob_received;
1113
1114         return do_cli_unwrap_reply(req);
1115 }
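
/*
 * Illustrative sketch, not part of the original file: where the wrap/unwrap
 * pair sits in the life of a request.  example_wrap_unwrap() is made up and
 * elides the actual network send and reply wait, which ptlrpc performs in
 * between the two calls.
 */
static int __maybe_unused example_wrap_unwrap(struct ptlrpc_request *req)
{
        int rc;

        /* transform rq_reqmsg into the on-wire buffer (sign or seal) */
        rc = sptlrpc_cli_wrap_request(req);
        if (rc)
                return rc;

        /* ... send the request and wait for the reply buffer here ... */

        /* verify/unseal the reply; on success rq_repmsg is clear text */
        return sptlrpc_cli_unwrap_reply(req);
}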
1116
1117 /**
1118  * Used by ptlrpc client, to perform security transformation upon the early
1119  * reply message of \a req. We expect the rq_reply_off is 0, and
1120  * rq_nob_received is the early reply size.
1121  *
1122  * Because the receive buffer might still be posted, the reply data might be
1123  * changed at any time, whether we're holding rq_lock or not. For this reason
1124  * we allocate a separate ptlrpc_request and reply buffer for early reply
1125  * processing.
1126  *
1127  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1128  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1129  * \a *req_ret to release it.
1130  * \retval -ve error number, and \a req_ret will not be set.
1131  */
1132 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1133                                    struct ptlrpc_request **req_ret)
1134 {
1135         struct ptlrpc_request *early_req;
1136         char *early_buf;
1137         int early_bufsz, early_size;
1138         int rc;
1139
1140         ENTRY;
1141
1142         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1143         if (early_req == NULL)
1144                 RETURN(-ENOMEM);
1145
1146         ptlrpc_cli_req_init(early_req);
1147
1148         early_size = req->rq_nob_received;
1149         early_bufsz = size_roundup_power2(early_size);
1150         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1151         if (early_buf == NULL)
1152                 GOTO(err_req, rc = -ENOMEM);
1153
1154         /* sanity checks and copy the data out; do it inside the spinlock */
1155         spin_lock(&req->rq_lock);
1156
1157         if (req->rq_replied) {
1158                 spin_unlock(&req->rq_lock);
1159                 GOTO(err_buf, rc = -EALREADY);
1160         }
1161
1162         LASSERT(req->rq_repbuf);
1163         LASSERT(req->rq_repdata == NULL);
1164         LASSERT(req->rq_repmsg == NULL);
1165
1166         if (req->rq_reply_off != 0) {
1167                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1168                 spin_unlock(&req->rq_lock);
1169                 GOTO(err_buf, rc = -EPROTO);
1170         }
1171
1172         if (req->rq_nob_received != early_size) {
1173                 /* even if another early reply arrived, the size should be the same */
1174                 CERROR("data size has changed from %u to %u\n",
1175                        early_size, req->rq_nob_received);
1176                 spin_unlock(&req->rq_lock);
1177                 GOTO(err_buf, rc = -EINVAL);
1178         }
1179
1180         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1181                 CERROR("early reply length %d too small\n",
1182                        req->rq_nob_received);
1183                 spin_unlock(&req->rq_lock);
1184                 GOTO(err_buf, rc = -EALREADY);
1185         }
1186
1187         memcpy(early_buf, req->rq_repbuf, early_size);
1188         spin_unlock(&req->rq_lock);
1189
1190         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1191         early_req->rq_flvr = req->rq_flvr;
1192         early_req->rq_repbuf = early_buf;
1193         early_req->rq_repbuf_len = early_bufsz;
1194         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1195         early_req->rq_repdata_len = early_size;
1196         early_req->rq_early = 1;
1197         early_req->rq_reqmsg = req->rq_reqmsg;
1198
1199         rc = do_cli_unwrap_reply(early_req);
1200         if (rc) {
1201                 DEBUG_REQ(D_ADAPTTO, early_req,
1202                           "unwrap early reply: rc = %d", rc);
1203                 GOTO(err_ctx, rc);
1204         }
1205
1206         LASSERT(early_req->rq_repmsg);
1207         *req_ret = early_req;
1208         RETURN(0);
1209
1210 err_ctx:
1211         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1212 err_buf:
1213         OBD_FREE_LARGE(early_buf, early_bufsz);
1214 err_req:
1215         ptlrpc_request_cache_free(early_req);
1216         RETURN(rc);
1217 }
1218
1219 /**
1220  * Used by ptlrpc client, to release a processed early reply \a early_req.
1221  *
1222  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1223  */
1224 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1225 {
1226         LASSERT(early_req->rq_repbuf);
1227         LASSERT(early_req->rq_repdata);
1228         LASSERT(early_req->rq_repmsg);
1229
1230         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1231         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1232         ptlrpc_request_cache_free(early_req);
1233 }
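
/*
 * Illustrative sketch, not part of the original file: typical use of the
 * early reply helpers above, e.g. from the adaptive timeout path.
 * example_handle_early_reply() is made up and only hints at the deadline
 * update a real caller would perform.
 */
static int __maybe_unused example_handle_early_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_request *early_req;
        int rc;

        rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
        if (rc)
                return rc;

        /* ... read the new timeout from early_req->rq_repmsg here ... */

        /* always release the duplicated request */
        sptlrpc_cli_finish_early_reply(early_req);
        return 0;
}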
1234
1235 /**************************************************
1236  * sec ID                                         *
1237  **************************************************/
1238
1239 /*
1240  * "fixed" secs (e.g. null) use sec_id < 0
1241  */
1242 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1243
1244 int sptlrpc_get_next_secid(void)
1245 {
1246         return atomic_inc_return(&sptlrpc_sec_id);
1247 }
1248 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1249
1250 /*
1251  * client side high-level security APIs
1252  */
1253
1254 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1255                                    int grace, int force)
1256 {
1257         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1258
1259         LASSERT(policy->sp_cops);
1260         LASSERT(policy->sp_cops->flush_ctx_cache);
1261
1262         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1263 }
1264
1265 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1266 {
1267         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1268
1269         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1270         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1271         LASSERT(policy->sp_cops->destroy_sec);
1272
1273         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1274
1275         policy->sp_cops->destroy_sec(sec);
1276         sptlrpc_policy_put(policy);
1277 }
1278
1279 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1280 {
1281         sec_cop_destroy_sec(sec);
1282 }
1283 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1284
1285 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1286 {
1287         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1288
1289         if (sec->ps_policy->sp_cops->kill_sec) {
1290                 sec->ps_policy->sp_cops->kill_sec(sec);
1291
1292                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1293         }
1294 }
1295
1296 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1297 {
1298         if (sec)
1299                 atomic_inc(&sec->ps_refcount);
1300
1301         return sec;
1302 }
1303 EXPORT_SYMBOL(sptlrpc_sec_get);
1304
1305 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1306 {
1307         if (sec) {
1308                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1309
1310                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1311                         sptlrpc_gc_del_sec(sec);
1312                         sec_cop_destroy_sec(sec);
1313                 }
1314         }
1315 }
1316 EXPORT_SYMBOL(sptlrpc_sec_put);
1317
1318 /*
1319  * the policy module is responsible for taking a reference on the import
1320  */
1321 static
1322 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1323                                        struct ptlrpc_svc_ctx *svc_ctx,
1324                                        struct sptlrpc_flavor *sf,
1325                                        enum lustre_sec_part sp)
1326 {
1327         struct ptlrpc_sec_policy *policy;
1328         struct ptlrpc_sec *sec;
1329         char str[32];
1330
1331         ENTRY;
1332
1333         if (svc_ctx) {
1334                 LASSERT(imp->imp_dlm_fake == 1);
1335
1336                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1337                        imp->imp_obd->obd_type->typ_name,
1338                        imp->imp_obd->obd_name,
1339                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1340
1341                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1342                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1343         } else {
1344                 LASSERT(imp->imp_dlm_fake == 0);
1345
1346                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1347                        imp->imp_obd->obd_type->typ_name,
1348                        imp->imp_obd->obd_name,
1349                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1350
1351                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1352                 if (!policy) {
1353                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1354                         RETURN(NULL);
1355                 }
1356         }
1357
1358         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1359         if (sec) {
1360                 atomic_inc(&sec->ps_refcount);
1361
1362                 sec->ps_part = sp;
1363
1364                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1365                         sptlrpc_gc_add_sec(sec);
1366         } else {
1367                 sptlrpc_policy_put(policy);
1368         }
1369
1370         RETURN(sec);
1371 }
1372
1373 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1374 {
1375         struct ptlrpc_sec *sec;
1376
1377         read_lock(&imp->imp_sec_lock);
1378         sec = sptlrpc_sec_get(imp->imp_sec);
1379         read_unlock(&imp->imp_sec_lock);
1380
1381         return sec;
1382 }
1383 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1384
1385 static void sptlrpc_import_sec_install(struct obd_import *imp,
1386                                        struct ptlrpc_sec *sec)
1387 {
1388         struct ptlrpc_sec *old_sec;
1389
1390         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1391
1392         write_lock(&imp->imp_sec_lock);
1393         old_sec = imp->imp_sec;
1394         imp->imp_sec = sec;
1395         write_unlock(&imp->imp_sec_lock);
1396
1397         if (old_sec) {
1398                 sptlrpc_sec_kill(old_sec);
1399
1400                 /* balance the ref taken by this import */
1401                 sptlrpc_sec_put(old_sec);
1402         }
1403 }
1404
1405 static inline
1406 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1407 {
1408         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1409 }
1410
1411 static inline
1412 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1413 {
1414         *dst = *src;
1415 }
1416
1417 /**
1418  * Get an appropriate ptlrpc_sec for the \a imp, according to the current
1419  * configuration. When called, imp->imp_sec may or may not be NULL.
1420  *
1421  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1422  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1423  */
1424 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1425                              struct ptlrpc_svc_ctx *svc_ctx,
1426                              struct sptlrpc_flavor *flvr)
1427 {
1428         struct ptlrpc_connection *conn;
1429         struct sptlrpc_flavor sf;
1430         struct ptlrpc_sec *sec, *newsec;
1431         enum lustre_sec_part sp;
1432         char str[24];
1433         int rc = 0;
1434
1435         ENTRY;
1436
1437         might_sleep();
1438
1439         if (imp == NULL)
1440                 RETURN(0);
1441
1442         conn = imp->imp_connection;
1443
1444         if (svc_ctx == NULL) {
1445                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1446                 /*
1447                  * normal import: determine the flavor from the rule set,
1448                  * except for the MGC, whose flavor is predetermined.
1449                  */
1450                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1451                         sf = cliobd->cl_flvr_mgc;
1452                 else
1453                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1454                                                    cliobd->cl_sp_to,
1455                                                    &cliobd->cl_target_uuid,
1456                                                    conn->c_self, &sf);
1457
1458                 sp = imp->imp_obd->u.cli.cl_sp_me;
1459         } else {
1460                 /* reverse import, determine flavor from incoming request */
1461                 sf = *flvr;
1462
1463                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1464                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1465                                       PTLRPC_SEC_FL_ROOTONLY;
1466
1467                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1468         }
1469
1470         sec = sptlrpc_import_sec_ref(imp);
1471         if (sec) {
1472                 char str2[24];
1473
1474                 if (flavor_equal(&sf, &sec->ps_flvr))
1475                         GOTO(out, rc);
1476
1477                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1478                        imp->imp_obd->obd_name,
1479                        obd_uuid2str(&conn->c_remote_uuid),
1480                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1481                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1482         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1483                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1484                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1485                        imp->imp_obd->obd_name,
1486                        obd_uuid2str(&conn->c_remote_uuid),
1487                        LNET_NIDNET(conn->c_self),
1488                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1489         }
1490
1491         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1492         if (newsec) {
1493                 sptlrpc_import_sec_install(imp, newsec);
1494         } else {
1495                 CERROR("import %s->%s: failed to create new sec\n",
1496                        imp->imp_obd->obd_name,
1497                        obd_uuid2str(&conn->c_remote_uuid));
1498                 rc = -EPERM;
1499         }
1500
1501 out:
1502         sptlrpc_sec_put(sec);
1503         RETURN(rc);
1504 }
1505
1506 void sptlrpc_import_sec_put(struct obd_import *imp)
1507 {
1508         if (imp->imp_sec) {
1509                 sptlrpc_sec_kill(imp->imp_sec);
1510
1511                 sptlrpc_sec_put(imp->imp_sec);
1512                 imp->imp_sec = NULL;
1513         }
1514 }
1515
1516 static void import_flush_ctx_common(struct obd_import *imp,
1517                                     uid_t uid, int grace, int force)
1518 {
1519         struct ptlrpc_sec *sec;
1520
1521         if (imp == NULL)
1522                 return;
1523
1524         sec = sptlrpc_import_sec_ref(imp);
1525         if (sec == NULL)
1526                 return;
1527
1528         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1529         sptlrpc_sec_put(sec);
1530 }
1531
1532 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1533 {
1534         /*
1535          * it's important to use grace mode, see the explanation in
1536          * sptlrpc_req_refresh_ctx()
1537          */
1538         import_flush_ctx_common(imp, 0, 1, 1);
1539 }
1540
1541 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1542 {
1543         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1544                                 1, 1);
1545 }
1546 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1547
1548 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1549 {
1550         import_flush_ctx_common(imp, -1, 1, 1);
1551 }
1552 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
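
/*
 * Illustrative sketch, not part of the original file: the three flush
 * helpers above only differ in which uid they pass to the common routine
 * (-1 means all users).  example_flush_after_relogin() is made up; a caller
 * whose credentials just changed would flush its own contexts like this.
 */
static void __maybe_unused example_flush_after_relogin(struct obd_import *imp)
{
        /* drop the calling user's cached contexts so new creds are used */
        sptlrpc_import_flush_my_ctx(imp);
}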
1553
1554 /**
1555  * Used by ptlrpc client to allocate the request buffer of \a req. Upon
1556  * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
1557  */
1558 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1559 {
1560         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1561         struct ptlrpc_sec_policy *policy;
1562         int rc;
1563
1564         LASSERT(ctx);
1565         LASSERT(ctx->cc_sec);
1566         LASSERT(ctx->cc_sec->ps_policy);
1567         LASSERT(req->rq_reqmsg == NULL);
1568         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1569
1570         policy = ctx->cc_sec->ps_policy;
1571         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1572         if (!rc) {
1573                 LASSERT(req->rq_reqmsg);
1574                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1575
1576                 /* zeroing preallocated buffer */
1577                 if (req->rq_pool)
1578                         memset(req->rq_reqmsg, 0, msgsize);
1579         }
1580
1581         return rc;
1582 }
1583
1584 /**
1585  * Used by ptlrpc client to free the request buffer of \a req. After this,
1586  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1587  */
1588 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1589 {
1590         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1591         struct ptlrpc_sec_policy *policy;
1592
1593         LASSERT(ctx);
1594         LASSERT(ctx->cc_sec);
1595         LASSERT(ctx->cc_sec->ps_policy);
1596         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1597
1598         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1599                 return;
1600
1601         policy = ctx->cc_sec->ps_policy;
1602         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1603         req->rq_reqmsg = NULL;
1604 }
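/*
 * Editor's usage sketch (hypothetical caller, not taken from Lustre sources):
 * the buffer returned by sptlrpc_cli_alloc_reqbuf() is filled through
 * req->rq_reqmsg and later released with sptlrpc_cli_free_reqbuf():
 *
 *      rc = sptlrpc_cli_alloc_reqbuf(req, msgsize);
 *      if (rc)
 *              return rc;
 *      ... pack fields into req->rq_reqmsg ...
 *      sptlrpc_cli_free_reqbuf(req);           (req->rq_reqmsg becomes NULL)
 */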
1605
1606 /*
1607  * NOTE caller must guarantee the buffer size is enough for the enlargement
1608  */
1609 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1610                                   int segment, int newsize)
1611 {
1612         void *src, *dst;
1613         int oldsize, oldmsg_size, movesize;
1614
1615         LASSERT(segment < msg->lm_bufcount);
1616         LASSERT(msg->lm_buflens[segment] <= newsize);
1617
1618         if (msg->lm_buflens[segment] == newsize)
1619                 return;
1620
1621         /* nothing to do if we are enlarging the last segment */
1622         if (segment == msg->lm_bufcount - 1) {
1623                 msg->lm_buflens[segment] = newsize;
1624                 return;
1625         }
1626
1627         oldsize = msg->lm_buflens[segment];
1628
1629         src = lustre_msg_buf(msg, segment + 1, 0);
1630         msg->lm_buflens[segment] = newsize;
1631         dst = lustre_msg_buf(msg, segment + 1, 0);
1632         msg->lm_buflens[segment] = oldsize;
1633
1634         /* move from segment + 1 to end segment */
1635         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1636         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1637         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1638         LASSERT(movesize >= 0);
1639
1640         if (movesize)
1641                 memmove(dst, src, movesize);
1642
1643         /* note we don't clear the area where the old data lived; it is not secret */
1644
1645         /* finally set new segment size */
1646         msg->lm_buflens[segment] = newsize;
1647 }
1648 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
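/*
 * Editor's sketch of the technique above (generic C with a made-up helper
 * name, grow_middle(); not Lustre code): enlarging a middle element of a
 * packed buffer is "move the tail up, then claim the extra space":
 *
 *      static void grow_middle(char *buf, size_t head, size_t oldsize,
 *                              size_t newsize, size_t tail)
 *      {
 *              // buf holds [head][old][tail] and already has room for the
 *              // extra (newsize - oldsize) bytes, as the caller of
 *              // _sptlrpc_enlarge_msg_inplace() must also guarantee.
 *              memmove(buf + head + newsize, buf + head + oldsize, tail);
 *      }
 *
 * _sptlrpc_enlarge_msg_inplace() computes the equivalent offsets with
 * lustre_msg_buf() and lustre_msg_size_v2() so segment alignment is handled.
 */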
1649
1650 /**
1651  * Used by ptlrpc client to enlarge the segment identified by \a field of the
1652  * request message pointed to by req->rq_reqmsg to size \a newsize. All
1653  * previously filled-in data is preserved after the enlargement. This must be
1654  * called after the original request buffer has been allocated.
1655  *
1656  * \note After this is called, rq_reqmsg and rq_reqlen might have changed,
1657  * so the caller should refresh its local pointers if needed.
1658  */
1659 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1660                                const struct req_msg_field *field,
1661                                int newsize)
1662 {
1663         struct req_capsule *pill = &req->rq_pill;
1664         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1665         struct ptlrpc_sec_cops *cops;
1666         struct lustre_msg *msg = req->rq_reqmsg;
1667         int segment = __req_capsule_offset(pill, field, RCL_CLIENT);
1668
1669         LASSERT(ctx);
1670         LASSERT(msg);
1671         LASSERT(msg->lm_bufcount > segment);
1672         LASSERT(msg->lm_buflens[segment] <= newsize);
1673
1674         if (msg->lm_buflens[segment] == newsize)
1675                 return 0;
1676
1677         cops = ctx->cc_sec->ps_policy->sp_cops;
1678         LASSERT(cops->enlarge_reqbuf);
1679         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1680 }
1681 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1682
1683 /**
1684  * Used by ptlrpc client to allocate the reply buffer of \a req.
1685  *
1686  * \note After this, req->rq_repmsg is still not accessible.
1687  */
1688 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1689 {
1690         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1691         struct ptlrpc_sec_policy *policy;
1692
1693         ENTRY;
1694
1695         LASSERT(ctx);
1696         LASSERT(ctx->cc_sec);
1697         LASSERT(ctx->cc_sec->ps_policy);
1698
1699         if (req->rq_repbuf)
1700                 RETURN(0);
1701
1702         policy = ctx->cc_sec->ps_policy;
1703         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1704 }
1705
1706 /**
1707  * Used by ptlrpc client to free the reply buffer of \a req. After this,
1708  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1709  */
1710 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1711 {
1712         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1713         struct ptlrpc_sec_policy *policy;
1714
1715         ENTRY;
1716
1717         LASSERT(ctx);
1718         LASSERT(ctx->cc_sec);
1719         LASSERT(ctx->cc_sec->ps_policy);
1720         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1721
1722         if (req->rq_repbuf == NULL)
1723                 return;
1724         LASSERT(req->rq_repbuf_len);
1725
1726         policy = ctx->cc_sec->ps_policy;
1727         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1728         req->rq_repmsg = NULL;
1729         EXIT;
1730 }
1731 EXPORT_SYMBOL(sptlrpc_cli_free_repbuf);
1732
1733 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1734                                 struct ptlrpc_cli_ctx *ctx)
1735 {
1736         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1737
1738         if (!policy->sp_cops->install_rctx)
1739                 return 0;
1740         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1741 }
1742
1743 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1744                                 struct ptlrpc_svc_ctx *ctx)
1745 {
1746         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1747
1748         if (!policy->sp_sops->install_rctx)
1749                 return 0;
1750         return policy->sp_sops->install_rctx(imp, ctx);
1751 }
1752
1753 /* Get SELinux policy info from userspace */
1754 static int sepol_helper(struct obd_import *imp)
1755 {
1756         char mtime_str[21] = { 0 }, mode_str[2] = { 0 };
1757         char *argv[] = {
1758                 [0] = "/usr/sbin/l_getsepol",
1759                 [1] = "-o",
1760                 [2] = NULL,         /* obd type */
1761                 [3] = "-n",
1762                 [4] = NULL,         /* obd name */
1763                 [5] = "-t",
1764                 [6] = mtime_str,    /* policy mtime */
1765                 [7] = "-m",
1766                 [8] = mode_str,     /* enforcing mode */
1767                 [9] = NULL
1768         };
1769         char *envp[] = {
1770                 [0] = "HOME=/",
1771                 [1] = "PATH=/sbin:/usr/sbin",
1772                 [2] = NULL
1773         };
1774         signed short ret;
1775         int rc = 0;
1776
1777         if (imp == NULL || imp->imp_obd == NULL ||
1778             imp->imp_obd->obd_type == NULL) {
1779                 rc = -EINVAL;
1780         } else {
1781                 argv[2] = (char *)imp->imp_obd->obd_type->typ_name;
1782                 argv[4] = imp->imp_obd->obd_name;
1783                 spin_lock(&imp->imp_sec->ps_lock);
1784                 if (ktime_to_ns(imp->imp_sec->ps_sepol_mtime) == 0 &&
1785                     imp->imp_sec->ps_sepol[0] == '\0') {
1786                         /* ps_sepol has not been initialized */
1787                         argv[5] = NULL;
1788                         argv[7] = NULL;
1789                 } else {
1790                         time64_t mtime_ms;
1791
1792                         mtime_ms = ktime_to_ms(imp->imp_sec->ps_sepol_mtime);
1793                         snprintf(mtime_str, sizeof(mtime_str), "%lld",
1794                                  mtime_ms / MSEC_PER_SEC);
1795                         mode_str[0] = imp->imp_sec->ps_sepol[0];
1796                 }
1797                 spin_unlock(&imp->imp_sec->ps_lock);
1798                 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
1799                 rc = ret >> 8; /* extract the helper's exit code from the wait status */
1800         }
1801
1802         return rc;
1803 }
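/*
 * Editor's note (illustrative only; the obd type/name, timestamp and mode
 * value below are made up): once ps_sepol has been initialized, the helper
 * invocation assembled above looks roughly like
 *
 *      /usr/sbin/l_getsepol -o mdc -n lustre-MDT0000-mdc -t 1600000000 -m 1
 *
 * while on the very first call the "-t" and "-m" arguments are omitted.
 */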
1804
1805 static inline int sptlrpc_sepol_needs_check(struct ptlrpc_sec *imp_sec)
1806 {
1807         ktime_t checknext;
1808
1809         if (send_sepol == 0 || !selinux_is_enabled())
1810                 return 0;
1811
1812         if (send_sepol == -1)
1813                 /* send_sepol == -1 means fetch sepol status every time */
1814                 return 1;
1815
1816         spin_lock(&imp_sec->ps_lock);
1817         checknext = imp_sec->ps_sepol_checknext;
1818         spin_unlock(&imp_sec->ps_lock);
1819
1820         /* if the next check is scheduled too far in the future, reset it */
1821         if (ktime_after(checknext,
1822                         ktime_add(ktime_get(), ktime_set(send_sepol, 0))))
1823                 goto setnext;
1824
1825         if (ktime_before(ktime_get(), checknext))
1826                 /* too early to fetch sepol status */
1827                 return 0;
1828
1829 setnext:
1830         /* define new sepol_checknext time */
1831         spin_lock(&imp_sec->ps_lock);
1832         imp_sec->ps_sepol_checknext = ktime_add(ktime_get(),
1833                                                 ktime_set(send_sepol, 0));
1834         spin_unlock(&imp_sec->ps_lock);
1835
1836         return 1;
1837 }
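/*
 * Editor's note (derived from the checks above): with send_sepol > 0 the
 * SELinux status is refreshed at most once every send_sepol seconds per
 * import; send_sepol == -1 refreshes on every request, and send_sepol == 0
 * (the default) disables reporting entirely.
 */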
1838
1839 int sptlrpc_get_sepol(struct ptlrpc_request *req)
1840 {
1841         struct ptlrpc_sec *imp_sec = req->rq_import->imp_sec;
1842         int rc = 0;
1843
1844         ENTRY;
1845
1846         req->rq_sepol[0] = '\0';
1847
1848 #ifndef HAVE_SELINUX
1849         if (unlikely(send_sepol != 0))
1850                 CDEBUG(D_SEC,
1851                        "Client cannot report SELinux status, it was not built against libselinux.\n");
1852         RETURN(0);
1853 #endif
1854
1855         if (send_sepol == 0 || !selinux_is_enabled())
1856                 RETURN(0);
1857
1858         if (imp_sec == NULL)
1859                 RETURN(-EINVAL);
1860
1861         /* Retrieve SELinux status info */
1862         if (sptlrpc_sepol_needs_check(imp_sec))
1863                 rc = sepol_helper(req->rq_import);
1864         if (likely(rc == 0)) {
1865                 spin_lock(&imp_sec->ps_lock);
1866                 memcpy(req->rq_sepol, imp_sec->ps_sepol,
1867                        sizeof(req->rq_sepol));
1868                 spin_unlock(&imp_sec->ps_lock);
1869         }
1870
1871         RETURN(rc);
1872 }
1873 EXPORT_SYMBOL(sptlrpc_get_sepol);
1874
1875 /*
1876  * server side security
1877  */
1878
1879 static int flavor_allowed(struct sptlrpc_flavor *exp,
1880                           struct ptlrpc_request *req)
1881 {
1882         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1883
1884         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1885                 return 1;
1886
1887         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1888             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1889             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1890             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1891                 return 1;
1892
1893         return 0;
1894 }
1895
1896 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1897
1898 /**
1899  * Given an export \a exp, check whether the flavor of incoming \a req
1900  * is allowed by the export \a exp. Main logic is about taking care of
1901  * changing configurations. Return 0 means success.
1902  */
1903 int sptlrpc_target_export_check(struct obd_export *exp,
1904                                 struct ptlrpc_request *req)
1905 {
1906         struct sptlrpc_flavor   flavor;
1907
1908         if (exp == NULL)
1909                 return 0;
1910
1911         /*
1912          * client side export has no imp_reverse, skip
1913          * FIXME maybe we should check the flavor in this case as well?
1914          */
1915         if (exp->exp_imp_reverse == NULL)
1916                 return 0;
1917
1918         /* don't care about ctx fini rpc */
1919         if (req->rq_ctx_fini)
1920                 return 0;
1921
1922         spin_lock(&exp->exp_lock);
1923
1924         /*
1925          * if the flavor just changed (exp->exp_flvr_changed != 0), we wait for
1926          * the first req with the new flavor, then treat it as the current
1927          * flavor and adapt the reverse sec accordingly.
1928          * note the first rpc with the new flavor might not carry a root ctx,
1929          * in which case the sec_adapt is delayed by leaving exp_flvr_adapt == 1.
1930          */
1931         if (unlikely(exp->exp_flvr_changed) &&
1932             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1933                 /*
1934                  * make the new flavor as "current", and old ones as
1935                  * about-to-expire
1936                  */
1937                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1938                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1939                 flavor = exp->exp_flvr_old[1];
1940                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1941                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1942                 exp->exp_flvr_old[0] = exp->exp_flvr;
1943                 exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
1944                                           EXP_FLVR_UPDATE_EXPIRE;
1945                 exp->exp_flvr = flavor;
1946
1947                 /* flavor change finished */
1948                 exp->exp_flvr_changed = 0;
1949                 LASSERT(exp->exp_flvr_adapt == 1);
1950
1951                 /* if it's gss, we are only interested in root ctx init */
1952                 if (req->rq_auth_gss &&
1953                     !(req->rq_ctx_init &&
1954                     (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1955                     req->rq_auth_usr_ost))) {
1956                         spin_unlock(&exp->exp_lock);
1957                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1958                                req->rq_auth_gss, req->rq_ctx_init,
1959                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1960                                req->rq_auth_usr_ost);
1961                         return 0;
1962                 }
1963
1964                 exp->exp_flvr_adapt = 0;
1965                 spin_unlock(&exp->exp_lock);
1966
1967                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1968                                                 req->rq_svc_ctx, &flavor);
1969         }
1970
1971         /*
1972          * if it equals the current flavor, we accept it, but we may still
1973          * need to deal with the reverse sec/ctx
1974          */
1975         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1976                 /*
1977                  * most cases return here; we are only interested in
1978                  * gss root ctx init
1979                  */
1980                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1981                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1982                      !req->rq_auth_usr_ost)) {
1983                         spin_unlock(&exp->exp_lock);
1984                         return 0;
1985                 }
1986
1987                 /*
1988                  * if the flavor just changed, we should not proceed; just leave
1989                  * it, the current flavor will be discovered and replaced
1990                  * shortly, and let _this_ rpc pass through
1991                  */
1992                 if (exp->exp_flvr_changed) {
1993                         LASSERT(exp->exp_flvr_adapt);
1994                         spin_unlock(&exp->exp_lock);
1995                         return 0;
1996                 }
1997
1998                 if (exp->exp_flvr_adapt) {
1999                         exp->exp_flvr_adapt = 0;
2000                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
2001                                exp, exp->exp_flvr.sf_rpc,
2002                                exp->exp_flvr_old[0].sf_rpc,
2003                                exp->exp_flvr_old[1].sf_rpc);
2004                         flavor = exp->exp_flvr;
2005                         spin_unlock(&exp->exp_lock);
2006
2007                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
2008                                                         req->rq_svc_ctx,
2009                                                         &flavor);
2010                 } else {
2011                         CDEBUG(D_SEC,
2012                                "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
2013                                exp, exp->exp_flvr.sf_rpc,
2014                                exp->exp_flvr_old[0].sf_rpc,
2015                                exp->exp_flvr_old[1].sf_rpc);
2016                         spin_unlock(&exp->exp_lock);
2017
2018                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
2019                                                            req->rq_svc_ctx);
2020                 }
2021         }
2022
2023         if (exp->exp_flvr_expire[0]) {
2024                 if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
2025                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
2026                                 CDEBUG(D_SEC,
2027                                        "exp %p (%x|%x|%x): match the middle one (%lld)\n",
2028                                        exp, exp->exp_flvr.sf_rpc,
2029                                        exp->exp_flvr_old[0].sf_rpc,
2030                                        exp->exp_flvr_old[1].sf_rpc,
2031                                        (s64)(exp->exp_flvr_expire[0] -
2032                                              ktime_get_real_seconds()));
2033                                 spin_unlock(&exp->exp_lock);
2034                                 return 0;
2035                         }
2036                 } else {
2037                         CDEBUG(D_SEC, "mark middle expired\n");
2038                         exp->exp_flvr_expire[0] = 0;
2039                 }
2040                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
2041                        exp->exp_flvr.sf_rpc,
2042                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2043                        req->rq_flvr.sf_rpc);
2044         }
2045
2046         /*
2047          * now that it doesn't match the current flavor, the only way we can
2048          * accept it is if it matches an old flavor that has not expired.
2049          */
2050         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
2051                 if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
2052                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
2053                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
2054                                        exp,
2055                                        exp->exp_flvr.sf_rpc,
2056                                        exp->exp_flvr_old[0].sf_rpc,
2057                                        exp->exp_flvr_old[1].sf_rpc,
2058                                        (s64)(exp->exp_flvr_expire[1] -
2059                                        ktime_get_real_seconds()));
2060                                 spin_unlock(&exp->exp_lock);
2061                                 return 0;
2062                         }
2063                 } else {
2064                         CDEBUG(D_SEC, "mark oldest expired\n");
2065                         exp->exp_flvr_expire[1] = 0;
2066                 }
2067                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match the oldest\n",
2068                        exp, exp->exp_flvr.sf_rpc,
2069                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2070                        req->rq_flvr.sf_rpc);
2071         } else {
2072                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
2073                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
2074                        exp->exp_flvr_old[1].sf_rpc);
2075         }
2076
2077         spin_unlock(&exp->exp_lock);
2078
2079         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
2080               exp, exp->exp_obd->obd_name,
2081               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
2082               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
2083               req->rq_flvr.sf_rpc,
2084               exp->exp_flvr.sf_rpc,
2085               exp->exp_flvr_old[0].sf_rpc,
2086               exp->exp_flvr_expire[0] ?
2087               (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
2088               exp->exp_flvr_old[1].sf_rpc,
2089               exp->exp_flvr_expire[1] ?
2090               (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
2091         return -EACCES;
2092 }
2093 EXPORT_SYMBOL(sptlrpc_target_export_check);
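/*
 * Editor's summary of the flavor bookkeeping above (derived from the code):
 *
 *      exp_flvr                the current flavor
 *      exp_flvr_old[0]         the "middle" one, expires at exp_flvr_expire[0]
 *      exp_flvr_old[1]         the "oldest" one, expires at exp_flvr_expire[1]
 *                              (or the pending new flavor while
 *                              exp_flvr_changed is set)
 *
 * A request is accepted if its flavor matches the pending new flavor, the
 * current flavor, or a not-yet-expired old flavor; anything else is rejected
 * with -EACCES.
 */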
2094
2095 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
2096                                       struct sptlrpc_rule_set *rset)
2097 {
2098         struct obd_export *exp;
2099         struct sptlrpc_flavor new_flvr;
2100
2101         LASSERT(obd);
2102
2103         spin_lock(&obd->obd_dev_lock);
2104
2105         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2106                 if (exp->exp_connection == NULL)
2107                         continue;
2108
2109                 /*
2110                  * note if this export's flavor had just been updated
2111                  * (exp_flvr_changed == 1), this will override the
2112                  * previous update.
2113                  */
2114                 spin_lock(&exp->exp_lock);
2115                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
2116                                              exp->exp_connection->c_peer.nid,
2117                                              &new_flvr);
2118                 if (exp->exp_flvr_changed ||
2119                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
2120                         exp->exp_flvr_old[1] = new_flvr;
2121                         exp->exp_flvr_expire[1] = 0;
2122                         exp->exp_flvr_changed = 1;
2123                         exp->exp_flvr_adapt = 1;
2124
2125                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
2126                                exp, sptlrpc_part2name(exp->exp_sp_peer),
2127                                exp->exp_flvr.sf_rpc,
2128                                exp->exp_flvr_old[1].sf_rpc);
2129                 }
2130                 spin_unlock(&exp->exp_lock);
2131         }
2132
2133         spin_unlock(&obd->obd_dev_lock);
2134 }
2135 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
2136
2137 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2138 {
2139         /* peer's claim is unreliable unless gss is being used */
2140         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2141                 return svc_rc;
2142
2143         switch (req->rq_sp_from) {
2144         case LUSTRE_SP_CLI:
2145                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2146                         /* The below message is checked in sanity-sec test_33 */
2147                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2148                         svc_rc = SECSVC_DROP;
2149                 }
2150                 break;
2151         case LUSTRE_SP_MDT:
2152                 if (!req->rq_auth_usr_mdt) {
2153                         /* The below message is checked in sanity-sec test_33 */
2154                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2155                         svc_rc = SECSVC_DROP;
2156                 }
2157                 break;
2158         case LUSTRE_SP_OST:
2159                 if (!req->rq_auth_usr_ost) {
2160                         /* The below message is checked in sanity-sec test_33 */
2161                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2162                         svc_rc = SECSVC_DROP;
2163                 }
2164                 break;
2165         case LUSTRE_SP_MGS:
2166         case LUSTRE_SP_MGC:
2167                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2168                     !req->rq_auth_usr_ost) {
2169                         /* The below message is checked in sanity-sec test_33 */
2170                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2171                         svc_rc = SECSVC_DROP;
2172                 }
2173                 break;
2174         case LUSTRE_SP_ANY:
2175         default:
2176                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2177                 svc_rc = SECSVC_DROP;
2178         }
2179
2180         return svc_rc;
2181 }
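/*
 * Editor's summary (derived from the switch above): a GSS-authenticated
 * request may only claim a source that its context actually proves:
 *
 *      LUSTRE_SP_CLI           must not carry mdt or ost auth
 *      LUSTRE_SP_MDT           requires rq_auth_usr_mdt
 *      LUSTRE_SP_OST           requires rq_auth_usr_ost
 *      LUSTRE_SP_MGS/MGC       requires root, mdt or ost auth
 *
 * Any other claimed source is dropped.
 */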
2182
2183 /**
2184  * Used by ptlrpc server, to perform transformation upon the request message
2185  * of incoming \a req. This must be the first thing done with an incoming
2186  * request in the ptlrpc layer.
2187  *
2188  * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
2189  * in clear text, of size req->rq_reqlen; also req->rq_svc_ctx is set.
2190  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2191  * reply message has been prepared.
2192  * \retval SECSVC_DROP failed, this request should be dropped.
2193  */
2194 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2195 {
2196         struct ptlrpc_sec_policy *policy;
2197         struct lustre_msg *msg = req->rq_reqbuf;
2198         int rc;
2199
2200         ENTRY;
2201
2202         LASSERT(msg);
2203         LASSERT(req->rq_reqmsg == NULL);
2204         LASSERT(req->rq_repmsg == NULL);
2205         LASSERT(req->rq_svc_ctx == NULL);
2206
2207         req->rq_req_swab_mask = 0;
2208
2209         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2210         switch (rc) {
2211         case 1:
2212                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); /* fall through */
2213         case 0:
2214                 break;
2215         default:
2216                 CERROR("error unpacking request from %s x%llu\n",
2217                        libcfs_id2str(req->rq_peer), req->rq_xid);
2218                 RETURN(SECSVC_DROP);
2219         }
2220
2221         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2222         req->rq_sp_from = LUSTRE_SP_ANY;
2223         req->rq_auth_uid = -1; /* set to INVALID_UID */
2224         req->rq_auth_mapped_uid = -1;
2225
2226         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2227         if (!policy) {
2228                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2229                 RETURN(SECSVC_DROP);
2230         }
2231
2232         LASSERT(policy->sp_sops->accept);
2233         rc = policy->sp_sops->accept(req);
2234         sptlrpc_policy_put(policy);
2235         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2236         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2237
2238         /*
2239          * if it's not the null flavor (which means the real msg is embedded in
2240          * a wrapper msg), reset the swab mask for the coming inner msg unpacking.
2241          */
2242         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2243                 req->rq_req_swab_mask = 0;
2244
2245         /* sanity check for the request source */
2246         rc = sptlrpc_svc_check_from(req, rc);
2247         RETURN(rc);
2248 }
2249
2250 /**
2251  * Used by ptlrpc server, to allocate the reply buffer for \a req. On success,
2252  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2253  * a buffer of \a msglen size.
2254  */
2255 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2256 {
2257         struct ptlrpc_sec_policy *policy;
2258         struct ptlrpc_reply_state *rs;
2259         int rc;
2260
2261         ENTRY;
2262
2263         LASSERT(req->rq_svc_ctx);
2264         LASSERT(req->rq_svc_ctx->sc_policy);
2265
2266         policy = req->rq_svc_ctx->sc_policy;
2267         LASSERT(policy->sp_sops->alloc_rs);
2268
2269         rc = policy->sp_sops->alloc_rs(req, msglen);
2270         if (unlikely(rc == -ENOMEM)) {
2271                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2272
2273                 if (svcpt->scp_service->srv_max_reply_size <
2274                    msglen + sizeof(struct ptlrpc_reply_state)) {
2275                         /* Just return failure if the size is too big */
2276                         CERROR("size of message is too big (%zd), %d allowed\n",
2277                                 msglen + sizeof(struct ptlrpc_reply_state),
2278                                 svcpt->scp_service->srv_max_reply_size);
2279                         RETURN(-ENOMEM);
2280                 }
2281
2282                 /* failed alloc, try emergency pool */
2283                 rs = lustre_get_emerg_rs(svcpt);
2284                 if (rs == NULL)
2285                         RETURN(-ENOMEM);
2286
2287                 req->rq_reply_state = rs;
2288                 rc = policy->sp_sops->alloc_rs(req, msglen);
2289                 if (rc) {
2290                         lustre_put_emerg_rs(rs);
2291                         req->rq_reply_state = NULL;
2292                 }
2293         }
2294
2295         LASSERT(rc != 0 ||
2296                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2297
2298         RETURN(rc);
2299 }
2300
2301 /**
2302  * Used by ptlrpc server, to perform transformation upon reply message.
2303  *
2304  * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
2305  * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2306  */
2307 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2308 {
2309         struct ptlrpc_sec_policy *policy;
2310         int rc;
2311
2312         ENTRY;
2313
2314         LASSERT(req->rq_svc_ctx);
2315         LASSERT(req->rq_svc_ctx->sc_policy);
2316
2317         policy = req->rq_svc_ctx->sc_policy;
2318         LASSERT(policy->sp_sops->authorize);
2319
2320         rc = policy->sp_sops->authorize(req);
2321         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2322
2323         RETURN(rc);
2324 }
2325
2326 /**
2327  * Used by ptlrpc server, to free the reply state \a rs.
2328  */
2329 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2330 {
2331         struct ptlrpc_sec_policy *policy;
2332         unsigned int prealloc;
2333
2334         ENTRY;
2335
2336         LASSERT(rs->rs_svc_ctx);
2337         LASSERT(rs->rs_svc_ctx->sc_policy);
2338
2339         policy = rs->rs_svc_ctx->sc_policy;
2340         LASSERT(policy->sp_sops->free_rs);
2341
2342         prealloc = rs->rs_prealloc;
2343         policy->sp_sops->free_rs(rs);
2344
2345         if (prealloc)
2346                 lustre_put_emerg_rs(rs);
2347         EXIT;
2348 }
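/*
 * Editor's sketch of the server-side flow (hypothetical service loop, not an
 * actual Lustre call site): the four entry points above are used in order:
 *
 *      rc = sptlrpc_svc_unwrap_request(req);      SECSVC_OK/COMPLETE/DROP
 *      rc = sptlrpc_svc_alloc_rs(req, msglen);    sets req->rq_reply_state
 *      ... build the reply in req->rq_reply_state->rs_msg ...
 *      rc = sptlrpc_svc_wrap_reply(req);          fills rs_repdata_len
 *      sptlrpc_svc_free_rs(req->rq_reply_state);
 */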
2349
2350 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2351 {
2352         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2353
2354         if (ctx != NULL)
2355                 atomic_inc(&ctx->sc_refcount);
2356 }
2357
2358 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2359 {
2360         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2361
2362         if (ctx == NULL)
2363                 return;
2364
2365         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2366         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2367                 if (ctx->sc_policy->sp_sops->free_ctx)
2368                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2369         }
2370         req->rq_svc_ctx = NULL;
2371 }
2372
2373 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2374 {
2375         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2376
2377         if (ctx == NULL)
2378                 return;
2379
2380         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2381         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2382                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2383 }
2384 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2385
2386 /*
2387  * bulk security
2388  */
2389
2390 /**
2391  * Perform transformation upon the bulk data pointed to by \a desc. This is
2392  * called before transforming the request message.
2393  */
2394 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2395                           struct ptlrpc_bulk_desc *desc)
2396 {
2397         struct ptlrpc_cli_ctx *ctx;
2398
2399         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2400
2401         if (!req->rq_pack_bulk)
2402                 return 0;
2403
2404         ctx = req->rq_cli_ctx;
2405         if (ctx->cc_ops->wrap_bulk)
2406                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2407         return 0;
2408 }
2409 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2410
2411 /**
2412  * This is called after the reply message has been unwrapped.
2413  * Returns the actual plain text size received in bytes (nob), or an error code.
2414  */
2415 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2416                                  struct ptlrpc_bulk_desc *desc,
2417                                  int nob)
2418 {
2419         struct ptlrpc_cli_ctx *ctx;
2420         int rc;
2421
2422         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2423
2424         if (!req->rq_pack_bulk)
2425                 return desc->bd_nob_transferred;
2426
2427         ctx = req->rq_cli_ctx;
2428         if (ctx->cc_ops->unwrap_bulk) {
2429                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2430                 if (rc < 0)
2431                         return rc;
2432         }
2433         return desc->bd_nob_transferred;
2434 }
2435 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2436
2437 /**
2438  * This is called after the reply message has been unwrapped.
2439  * Returns 0 on success, or an error code.
2440  */
2441 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2442                                   struct ptlrpc_bulk_desc *desc)
2443 {
2444         struct ptlrpc_cli_ctx *ctx;
2445         int rc;
2446
2447         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2448
2449         if (!req->rq_pack_bulk)
2450                 return 0;
2451
2452         ctx = req->rq_cli_ctx;
2453         if (ctx->cc_ops->unwrap_bulk) {
2454                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2455                 if (rc < 0)
2456                         return rc;
2457         }
2458
2459         /*
2460          * if everything goes right, nob should equal nob_transferred.
2461          * in privacy mode, nob_transferred needs to be adjusted.
2462          */
2463         if (desc->bd_nob != desc->bd_nob_transferred) {
2464                 CERROR("nob %d doesn't match transferred nob %d\n",
2465                        desc->bd_nob, desc->bd_nob_transferred);
2466                 return -EPROTO;
2467         }
2468
2469         return 0;
2470 }
2471 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2472
2473 #ifdef HAVE_SERVER_SUPPORT
2474 /**
2475  * Perform transformation upon outgoing bulk read.
2476  */
2477 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2478                           struct ptlrpc_bulk_desc *desc)
2479 {
2480         struct ptlrpc_svc_ctx *ctx;
2481
2482         LASSERT(req->rq_bulk_read);
2483
2484         if (!req->rq_pack_bulk)
2485                 return 0;
2486
2487         ctx = req->rq_svc_ctx;
2488         if (ctx->sc_policy->sp_sops->wrap_bulk)
2489                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2490
2491         return 0;
2492 }
2493 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2494
2495 /**
2496  * Perform transformation upon incoming bulk write.
2497  */
2498 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2499                             struct ptlrpc_bulk_desc *desc)
2500 {
2501         struct ptlrpc_svc_ctx *ctx;
2502         int rc;
2503
2504         LASSERT(req->rq_bulk_write);
2505
2506         /*
2507          * in privacy mode, transferred should be >= expected; otherwise
2508          * transferred should be == expected.
2509          */
2510         if (desc->bd_nob_transferred < desc->bd_nob ||
2511             (desc->bd_nob_transferred > desc->bd_nob &&
2512              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2513              SPTLRPC_BULK_SVC_PRIV)) {
2514                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2515                           desc->bd_nob_transferred, desc->bd_nob);
2516                 return -ETIMEDOUT;
2517         }
2518
2519         if (!req->rq_pack_bulk)
2520                 return 0;
2521
2522         ctx = req->rq_svc_ctx;
2523         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2524                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2525                 if (rc)
2526                         CERROR("error unwrap bulk: %d\n", rc);
2527         }
2528
2529         /* return 0 to allow the reply to be sent */
2530         return 0;
2531 }
2532 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2533
2534 /**
2535  * Prepare buffers for incoming bulk write.
2536  */
2537 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2538                           struct ptlrpc_bulk_desc *desc)
2539 {
2540         struct ptlrpc_svc_ctx *ctx;
2541
2542         LASSERT(req->rq_bulk_write);
2543
2544         if (!req->rq_pack_bulk)
2545                 return 0;
2546
2547         ctx = req->rq_svc_ctx;
2548         if (ctx->sc_policy->sp_sops->prep_bulk)
2549                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2550
2551         return 0;
2552 }
2553 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2554
2555 #endif /* HAVE_SERVER_SUPPORT */
2556
2557 /*
2558  * user descriptor helpers
2559  */
2560
2561 int sptlrpc_current_user_desc_size(void)
2562 {
2563         int ngroups;
2564
2565         ngroups = current_cred()->group_info->ngroups;
2566
2567         if (ngroups > LUSTRE_MAX_GROUPS)
2568                 ngroups = LUSTRE_MAX_GROUPS;
2569         return sptlrpc_user_desc_size(ngroups);
2570 }
2571 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2572
2573 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2574 {
2575         struct ptlrpc_user_desc *pud;
2576         int ngroups;
2577
2578         pud = lustre_msg_buf(msg, offset, 0);
2579
2580         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2581         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2582         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2583         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2584         pud->pud_cap = cfs_curproc_cap_pack();
2585         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2586
2587         task_lock(current);
2588         ngroups = current_cred()->group_info->ngroups;
2589         if (pud->pud_ngroups > ngroups)
2590                 pud->pud_ngroups = ngroups;
2591 #ifdef HAVE_GROUP_INFO_GID
2592         memcpy(pud->pud_groups, current_cred()->group_info->gid,
2593                pud->pud_ngroups * sizeof(__u32));
2594 #else /* !HAVE_GROUP_INFO_GID */
2595         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2596                pud->pud_ngroups * sizeof(__u32));
2597 #endif /* HAVE_GROUP_INFO_GID */
2598         task_unlock(current);
2599
2600         return 0;
2601 }
2602 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2603
2604 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2605 {
2606         struct ptlrpc_user_desc *pud;
2607         int i;
2608
2609         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2610         if (!pud)
2611                 return -EINVAL;
2612
2613         if (swabbed) {
2614                 __swab32s(&pud->pud_uid);
2615                 __swab32s(&pud->pud_gid);
2616                 __swab32s(&pud->pud_fsuid);
2617                 __swab32s(&pud->pud_fsgid);
2618                 __swab32s(&pud->pud_cap);
2619                 __swab32s(&pud->pud_ngroups);
2620         }
2621
2622         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2623                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2624                 return -EINVAL;
2625         }
2626
2627         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2628             msg->lm_buflens[offset]) {
2629                 CERROR("%u groups are claimed but bufsize only %u\n",
2630                        pud->pud_ngroups, msg->lm_buflens[offset]);
2631                 return -EINVAL;
2632         }
2633
2634         if (swabbed) {
2635                 for (i = 0; i < pud->pud_ngroups; i++)
2636                         __swab32s(&pud->pud_groups[i]);
2637         }
2638
2639         return 0;
2640 }
2641 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
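/*
 * Editor's note on the descriptor layout (inferred from the pack/unpack code
 * above; sptlrpc_user_desc_size() is assumed to size it the same way):
 *
 *      struct ptlrpc_user_desc         uid/gid/fsuid/fsgid/cap/ngroups header
 *      __u32 pud_groups[ngroups]       supplementary groups, 4 bytes each
 *
 * so a valid buffer at \a offset holds at least
 * sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) bytes, which is exactly the
 * bound sptlrpc_unpack_user_desc() enforces.
 */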
2642
2643 /*
2644  * misc helpers
2645  */
2646
2647 const char *sec2target_str(struct ptlrpc_sec *sec)
2648 {
2649         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2650                 return "*";
2651         if (sec_is_reverse(sec))
2652                 return "c";
2653         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2654 }
2655 EXPORT_SYMBOL(sec2target_str);
2656
2657 /*
2658  * return true if the bulk data is protected
2659  */
2660 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2661 {
2662         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2663         case SPTLRPC_BULK_SVC_INTG:
2664         case SPTLRPC_BULK_SVC_PRIV:
2665                 return 1;
2666         default:
2667                 return 0;
2668         }
2669 }
2670 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2671
2672 /*
2673  * crypto API helper/alloc blkcipher
2674  */
2675
2676 /*
2677  * initialize/finalize
2678  */
2679
2680 int sptlrpc_init(void)
2681 {
2682         int rc;
2683
2684         rwlock_init(&policy_lock);
2685
2686         rc = sptlrpc_gc_init();
2687         if (rc)
2688                 goto out;
2689
2690         rc = sptlrpc_conf_init();
2691         if (rc)
2692                 goto out_gc;
2693
2694         rc = sptlrpc_enc_pool_init();
2695         if (rc)
2696                 goto out_conf;
2697
2698         rc = sptlrpc_null_init();
2699         if (rc)
2700                 goto out_pool;
2701
2702         rc = sptlrpc_plain_init();
2703         if (rc)
2704                 goto out_null;
2705
2706         rc = sptlrpc_lproc_init();
2707         if (rc)
2708                 goto out_plain;
2709
2710         return 0;
2711
2712 out_plain:
2713         sptlrpc_plain_fini();
2714 out_null:
2715         sptlrpc_null_fini();
2716 out_pool:
2717         sptlrpc_enc_pool_fini();
2718 out_conf:
2719         sptlrpc_conf_fini();
2720 out_gc:
2721         sptlrpc_gc_fini();
2722 out:
2723         return rc;
2724 }
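/*
 * Editor's note: sptlrpc_fini() below tears the subsystems down in exactly
 * the reverse order of the initialization above (lproc, plain, null, enc
 * pool, conf, gc), mirroring the goto-unwind labels of sptlrpc_init().
 */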
2725
2726 void sptlrpc_fini(void)
2727 {
2728         sptlrpc_lproc_fini();
2729         sptlrpc_plain_fini();
2730         sptlrpc_null_fini();
2731         sptlrpc_enc_pool_fini();
2732         sptlrpc_conf_fini();
2733         sptlrpc_gc_fini();
2734 }