LU-17000 ptlrpc: fix string overflow warnings
[fs/lustre-release.git] / lustre / ptlrpc / sec.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/ptlrpc/sec.c
32  *
33  * Author: Eric Mei <ericm@clusterfs.com>
34  */
35
36 #define DEBUG_SUBSYSTEM S_SEC
37
38 #include <linux/user_namespace.h>
39 #include <linux/uidgid.h>
40 #include <linux/crypto.h>
41 #include <linux/key.h>
42
43 #include <libcfs/libcfs.h>
44 #include <obd.h>
45 #include <obd_class.h>
46 #include <obd_support.h>
47 #include <lustre_net.h>
48 #include <lustre_import.h>
49 #include <lustre_dlm.h>
50 #include <lustre_sec.h>
51
52 #include "ptlrpc_internal.h"
53
54 static int send_sepol;
55 module_param(send_sepol, int, 0644);
56 MODULE_PARM_DESC(send_sepol, "Client sends SELinux policy status");
57
58 /*
59  * policy registers
60  */
61
62 static rwlock_t policy_lock;
63 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
64         NULL,
65 };
66
67 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
68 {
69         __u16 number = policy->sp_policy;
70
71         LASSERT(policy->sp_name);
72         LASSERT(policy->sp_cops);
73         LASSERT(policy->sp_sops);
74
75         if (number >= SPTLRPC_POLICY_MAX)
76                 return -EINVAL;
77
78         write_lock(&policy_lock);
79         if (unlikely(policies[number])) {
80                 write_unlock(&policy_lock);
81                 return -EALREADY;
82         }
83         policies[number] = policy;
84         write_unlock(&policy_lock);
85
86         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
87         return 0;
88 }
89 EXPORT_SYMBOL(sptlrpc_register_policy);
90
91 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
92 {
93         __u16 number = policy->sp_policy;
94
95         LASSERT(number < SPTLRPC_POLICY_MAX);
96
97         write_lock(&policy_lock);
98         if (unlikely(policies[number] == NULL)) {
99                 write_unlock(&policy_lock);
100                 CERROR("%s: already unregistered\n", policy->sp_name);
101                 return -EINVAL;
102         }
103
104         LASSERT(policies[number] == policy);
105         policies[number] = NULL;
106         write_unlock(&policy_lock);
107
108         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
109         return 0;
110 }
111 EXPORT_SYMBOL(sptlrpc_unregister_policy);
112
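/*
 * Illustrative sketch (not part of the original file): a security policy
 * module such as ptlrpc_gss would typically register itself at module init
 * and unregister at module exit. The identifiers my_policy, my_cli_ops and
 * my_svc_ops below are hypothetical:
 *
 *      static struct ptlrpc_sec_policy my_policy = {
 *              .sp_owner  = THIS_MODULE,
 *              .sp_name   = "policy.example",
 *              .sp_policy = SPTLRPC_POLICY_GSS,
 *              .sp_cops   = &my_cli_ops,
 *              .sp_sops   = &my_svc_ops,
 *      };
 *
 *      in module init:  rc = sptlrpc_register_policy(&my_policy);
 *      in module exit:  sptlrpc_unregister_policy(&my_policy);
 */
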
113 static
114 struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
115 {
116         static DEFINE_MUTEX(load_mutex);
117         struct ptlrpc_sec_policy *policy;
118         __u16 number = SPTLRPC_FLVR_POLICY(flavor);
119         int rc;
120
121         if (number >= SPTLRPC_POLICY_MAX)
122                 return NULL;
123
124         while (1) {
125                 read_lock(&policy_lock);
126                 policy = policies[number];
127                 if (policy && !try_module_get(policy->sp_owner))
128                         policy = NULL;
129                 read_unlock(&policy_lock);
130
131                 if (policy != NULL || number != SPTLRPC_POLICY_GSS)
132                         break;
133
134                 /* try to load gss module, happens only if policy at index
135                  * SPTLRPC_POLICY_GSS is not already referenced in
136                  * global array policies[]
137                  */
138                 mutex_lock(&load_mutex);
139                 /* The fact that request_module() returns 0 does not guarantee
140                  * the module has done its job. So we must check that the
141                  * requested policy is now available. This is done by checking
142                  * again for policies[number] in the loop.
143                  */
144                 rc = request_module("ptlrpc_gss");
145                 if (rc == 0)
146                         CDEBUG(D_SEC, "module ptlrpc_gss loaded on demand\n");
147                 else
148                         CERROR("Unable to load module ptlrpc_gss: rc %d\n", rc);
149                 mutex_unlock(&load_mutex);
150         }
151
152         return policy;
153 }
154
155 __u32 sptlrpc_name2flavor_base(const char *name)
156 {
157         if (!strcmp(name, "null"))
158                 return SPTLRPC_FLVR_NULL;
159         if (!strcmp(name, "plain"))
160                 return SPTLRPC_FLVR_PLAIN;
161         if (!strcmp(name, "gssnull"))
162                 return SPTLRPC_FLVR_GSSNULL;
163         if (!strcmp(name, "krb5n"))
164                 return SPTLRPC_FLVR_KRB5N;
165         if (!strcmp(name, "krb5a"))
166                 return SPTLRPC_FLVR_KRB5A;
167         if (!strcmp(name, "krb5i"))
168                 return SPTLRPC_FLVR_KRB5I;
169         if (!strcmp(name, "krb5p"))
170                 return SPTLRPC_FLVR_KRB5P;
171         if (!strcmp(name, "skn"))
172                 return SPTLRPC_FLVR_SKN;
173         if (!strcmp(name, "ska"))
174                 return SPTLRPC_FLVR_SKA;
175         if (!strcmp(name, "ski"))
176                 return SPTLRPC_FLVR_SKI;
177         if (!strcmp(name, "skpi"))
178                 return SPTLRPC_FLVR_SKPI;
179
180         return SPTLRPC_FLVR_INVALID;
181 }
182 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
183
184 const char *sptlrpc_flavor2name_base(__u32 flvr)
185 {
186         __u32   base = SPTLRPC_FLVR_BASE(flvr);
187
188         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
189                 return "null";
190         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
191                 return "plain";
192         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
193                 return "gssnull";
194         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
195                 return "krb5n";
196         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
197                 return "krb5a";
198         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
199                 return "krb5i";
200         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
201                 return "krb5p";
202         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
203                 return "skn";
204         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
205                 return "ska";
206         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
207                 return "ski";
208         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
209                 return "skpi";
210
211         CERROR("invalid wire flavor 0x%x\n", flvr);
212         return "invalid";
213 }
214 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
215
216 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
217                                char *buf, int bufsize)
218 {
219         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
220                 snprintf(buf, bufsize, "hash:%s",
221                         sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
222         else
223                 snprintf(buf, bufsize, "%s",
224                         sptlrpc_flavor2name_base(sf->sf_rpc));
225
226         buf[bufsize - 1] = '\0';
227         return buf;
228 }
229 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
230
231 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
232 {
233         size_t ln;
234
235         ln = snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
236
237         /*
238          * currently we don't support customized bulk specification for
239          * flavors other than plain
240          */
241         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
242                 char bspec[16];
243
244                 bspec[0] = '-';
245                 sptlrpc_flavor2name_bulk(sf, bspec + 1, sizeof(bspec) - 1);
246                 strncat(buf, bspec, bufsize - ln);
247         }
248
249         buf[bufsize - 1] = '\0';
250         return buf;
251 }
252 EXPORT_SYMBOL(sptlrpc_flavor2name);
253
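/*
 * Usage sketch (illustrative): callers format a flavor into a small stack
 * buffer, as sptlrpc_sec_create() does further below:
 *
 *      char str[32];
 *
 *      CDEBUG(D_SEC, "using flavor %s\n",
 *             sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)));
 */
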
254 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
255 {
256         buf[0] = '\0';
257
258         if (flags & PTLRPC_SEC_FL_REVERSE)
259                 strlcat(buf, "reverse,", bufsize);
260         if (flags & PTLRPC_SEC_FL_ROOTONLY)
261                 strlcat(buf, "rootonly,", bufsize);
262         if (flags & PTLRPC_SEC_FL_UDESC)
263                 strlcat(buf, "udesc,", bufsize);
264         if (flags & PTLRPC_SEC_FL_BULK)
265                 strlcat(buf, "bulk,", bufsize);
266         if (buf[0] == '\0')
267                 strlcat(buf, "-,", bufsize);
268
269         return buf;
270 }
271 EXPORT_SYMBOL(sptlrpc_secflags2str);
272
273 /*
274  * client context APIs
275  */
276
277 static
278 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
279 {
280         struct vfs_cred vcred;
281         int create = 1, remove_dead = 1;
282
283         LASSERT(sec);
284         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
285
286         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
287                                      PTLRPC_SEC_FL_ROOTONLY)) {
288                 vcred.vc_uid = 0;
289                 vcred.vc_gid = 0;
290                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
291                         create = 0;
292                         remove_dead = 0;
293                 }
294         } else {
295                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
296                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
297         }
298
299         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
300                                                    remove_dead);
301 }
302
303 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
304 {
305         atomic_inc(&ctx->cc_refcount);
306         return ctx;
307 }
308 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
309
310 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
311 {
312         struct ptlrpc_sec *sec = ctx->cc_sec;
313
314         LASSERT(sec);
315         LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
316
317         if (!atomic_dec_and_test(&ctx->cc_refcount))
318                 return;
319
320         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
321 }
322 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
323
324 /**
325  * Expire the client context immediately.
326  *
327  * \pre Caller must hold at least 1 reference on the \a ctx.
328  */
329 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
330 {
331         LASSERT(ctx->cc_ops->die);
332         ctx->cc_ops->die(ctx, 0);
333 }
334 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
335
336 /**
337  * To wake up the threads who are waiting for this client context. Called
338  * after some status change happened on \a ctx.
339  */
340 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
341 {
342         struct ptlrpc_request *req, *next;
343
344         spin_lock(&ctx->cc_lock);
345         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
346                                      rq_ctx_chain) {
347                 list_del_init(&req->rq_ctx_chain);
348                 ptlrpc_client_wake_req(req);
349         }
350         spin_unlock(&ctx->cc_lock);
351 }
352 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
353
354 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
355 {
356         LASSERT(ctx->cc_ops);
357
358         if (ctx->cc_ops->display == NULL)
359                 return 0;
360
361         return ctx->cc_ops->display(ctx, buf, bufsize);
362 }
363
364 static int import_sec_check_expire(struct obd_import *imp)
365 {
366         int adapt = 0;
367
368         write_lock(&imp->imp_sec_lock);
369         if (imp->imp_sec_expire &&
370             imp->imp_sec_expire < ktime_get_real_seconds()) {
371                 adapt = 1;
372                 imp->imp_sec_expire = 0;
373         }
374         write_unlock(&imp->imp_sec_lock);
375
376         if (!adapt)
377                 return 0;
378
379         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
380         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
381 }
382
383 /**
384  * Get and validate the client side ptlrpc security facilities from
385  * \a imp. There is a race condition on client reconnect when the import is
386  * being destroyed while there are outstanding client bound requests. In
387  * this case do not output any error messages if import security is not
388  * found.
389  *
390  * \param[in] imp obd import associated with client
391  * \param[out] sec client side ptlrpc security
392  *
393  * \retval 0 if security retrieved successfully
394  * \retval -ve errno if there was a problem
395  */
396 static int import_sec_validate_get(struct obd_import *imp,
397                                    struct ptlrpc_sec **sec)
398 {
399         int rc;
400
401         if (unlikely(imp->imp_sec_expire)) {
402                 rc = import_sec_check_expire(imp);
403                 if (rc)
404                         return rc;
405         }
406
407         *sec = sptlrpc_import_sec_ref(imp);
408         if (*sec == NULL) {
409                 /* Only output an error when the import is still active */
410                 if (!test_bit(WORK_STRUCT_PENDING_BIT,
411                               work_data_bits(&imp->imp_zombie_work)))
412                         CERROR("import %p (%s) with no sec\n",
413                                imp, ptlrpc_import_state_name(imp->imp_state));
414                 return -EACCES;
415         }
416
417         if (unlikely((*sec)->ps_dying)) {
418                 CERROR("attempt to use dying sec %p\n", sec);
419                 sptlrpc_sec_put(*sec);
420                 return -EACCES;
421         }
422
423         return 0;
424 }
425
426 /**
427  * Given a \a req, find or allocate an appropriate context for it.
428  * \pre req->rq_cli_ctx == NULL.
429  *
430  * \retval 0 succeed, and req->rq_cli_ctx is set.
431  * \retval -ev error number, and req->rq_cli_ctx == NULL.
432  */
433 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
434 {
435         struct obd_import *imp = req->rq_import;
436         struct ptlrpc_sec *sec;
437         int rc;
438
439         ENTRY;
440
441         LASSERT(!req->rq_cli_ctx);
442         LASSERT(imp);
443
444         rc = import_sec_validate_get(imp, &sec);
445         if (rc)
446                 RETURN(rc);
447
448         req->rq_cli_ctx = get_my_ctx(sec);
449
450         sptlrpc_sec_put(sec);
451
452         if (!req->rq_cli_ctx) {
453                 CERROR("req %p: fail to get context\n", req);
454                 RETURN(-ECONNREFUSED);
455         }
456
457         RETURN(0);
458 }
459
460 /**
461  * Drop the context for \a req.
462  * \pre req->rq_cli_ctx != NULL.
463  * \post req->rq_cli_ctx == NULL.
464  *
465  * If \a sync == 0, this function should return quickly without sleep;
466  * otherwise it might trigger and wait for the whole process of sending
467  * a context-destroying RPC to the server.
468  */
469 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
470 {
471         ENTRY;
472
473         LASSERT(req);
474         LASSERT(req->rq_cli_ctx);
475
476         /*
477          * the request might be asked to release its context early while
478          * still on the context waiting list.
479          */
480         if (!list_empty(&req->rq_ctx_chain)) {
481                 spin_lock(&req->rq_cli_ctx->cc_lock);
482                 list_del_init(&req->rq_ctx_chain);
483                 spin_unlock(&req->rq_cli_ctx->cc_lock);
484         }
485
486         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
487         req->rq_cli_ctx = NULL;
488         EXIT;
489 }
490
491 static
492 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
493                            struct ptlrpc_cli_ctx *oldctx,
494                            struct ptlrpc_cli_ctx *newctx)
495 {
496         struct sptlrpc_flavor old_flvr;
497         char *reqmsg = NULL; /* to work around old gcc */
498         int reqmsg_size;
499         int rc = 0;
500
501         CDEBUG(D_SEC,
502                "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
503                req, oldctx, oldctx->cc_vcred.vc_uid,
504                sec2target_str(oldctx->cc_sec), newctx, newctx->cc_vcred.vc_uid,
505                sec2target_str(newctx->cc_sec), oldctx->cc_sec,
506                oldctx->cc_sec->ps_policy->sp_name, newctx->cc_sec,
507                newctx->cc_sec->ps_policy->sp_name);
508
509         /* save flavor */
510         old_flvr = req->rq_flvr;
511
512         /* save request message */
513         reqmsg_size = req->rq_reqlen;
514         if (reqmsg_size != 0) {
515                 LASSERT(req->rq_reqmsg);
516                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
517                 if (reqmsg == NULL)
518                         return -ENOMEM;
519                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
520         }
521
522         /* release old req/rep buf */
523         req->rq_cli_ctx = oldctx;
524         sptlrpc_cli_free_reqbuf(req);
525         sptlrpc_cli_free_repbuf(req);
526         req->rq_cli_ctx = newctx;
527
528         /* recalculate the flavor */
529         sptlrpc_req_set_flavor(req, 0);
530
531         /*
532          * alloc new request buffer
533          * we don't need to alloc a reply buffer here, leave it to the
534          * rest of the ptlrpc processing
535          */
536         if (reqmsg_size != 0) {
537                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
538                 if (!rc) {
539                         LASSERT(req->rq_reqmsg);
540                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
541                 } else {
542                         CWARN("failed to alloc reqbuf: %d\n", rc);
543                         req->rq_flvr = old_flvr;
544                 }
545
546                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
547         }
548         return rc;
549 }
550
551 /**
552  * If current context of \a req is dead somehow, e.g. we just switched flavor
553  * thus marked original contexts dead, we'll find a new context for it. If
554  * no switch is needed, \a req will end up with the same context.
555  *
556  * \note a request must have a context, to keep other parts of code happy.
557  * In any case of failure during the switching, we must restore the old one.
558  */
559 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
560 {
561         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
562         struct ptlrpc_cli_ctx *newctx;
563         int rc;
564
565         ENTRY;
566
567         LASSERT(oldctx);
568
569         sptlrpc_cli_ctx_get(oldctx);
570         sptlrpc_req_put_ctx(req, 0);
571
572         rc = sptlrpc_req_get_ctx(req);
573         if (unlikely(rc)) {
574                 LASSERT(!req->rq_cli_ctx);
575
576                 /* restore old ctx */
577                 req->rq_cli_ctx = oldctx;
578                 RETURN(rc);
579         }
580
581         newctx = req->rq_cli_ctx;
582         LASSERT(newctx);
583
584         if (unlikely(newctx == oldctx &&
585                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
586                 /*
587                  * still got the old dead ctx; usually this means the system is too busy
588                  */
589                 CDEBUG(D_SEC,
590                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
591                        newctx, newctx->cc_flags);
592
593                 schedule_timeout_interruptible(cfs_time_seconds(1));
594         } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
595                             == 0)) {
596                 /*
597                  * new ctx not up to date yet
598                  */
599                 CDEBUG(D_SEC,
600                        "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
601                        newctx, newctx->cc_flags);
602         } else {
603                 /*
604                  * it's possible newctx == oldctx if we're switching
605                  * subflavor with the same sec.
606                  */
607                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
608                 if (rc) {
609                         /* restore old ctx */
610                         sptlrpc_req_put_ctx(req, 0);
611                         req->rq_cli_ctx = oldctx;
612                         RETURN(rc);
613                 }
614
615                 LASSERT(req->rq_cli_ctx == newctx);
616         }
617
618         sptlrpc_cli_ctx_put(oldctx, 1);
619         RETURN(0);
620 }
621 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
622
623 static
624 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
625 {
626         if (cli_ctx_is_refreshed(ctx))
627                 return 1;
628         return 0;
629 }
630
631 static
632 void ctx_refresh_interrupt(struct ptlrpc_request *req)
633 {
634
635         spin_lock(&req->rq_lock);
636         req->rq_intr = 1;
637         spin_unlock(&req->rq_lock);
638 }
639
640 static
641 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
642 {
643         spin_lock(&ctx->cc_lock);
644         if (!list_empty(&req->rq_ctx_chain))
645                 list_del_init(&req->rq_ctx_chain);
646         spin_unlock(&ctx->cc_lock);
647 }
648
649 /**
650  * To refresh the context of \a req, if it's not up-to-date.
651  * \param timeout
652  * - == 0: do not wait
653  * - == MAX_SCHEDULE_TIMEOUT: wait indefinitely
654  * - > 0: not supported
655  *
656  * The status of the context may be changed by other threads at any time.
657  * We allow this race, but once we return 0, the caller will assume the
658  * context is up to date and keep using it until the owning RPC is done.
659  *
660  * \retval 0 only if the context is up to date.
661  * \retval -ev error number.
662  */
663 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
664 {
665         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
666         struct ptlrpc_sec *sec;
667         int rc;
668
669         ENTRY;
670
671         LASSERT(ctx);
672
673         if (req->rq_ctx_init || req->rq_ctx_fini)
674                 RETURN(0);
675
676         if (timeout != 0 && timeout != MAX_SCHEDULE_TIMEOUT) {
677                 CERROR("req %p: invalid timeout %lu\n", req, timeout);
678                 RETURN(-EINVAL);
679         }
680
681         /*
682          * during the process a request's context might even change type
683          * (e.g. from a gss ctx to a null ctx), so on each loop we need to
684          * re-check everything
685          */
686 again:
687         rc = import_sec_validate_get(req->rq_import, &sec);
688         if (rc)
689                 RETURN(rc);
690
691         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
692                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
693                        req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
694                 req_off_ctx_list(req, ctx);
695                 sptlrpc_req_replace_dead_ctx(req);
696                 ctx = req->rq_cli_ctx;
697         }
698         sptlrpc_sec_put(sec);
699
700         if (cli_ctx_is_eternal(ctx))
701                 RETURN(0);
702
703         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
704                 if (ctx->cc_ops->refresh)
705                         ctx->cc_ops->refresh(ctx);
706         }
707         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
708
709         LASSERT(ctx->cc_ops->validate);
710         if (ctx->cc_ops->validate(ctx) == 0) {
711                 req_off_ctx_list(req, ctx);
712                 RETURN(0);
713         }
714
715         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
716                 spin_lock(&req->rq_lock);
717                 req->rq_err = 1;
718                 spin_unlock(&req->rq_lock);
719                 req_off_ctx_list(req, ctx);
720                 RETURN(-EPERM);
721         }
722
723         /*
724          * There is a subtle issue when resending RPCs; consider the
725          * following situation:
726          *  1. the request was sent to the server.
727          *  2. recovery was kicked off; after it finished, the request
728          *     was marked as resent.
729          *  3. the request is resent.
730          *  4. the old reply arrives; we accept and verify it, and this
731          *     must succeed or the application will see the error.
732          *  5. the new reply from the server arrives and is dropped by LNet.
733          *
734          * Note the old and new request share the same xid. We can't simply
735          * change the xid of the resent request because the server relies
736          * on it for reply reconstruction.
737          *
738          * Commonly the original context should be up to date, since we
739          * have a comfortable margin before expiry; the server keeps its
740          * context because we hold at least one reference on the old
741          * context, which prevents it from being destroyed while the RPC is
742          * being sent, so the server can still accept the request and
743          * finish the RPC. But if that's not the case:
744          *  1. If the server side context has been trimmed, NO_CONTEXT
745          *     will be returned, and gss_cli_ctx_verify/unseal will switch
746          *     to the new context by force.
747          *  2. If the current context was never refreshed, we are fine: we
748          *     never actually sent a request with the old context before.
749          */
750         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
751             unlikely(req->rq_reqmsg) &&
752             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
753                 req_off_ctx_list(req, ctx);
754                 RETURN(0);
755         }
756
757         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
758                 req_off_ctx_list(req, ctx);
759                 /*
760                  * don't switch ctx if import was deactivated
761                  */
762                 if (req->rq_import->imp_deactive) {
763                         spin_lock(&req->rq_lock);
764                         req->rq_err = 1;
765                         spin_unlock(&req->rq_lock);
766                         RETURN(-EINTR);
767                 }
768
769                 rc = sptlrpc_req_replace_dead_ctx(req);
770                 if (rc) {
771                         LASSERT(ctx == req->rq_cli_ctx);
772                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
773                                req, ctx, rc);
774                         spin_lock(&req->rq_lock);
775                         req->rq_err = 1;
776                         spin_unlock(&req->rq_lock);
777                         RETURN(rc);
778                 }
779
780                 ctx = req->rq_cli_ctx;
781                 goto again;
782         }
783
784         /*
785          * Now we're sure this context is in the middle of an upcall; add
786          * ourselves to the waiting list
787          */
788         spin_lock(&ctx->cc_lock);
789         if (list_empty(&req->rq_ctx_chain))
790                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
791         spin_unlock(&ctx->cc_lock);
792
793         if (timeout == 0)
794                 RETURN(-EAGAIN);
795
796         /* Clear any flags that may be present from previous sends */
797         LASSERT(req->rq_receiving_reply == 0);
798         spin_lock(&req->rq_lock);
799         req->rq_err = 0;
800         req->rq_timedout = 0;
801         req->rq_resend = 0;
802         req->rq_restart = 0;
803         spin_unlock(&req->rq_lock);
804
805         /* by now we know that timeout value is MAX_SCHEDULE_TIMEOUT,
806          * so wait indefinitely with non-fatal signals blocked
807          */
808         if (l_wait_event_abortable(req->rq_reply_waitq,
809                                    ctx_check_refresh(ctx)) == -ERESTARTSYS) {
810                 rc = -EINTR;
811                 ctx_refresh_interrupt(req);
812         }
813
814         /*
815          * The following cases could lead us here:
816          * - successfully refreshed;
817          * - interrupted;
818          * - timed out, and we don't want to recover from the failure;
819          * - timed out, and woken up when recovery finished;
820          * - someone else marked this ctx dead by force;
821          * - someone invalidated the req and called ptlrpc_client_wake_req(),
822          *   e.g. ptlrpc_abort_inflight();
823          */
824         if (!cli_ctx_is_refreshed(ctx)) {
825                 /* timed out or interrupted */
826                 req_off_ctx_list(req, ctx);
827
828                 LASSERT(rc != 0);
829                 RETURN(rc);
830         }
831
832         goto again;
833 }
834
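/*
 * Usage sketch (illustrative): callers of sptlrpc_req_refresh_ctx() either
 * poll the context without waiting, or block until it is refreshed:
 *
 *      rc = sptlrpc_req_refresh_ctx(req, 0);                     (poll only)
 *      rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);  (wait)
 *
 * -EAGAIN is only returned in the non-waiting case while the refresh is
 * still in progress; 0 means the context is usable for this request.
 */
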
835 /* Bring ptlrpc_sec context up-to-date */
836 int sptlrpc_export_update_ctx(struct obd_export *exp)
837 {
838         struct obd_import *imp = exp ? exp->exp_imp_reverse : NULL;
839         struct ptlrpc_sec *sec = NULL;
840         struct ptlrpc_cli_ctx *ctx = NULL;
841         int rc = 0;
842
843         if (imp)
844                 sec = sptlrpc_import_sec_ref(imp);
845         if (sec) {
846                 ctx = get_my_ctx(sec);
847                 sptlrpc_sec_put(sec);
848         }
849
850         if (ctx) {
851                 if (ctx->cc_ops->refresh)
852                         rc = ctx->cc_ops->refresh(ctx);
853                 sptlrpc_cli_ctx_put(ctx, 1);
854         }
855         return rc;
856 }
857
858 /**
859  * Initialize flavor settings for \a req, according to \a opcode.
860  *
861  * \note this could be called in two situations:
862  * - new request from ptlrpc_pre_req(), with proper @opcode
863  * - old request which changed ctx in the middle, with @opcode == 0
864  */
865 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
866 {
867         struct ptlrpc_sec *sec;
868
869         LASSERT(req->rq_import);
870         LASSERT(req->rq_cli_ctx);
871         LASSERT(req->rq_cli_ctx->cc_sec);
872         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
873
874         /* special security flags according to opcode */
875         switch (opcode) {
876         case OST_READ:
877         case MDS_READPAGE:
878         case MGS_CONFIG_READ:
879         case OBD_IDX_READ:
880                 req->rq_bulk_read = 1;
881                 break;
882         case OST_WRITE:
883         case MDS_WRITEPAGE:
884                 req->rq_bulk_write = 1;
885                 break;
886         case SEC_CTX_INIT:
887                 req->rq_ctx_init = 1;
888                 break;
889         case SEC_CTX_FINI:
890                 req->rq_ctx_fini = 1;
891                 break;
892         case 0:
893                 /* init/fini rpc won't be resent, so can't be here */
894                 LASSERT(req->rq_ctx_init == 0);
895                 LASSERT(req->rq_ctx_fini == 0);
896
897                 /* cleanup flags, which should be recalculated */
898                 req->rq_pack_udesc = 0;
899                 req->rq_pack_bulk = 0;
900                 break;
901         }
902
903         sec = req->rq_cli_ctx->cc_sec;
904
905         spin_lock(&sec->ps_lock);
906         req->rq_flvr = sec->ps_flvr;
907         spin_unlock(&sec->ps_lock);
908
909         /*
910          * force SVC_NULL for context initiation rpc, SVC_INTG for context
911          * destruction rpc
912          */
913         if (unlikely(req->rq_ctx_init))
914                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
915         else if (unlikely(req->rq_ctx_fini))
916                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
917
918         /* user descriptor flag, null security can't do it anyway */
919         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
920             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
921                 req->rq_pack_udesc = 1;
922
923         /* bulk security flag */
924         if ((req->rq_bulk_read || req->rq_bulk_write) &&
925             sptlrpc_flavor_has_bulk(&req->rq_flvr))
926                 req->rq_pack_bulk = 1;
927 }
928
929 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
930 {
931         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
932                 return;
933
934         LASSERT(req->rq_clrbuf);
935         if (req->rq_pool || !req->rq_reqbuf)
936                 return;
937
938         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
939         req->rq_reqbuf = NULL;
940         req->rq_reqbuf_len = 0;
941 }
942
943 /**
944  * Given an import \a imp, check whether current user has a valid context
945  * or not. We may create a new context and try to refresh it, retrying
946  * repeatedly in case of non-fatal errors. A return of 0 means success.
947  */
948 int sptlrpc_import_check_ctx(struct obd_import *imp)
949 {
950         struct ptlrpc_sec     *sec;
951         struct ptlrpc_cli_ctx *ctx;
952         struct ptlrpc_request *req = NULL;
953         int rc;
954
955         ENTRY;
956
957         might_sleep();
958
959         sec = sptlrpc_import_sec_ref(imp);
960         ctx = get_my_ctx(sec);
961         sptlrpc_sec_put(sec);
962
963         if (!ctx)
964                 RETURN(-ENOMEM);
965
966         if (cli_ctx_is_eternal(ctx) ||
967             ctx->cc_ops->validate(ctx) == 0) {
968                 sptlrpc_cli_ctx_put(ctx, 1);
969                 RETURN(0);
970         }
971
972         if (cli_ctx_is_error(ctx)) {
973                 sptlrpc_cli_ctx_put(ctx, 1);
974                 RETURN(-EACCES);
975         }
976
977         req = ptlrpc_request_cache_alloc(GFP_NOFS);
978         if (!req)
979                 RETURN(-ENOMEM);
980
981         ptlrpc_cli_req_init(req);
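        /* take an artificially large refcount so this dummy request is not
         * freed by any intermediate put before we free it explicitly below
         */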
982         atomic_set(&req->rq_refcount, 10000);
983
984         req->rq_import = imp;
985         req->rq_flvr = sec->ps_flvr;
986         req->rq_cli_ctx = ctx;
987
988         rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);
989         LASSERT(list_empty(&req->rq_ctx_chain));
990         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
991         ptlrpc_request_cache_free(req);
992
993         RETURN(rc);
994 }
995
996 /**
997  * Used by ptlrpc client, to perform the pre-defined security transformation
998  * upon the request message of \a req. After this function is called,
999  * req->rq_reqmsg is still accessible as clear text.
1000  */
1001 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
1002 {
1003         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1004         int rc = 0;
1005
1006         ENTRY;
1007
1008         LASSERT(ctx);
1009         LASSERT(ctx->cc_sec);
1010         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1011
1012         /*
1013          * we wrap bulk request here because now we can be sure
1014          * the context is uptodate.
1015          */
1016         if (req->rq_bulk) {
1017                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
1018                 if (rc)
1019                         RETURN(rc);
1020         }
1021
1022         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1023         case SPTLRPC_SVC_NULL:
1024         case SPTLRPC_SVC_AUTH:
1025         case SPTLRPC_SVC_INTG:
1026                 LASSERT(ctx->cc_ops->sign);
1027                 rc = ctx->cc_ops->sign(ctx, req);
1028                 break;
1029         case SPTLRPC_SVC_PRIV:
1030                 LASSERT(ctx->cc_ops->seal);
1031                 rc = ctx->cc_ops->seal(ctx, req);
1032                 break;
1033         default:
1034                 LBUG();
1035         }
1036
1037         if (rc == 0) {
1038                 LASSERT(req->rq_reqdata_len);
1039                 LASSERT(req->rq_reqdata_len % 8 == 0);
1040                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1041         }
1042
1043         RETURN(rc);
1044 }
1045
1046 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1047 {
1048         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1049         int rc;
1050
1051         ENTRY;
1052
1053         LASSERT(ctx);
1054         LASSERT(ctx->cc_sec);
1055         LASSERT(req->rq_repbuf);
1056         LASSERT(req->rq_repdata);
1057         LASSERT(req->rq_repmsg == NULL);
1058
1059         req->rq_rep_swab_mask = 0;
1060
1061         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1062         switch (rc) {
1063         case 1:
1064                 req_capsule_set_rep_swabbed(&req->rq_pill,
1065                                             MSG_PTLRPC_HEADER_OFF);
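                /* rc == 1: message unpacked but needs swabbing; fall through */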
1066         case 0:
1067                 break;
1068         default:
1069                 CERROR("failed unpack reply: x%llu\n", req->rq_xid);
1070                 RETURN(-EPROTO);
1071         }
1072
1073         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1074                 CERROR("replied data length %d too small\n",
1075                        req->rq_repdata_len);
1076                 RETURN(-EPROTO);
1077         }
1078
1079         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1080             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1081                 CERROR("reply policy %u doesn't match request policy %u\n",
1082                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1083                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1084                 RETURN(-EPROTO);
1085         }
1086
1087         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1088         case SPTLRPC_SVC_NULL:
1089         case SPTLRPC_SVC_AUTH:
1090         case SPTLRPC_SVC_INTG:
1091                 LASSERT(ctx->cc_ops->verify);
1092                 rc = ctx->cc_ops->verify(ctx, req);
1093                 break;
1094         case SPTLRPC_SVC_PRIV:
1095                 LASSERT(ctx->cc_ops->unseal);
1096                 rc = ctx->cc_ops->unseal(ctx, req);
1097                 break;
1098         default:
1099                 LBUG();
1100         }
1101         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1102
1103         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1104             !req->rq_ctx_init)
1105                 req->rq_rep_swab_mask = 0;
1106         RETURN(rc);
1107 }
1108
1109 /**
1110  * Used by ptlrpc client, to perform security transformation upon the reply
1111  * message of \a req. After return successfully, req->rq_repmsg points to
1112  * the reply message in clear text.
1113  *
1114  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1115  * going to change.
1116  */
1117 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1118 {
1119         LASSERT(req->rq_repbuf);
1120         LASSERT(req->rq_repdata == NULL);
1121         LASSERT(req->rq_repmsg == NULL);
1122         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1123
1124         if (req->rq_reply_off == 0 &&
1125             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1126                 CERROR("real reply with offset 0\n");
1127                 return -EPROTO;
1128         }
1129
1130         if (req->rq_reply_off % 8 != 0) {
1131                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1132                 return -EPROTO;
1133         }
1134
1135         req->rq_repdata = (struct lustre_msg *)
1136                                 (req->rq_repbuf + req->rq_reply_off);
1137         req->rq_repdata_len = req->rq_nob_received;
1138
1139         return do_cli_unwrap_reply(req);
1140 }
1141
1142 /**
1143  * Used by ptlrpc client, to perform security transformation upon the early
1144  * reply message of \a req. We expect rq_reply_off to be 0, and
1145  * rq_nob_received is the early reply size.
1146  *
1147  * Because the receive buffer might be still posted, the reply data might be
1148  * changed at any time, whether or not we hold rq_lock. For this reason
1149  * we allocate a separate ptlrpc_request and reply buffer for early reply
1150  * processing.
1151  *
1152  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1153  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1154  * \a *req_ret to release it.
1155  * \retval -ev error number, and \a req_ret will not be set.
1156  */
1157 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1158                                    struct ptlrpc_request **req_ret)
1159 {
1160         struct ptlrpc_request *early_req;
1161         char *early_buf;
1162         int early_bufsz, early_size;
1163         int rc;
1164
1165         ENTRY;
1166
1167         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1168         if (early_req == NULL)
1169                 RETURN(-ENOMEM);
1170
1171         ptlrpc_cli_req_init(early_req);
1172
1173         early_size = req->rq_nob_received;
1174         early_bufsz = size_roundup_power2(early_size);
1175         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1176         if (early_buf == NULL)
1177                 GOTO(err_req, rc = -ENOMEM);
1178
1179         /* sanity checks and copy data out, do it inside the spinlock */
1180         spin_lock(&req->rq_lock);
1181
1182         if (req->rq_replied) {
1183                 spin_unlock(&req->rq_lock);
1184                 GOTO(err_buf, rc = -EALREADY);
1185         }
1186
1187         LASSERT(req->rq_repbuf);
1188         LASSERT(req->rq_repdata == NULL);
1189         LASSERT(req->rq_repmsg == NULL);
1190
1191         if (req->rq_reply_off != 0) {
1192                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1193                 spin_unlock(&req->rq_lock);
1194                 GOTO(err_buf, rc = -EPROTO);
1195         }
1196
1197         if (req->rq_nob_received != early_size) {
1198                 /* even if another early reply arrived, the size should be the same */
1199                 CERROR("data size has changed from %u to %u\n",
1200                        early_size, req->rq_nob_received);
1201                 spin_unlock(&req->rq_lock);
1202                 GOTO(err_buf, rc = -EINVAL);
1203         }
1204
1205         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1206                 CERROR("early reply length %d too small\n",
1207                        req->rq_nob_received);
1208                 spin_unlock(&req->rq_lock);
1209                 GOTO(err_buf, rc = -EALREADY);
1210         }
1211
1212         memcpy(early_buf, req->rq_repbuf, early_size);
1213         spin_unlock(&req->rq_lock);
1214
1215         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1216         early_req->rq_flvr = req->rq_flvr;
1217         early_req->rq_repbuf = early_buf;
1218         early_req->rq_repbuf_len = early_bufsz;
1219         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1220         early_req->rq_repdata_len = early_size;
1221         early_req->rq_early = 1;
1222         early_req->rq_reqmsg = req->rq_reqmsg;
1223
1224         rc = do_cli_unwrap_reply(early_req);
1225         if (rc) {
1226                 DEBUG_REQ(D_ADAPTTO, early_req,
1227                           "unwrap early reply: rc = %d", rc);
1228                 GOTO(err_ctx, rc);
1229         }
1230
1231         LASSERT(early_req->rq_repmsg);
1232         *req_ret = early_req;
1233         RETURN(0);
1234
1235 err_ctx:
1236         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1237 err_buf:
1238         OBD_FREE_LARGE(early_buf, early_bufsz);
1239 err_req:
1240         ptlrpc_request_cache_free(early_req);
1241         RETURN(rc);
1242 }
1243
1244 /**
1245  * Used by ptlrpc client, to release a processed early reply \a early_req.
1246  *
1247  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1248  */
1249 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1250 {
1251         LASSERT(early_req->rq_repbuf);
1252         LASSERT(early_req->rq_repdata);
1253         LASSERT(early_req->rq_repmsg);
1254
1255         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1256         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1257         ptlrpc_request_cache_free(early_req);
1258 }
1259
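/*
 * Usage sketch (illustrative): processing an early reply pairs the two
 * functions above:
 *
 *      struct ptlrpc_request *early_req;
 *
 *      rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *      if (rc == 0) {
 *              ... read fields from early_req->rq_repmsg ...
 *              sptlrpc_cli_finish_early_reply(early_req);
 *      }
 */
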
1260 /**************************************************
1261  * sec ID                                         *
1262  **************************************************/
1263
1264 /*
1265  * a "fixed" sec (e.g. null) uses sec_id < 0
1266  */
1267 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1268
1269 int sptlrpc_get_next_secid(void)
1270 {
1271         return atomic_inc_return(&sptlrpc_sec_id);
1272 }
1273 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1274
1275 /*
1276  * client side high-level security APIs
1277  */
1278
1279 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1280                                    int grace, int force)
1281 {
1282         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1283
1284         LASSERT(policy->sp_cops);
1285         LASSERT(policy->sp_cops->flush_ctx_cache);
1286
1287         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1288 }
1289
1290 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1291 {
1292         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1293
1294         LASSERT(atomic_read(&sec->ps_refcount) == 0);
1295         LASSERT(atomic_read(&sec->ps_nctx) == 0);
1296         LASSERT(policy->sp_cops->destroy_sec);
1297
1298         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1299
1300         policy->sp_cops->destroy_sec(sec);
1301         sptlrpc_policy_put(policy);
1302 }
1303
1304 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1305 {
1306         sec_cop_destroy_sec(sec);
1307 }
1308 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1309
1310 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1311 {
1312         LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
1313
1314         if (sec->ps_policy->sp_cops->kill_sec) {
1315                 sec->ps_policy->sp_cops->kill_sec(sec);
1316
1317                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1318         }
1319 }
1320
1321 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1322 {
1323         if (sec)
1324                 atomic_inc(&sec->ps_refcount);
1325
1326         return sec;
1327 }
1328 EXPORT_SYMBOL(sptlrpc_sec_get);
1329
1330 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1331 {
1332         if (sec) {
1333                 LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
1334
1335                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1336                         sptlrpc_gc_del_sec(sec);
1337                         sec_cop_destroy_sec(sec);
1338                 }
1339         }
1340 }
1341 EXPORT_SYMBOL(sptlrpc_sec_put);
1342
1343 /*
1344  * the policy module is responsible for taking a reference on the import
1345  */
1346 static
1347 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1348                                        struct ptlrpc_svc_ctx *svc_ctx,
1349                                        struct sptlrpc_flavor *sf,
1350                                        enum lustre_sec_part sp)
1351 {
1352         struct ptlrpc_sec_policy *policy;
1353         struct ptlrpc_sec *sec;
1354         char str[32];
1355
1356         ENTRY;
1357
1358         if (svc_ctx) {
1359                 LASSERT(imp->imp_dlm_fake == 1);
1360
1361                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1362                        imp->imp_obd->obd_type->typ_name,
1363                        imp->imp_obd->obd_name,
1364                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1365
1366                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1367                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1368         } else {
1369                 LASSERT(imp->imp_dlm_fake == 0);
1370
1371                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1372                        imp->imp_obd->obd_type->typ_name,
1373                        imp->imp_obd->obd_name,
1374                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1375
1376                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1377                 if (!policy) {
1378                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1379                         RETURN(NULL);
1380                 }
1381         }
1382
1383         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1384         if (sec) {
1385                 atomic_inc(&sec->ps_refcount);
1386
1387                 sec->ps_part = sp;
1388
1389                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1390                         sptlrpc_gc_add_sec(sec);
1391         } else {
1392                 sptlrpc_policy_put(policy);
1393         }
1394
1395         RETURN(sec);
1396 }
1397
1398 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1399 {
1400         struct ptlrpc_sec *sec;
1401
1402         read_lock(&imp->imp_sec_lock);
1403         sec = sptlrpc_sec_get(imp->imp_sec);
1404         read_unlock(&imp->imp_sec_lock);
1405
1406         return sec;
1407 }
1408 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1409
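/*
 * Note (illustrative): the reference taken by sptlrpc_import_sec_ref() must
 * be balanced with sptlrpc_sec_put(), as import_flush_ctx_common() does
 * further below:
 *
 *      sec = sptlrpc_import_sec_ref(imp);
 *      if (sec == NULL)
 *              return;
 *      sec_cop_flush_ctx_cache(sec, uid, grace, force);
 *      sptlrpc_sec_put(sec);
 */
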
1410 static void sptlrpc_import_sec_install(struct obd_import *imp,
1411                                        struct ptlrpc_sec *sec)
1412 {
1413         struct ptlrpc_sec *old_sec;
1414
1415         LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
1416
1417         write_lock(&imp->imp_sec_lock);
1418         old_sec = imp->imp_sec;
1419         imp->imp_sec = sec;
1420         write_unlock(&imp->imp_sec_lock);
1421
1422         if (old_sec) {
1423                 sptlrpc_sec_kill(old_sec);
1424
1425                 /* balance the ref taken by this import */
1426                 sptlrpc_sec_put(old_sec);
1427         }
1428 }
1429
1430 static inline
1431 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1432 {
1433         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1434 }
1435
1436 static inline
1437 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1438 {
1439         *dst = *src;
1440 }
1441
1442 /**
1443  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1444  * configuration. When called, imp->imp_sec may or may not be NULL.
1445  *
1446  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1447  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1448  */
1449 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1450                              struct ptlrpc_svc_ctx *svc_ctx,
1451                              struct sptlrpc_flavor *flvr)
1452 {
1453         struct ptlrpc_connection *conn;
1454         struct sptlrpc_flavor sf;
1455         struct ptlrpc_sec *sec, *newsec;
1456         enum lustre_sec_part sp;
1457         char str[24];
1458         int rc = 0;
1459
1460         ENTRY;
1461
1462         might_sleep();
1463
1464         if (imp == NULL)
1465                 RETURN(0);
1466
1467         conn = imp->imp_connection;
1468
1469         if (svc_ctx == NULL) {
1470                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1471                 /*
1472                  * normal import, determine the flavor from the rule set,
1473                  * except for the MGC, whose flavor is predetermined.
1474                  */
1475                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1476                         sf = cliobd->cl_flvr_mgc;
1477                 else
1478                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1479                                                    cliobd->cl_sp_to,
1480                                                    &cliobd->cl_target_uuid,
1481                                                    &conn->c_self, &sf);
1482
1483                 sp = imp->imp_obd->u.cli.cl_sp_me;
1484         } else {
1485                 /* reverse import, determine flavor from the incoming request */
1486                 sf = *flvr;
1487
1488                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1489                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1490                                       PTLRPC_SEC_FL_ROOTONLY;
1491
1492                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1493         }
1494
1495         sec = sptlrpc_import_sec_ref(imp);
1496         if (sec) {
1497                 char str2[24];
1498
1499                 if (flavor_equal(&sf, &sec->ps_flvr))
1500                         GOTO(out, rc);
1501
1502                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1503                        imp->imp_obd->obd_name,
1504                        obd_uuid2str(&conn->c_remote_uuid),
1505                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1506                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1507         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1508                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1509                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1510                        imp->imp_obd->obd_name,
1511                        obd_uuid2str(&conn->c_remote_uuid),
1512                        LNET_NID_NET(&conn->c_self),
1513                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1514         }
1515
1516         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1517         if (newsec) {
1518                 sptlrpc_import_sec_install(imp, newsec);
1519         } else {
1520                 CERROR("import %s->%s: failed to create new sec\n",
1521                        imp->imp_obd->obd_name,
1522                        obd_uuid2str(&conn->c_remote_uuid));
1523                 rc = -EPERM;
1524         }
1525
1526 out:
1527         sptlrpc_sec_put(sec);
1528         RETURN(rc);
1529 }
1530
1531 void sptlrpc_import_sec_put(struct obd_import *imp)
1532 {
1533         if (imp->imp_sec) {
1534                 sptlrpc_sec_kill(imp->imp_sec);
1535
1536                 sptlrpc_sec_put(imp->imp_sec);
1537                 imp->imp_sec = NULL;
1538         }
1539 }
1540
1541 static void import_flush_ctx_common(struct obd_import *imp,
1542                                     uid_t uid, int grace, int force)
1543 {
1544         struct ptlrpc_sec *sec;
1545
1546         if (imp == NULL)
1547                 return;
1548
1549         sec = sptlrpc_import_sec_ref(imp);
1550         if (sec == NULL)
1551                 return;
1552
1553         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1554         sptlrpc_sec_put(sec);
1555 }
1556
1557 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1558 {
1559         /*
1560          * it's important to use grace mode, see the explanation in
1561          * sptlrpc_req_refresh_ctx()
1562          */
1563         import_flush_ctx_common(imp, 0, 1, 1);
1564 }
1565
1566 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1567 {
1568         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1569                                 1, 1);
1570 }
1571 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1572
1573 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1574 {
1575         import_flush_ctx_common(imp, -1, 1, 1);
1576 }
1577 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1578
1579 /**
1580  * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1581  * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1582  */
1583 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1584 {
1585         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1586         struct ptlrpc_sec_policy *policy;
1587         int rc;
1588
1589         LASSERT(ctx);
1590         LASSERT(ctx->cc_sec);
1591         LASSERT(ctx->cc_sec->ps_policy);
1592         LASSERT(req->rq_reqmsg == NULL);
1593         LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
1594
1595         policy = ctx->cc_sec->ps_policy;
1596         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1597         if (!rc) {
1598                 LASSERT(req->rq_reqmsg);
1599                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1600
1601                 /* zeroing preallocated buffer */
1602                 if (req->rq_pool)
1603                         memset(req->rq_reqmsg, 0, msgsize);
1604         }
1605
1606         return rc;
1607 }
1608
1609 /**
1610  * Used by ptlrpc client to free request buffer of \a req. After this
1611  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1612  */
1613 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1614 {
1615         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1616         struct ptlrpc_sec_policy *policy;
1617
1618         LASSERT(ctx);
1619         LASSERT(ctx->cc_sec);
1620         LASSERT(ctx->cc_sec->ps_policy);
1621         LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
1622
1623         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1624                 return;
1625
1626         policy = ctx->cc_sec->ps_policy;
1627         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1628         req->rq_reqmsg = NULL;
1629 }
1630
1631 /*
1632  * NOTE caller must guarantee the buffer size is enough for the enlargement
1633  */
1634 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1635                                   int segment, int newsize)
1636 {
1637         void *src, *dst;
1638         int oldsize, oldmsg_size, movesize;
1639
1640         LASSERT(segment < msg->lm_bufcount);
1641         LASSERT(msg->lm_buflens[segment] <= newsize);
1642
1643         if (msg->lm_buflens[segment] == newsize)
1644                 return;
1645
1646         /* nothing to do if we are enlarging the last segment */
1647         if (segment == msg->lm_bufcount - 1) {
1648                 msg->lm_buflens[segment] = newsize;
1649                 return;
1650         }
1651
1652         oldsize = msg->lm_buflens[segment];
1653
1654         src = lustre_msg_buf(msg, segment + 1, 0);
1655         msg->lm_buflens[segment] = newsize;
1656         dst = lustre_msg_buf(msg, segment + 1, 0);
1657         msg->lm_buflens[segment] = oldsize;
1658
1659         /* move data from segment + 1 through the last segment */
1660         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1661         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1662         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1663         LASSERT(movesize >= 0);
1664
1665         if (movesize)
1666                 memmove(dst, src, movesize);
1667
1668         /* note we don't clear the areas where the old data lived, not secret */
1669
1670         /* finally set new segment size */
1671         msg->lm_buflens[segment] = newsize;
1672 }
1673 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1674
1675 /**
1676  * Used by ptlrpc client to enlarge the request message segment corresponding
1677  * to \a field (pointed to by req->rq_reqmsg) to size \a newsize. All
1678  * previously filled-in data is preserved after the enlargement. This must be
1679  * called after the original request buffer has been allocated.
1680  *
1681  * \note after this is called, rq_reqmsg and rq_reqlen might have changed,
1682  * so the caller should refresh its local pointers if needed.
1683  */
1684 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1685                                const struct req_msg_field *field,
1686                                int newsize)
1687 {
1688         struct req_capsule *pill = &req->rq_pill;
1689         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1690         struct ptlrpc_sec_cops *cops;
1691         struct lustre_msg *msg = req->rq_reqmsg;
1692         int segment = __req_capsule_offset(pill, field, RCL_CLIENT);
1693
1694         LASSERT(ctx);
1695         LASSERT(msg);
1696         LASSERT(msg->lm_bufcount > segment);
1697         LASSERT(msg->lm_buflens[segment] <= newsize);
1698
1699         if (msg->lm_buflens[segment] == newsize)
1700                 return 0;
1701
1702         cops = ctx->cc_sec->ps_policy->sp_cops;
1703         LASSERT(cops->enlarge_reqbuf);
1704         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1705 }
1706 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1707
1708 /**
1709  * Used by ptlrpc client to allocate reply buffer of \a req.
1710  *
1711  * \note After this, req->rq_repmsg is still not accessible.
1712  */
1713 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1714 {
1715         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1716         struct ptlrpc_sec_policy *policy;
1717
1718         ENTRY;
1719
1720         LASSERT(ctx);
1721         LASSERT(ctx->cc_sec);
1722         LASSERT(ctx->cc_sec->ps_policy);
1723
1724         if (req->rq_repbuf)
1725                 RETURN(0);
1726
1727         policy = ctx->cc_sec->ps_policy;
1728         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1729 }
1730
1731 /**
1732  * Used by ptlrpc client to free reply buffer of \a req. After this
1733  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1734  */
1735 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1736 {
1737         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1738         struct ptlrpc_sec_policy *policy;
1739
1740         ENTRY;
1741
1742         LASSERT(ctx);
1743         LASSERT(ctx->cc_sec);
1744         LASSERT(ctx->cc_sec->ps_policy);
1745         LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
1746
1747         if (req->rq_repbuf == NULL)
1748                 return;
1749         LASSERT(req->rq_repbuf_len);
1750
1751         policy = ctx->cc_sec->ps_policy;
1752         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1753         req->rq_repmsg = NULL;
1754         EXIT;
1755 }
1756 EXPORT_SYMBOL(sptlrpc_cli_free_repbuf);
1757
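     /* Install a reverse context for client ctx \a ctx on import \a imp,
      * if the policy provides an install_rctx hook. */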
1758 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1759                                 struct ptlrpc_cli_ctx *ctx)
1760 {
1761         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1762
1763         if (!policy->sp_cops->install_rctx)
1764                 return 0;
1765         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1766 }
1767
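     /* Install a reverse context for service ctx \a ctx on import \a imp,
      * if the policy provides an install_rctx hook. */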
1768 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1769                                 struct ptlrpc_svc_ctx *ctx)
1770 {
1771         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1772
1773         if (!policy->sp_sops->install_rctx)
1774                 return 0;
1775         return policy->sp_sops->install_rctx(imp, ctx);
1776 }
1777
1778 /* Get SELinux policy info from userspace */
1779 static int sepol_helper(struct obd_import *imp)
1780 {
1781         char mtime_str[21] = { 0 }, mode_str[2] = { 0 };
1782         char *argv[] = {
1783                 [0] = "/usr/sbin/l_getsepol",
1784                 [1] = "-o",
1785                 [2] = NULL,         /* obd type */
1786                 [3] = "-n",
1787                 [4] = NULL,         /* obd name */
1788                 [5] = "-t",
1789                 [6] = mtime_str,    /* policy mtime */
1790                 [7] = "-m",
1791                 [8] = mode_str,     /* enforcing mode */
1792                 [9] = NULL
1793         };
1794         char *envp[] = {
1795                 [0] = "HOME=/",
1796                 [1] = "PATH=/sbin:/usr/sbin",
1797                 [2] = NULL
1798         };
1799         signed short ret;
1800         int rc = 0;
1801
1802         if (imp == NULL || imp->imp_obd == NULL ||
1803             imp->imp_obd->obd_type == NULL) {
1804                 rc = -EINVAL;
1805         } else {
1806                 argv[2] = (char *)imp->imp_obd->obd_type->typ_name;
1807                 argv[4] = imp->imp_obd->obd_name;
1808                 spin_lock(&imp->imp_sec->ps_lock);
1809                 if (ktime_to_ns(imp->imp_sec->ps_sepol_mtime) == 0 &&
1810                     imp->imp_sec->ps_sepol[0] == '\0') {
1811                         /* ps_sepol has not been initialized */
1812                         argv[5] = NULL;
1813                         argv[7] = NULL;
1814                 } else {
1815                         time64_t mtime_ms;
1816
1817                         mtime_ms = ktime_to_ms(imp->imp_sec->ps_sepol_mtime);
1818                         snprintf(mtime_str, sizeof(mtime_str), "%lld",
1819                                  mtime_ms / MSEC_PER_SEC);
1820                         mode_str[0] = imp->imp_sec->ps_sepol[0];
1821                 }
1822                 spin_unlock(&imp->imp_sec->ps_lock);
1823                 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
1824                 rc = ret >> 8;
1825         }
1826
1827         return rc;
1828 }
1829
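     /* Decide whether the SELinux policy status must be refreshed: never if
      * send_sepol == 0, always if send_sepol == -1, otherwise at most once
      * every send_sepol seconds. */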
1830 static inline int sptlrpc_sepol_needs_check(struct ptlrpc_sec *imp_sec)
1831 {
1832         ktime_t checknext;
1833
1834         if (send_sepol == 0)
1835                 return 0;
1836
1837         if (send_sepol == -1)
1838                 /* send_sepol == -1 means fetch sepol status every time */
1839                 return 1;
1840
1841         spin_lock(&imp_sec->ps_lock);
1842         checknext = imp_sec->ps_sepol_checknext;
1843         spin_unlock(&imp_sec->ps_lock);
1844
1845         /* the scheduled next check is too far in the future, reset it */
1846         if (ktime_after(checknext,
1847                         ktime_add(ktime_get(), ktime_set(send_sepol, 0))))
1848                 goto setnext;
1849
1850         if (ktime_before(ktime_get(), checknext))
1851                 /* too early to fetch sepol status */
1852                 return 0;
1853
1854 setnext:
1855         /* define new sepol_checknext time */
1856         spin_lock(&imp_sec->ps_lock);
1857         imp_sec->ps_sepol_checknext = ktime_add(ktime_get(),
1858                                                 ktime_set(send_sepol, 0));
1859         spin_unlock(&imp_sec->ps_lock);
1860
1861         return 1;
1862 }
1863
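     /* Copy the cached SELinux policy status into req->rq_sepol, refreshing it
      * through the l_getsepol userspace helper when needed. */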
1864 int sptlrpc_get_sepol(struct ptlrpc_request *req)
1865 {
1866         struct ptlrpc_sec *imp_sec = req->rq_import->imp_sec;
1867         int rc = 0;
1868
1869         ENTRY;
1870
1871         req->rq_sepol[0] = '\0';
1872
1873 #ifndef HAVE_SELINUX
1874         if (unlikely(send_sepol != 0))
1875                 CDEBUG(D_SEC,
1876                        "Client cannot report SELinux status, it was not built against libselinux.\n");
1877         RETURN(0);
1878 #endif
1879
1880         if (send_sepol == 0)
1881                 RETURN(0);
1882
1883         if (imp_sec == NULL)
1884                 RETURN(-EINVAL);
1885
1886         /* Retrieve SELinux status info */
1887         if (sptlrpc_sepol_needs_check(imp_sec))
1888                 rc = sepol_helper(req->rq_import);
1889         if (likely(rc == 0)) {
1890                 spin_lock(&imp_sec->ps_lock);
1891                 memcpy(req->rq_sepol, imp_sec->ps_sepol,
1892                        sizeof(req->rq_sepol));
1893                 spin_unlock(&imp_sec->ps_lock);
1894         } else if (rc == -ENODEV) {
1895                 CDEBUG(D_SEC,
1896                        "Client cannot report SELinux status, SELinux is disabled.\n");
1897                 rc = 0;
1898         }
1899
1900         RETURN(rc);
1901 }
1902 EXPORT_SYMBOL(sptlrpc_get_sepol);
1903
1904 /*
1905  * server side security
1906  */
1907
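     /* Return 1 if the flavor of \a req is allowed by the export flavor \a exp:
      * exact match, SPTLRPC_FLVR_ANY, or same policy and mechanism for
      * ctx init/fini requests. */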
1908 static int flavor_allowed(struct sptlrpc_flavor *exp,
1909                           struct ptlrpc_request *req)
1910 {
1911         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1912
1913         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1914                 return 1;
1915
1916         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1917             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1918             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1919             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1920                 return 1;
1921
1922         return 0;
1923 }
1924
1925 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1926
1927 /**
1928  * Given an export \a exp, check whether the flavor of incoming \a req
1929  * is allowed by the export \a exp. Most of the logic deals with handling
1930  * configuration changes. Returns 0 on success.
1931  */
1932 int sptlrpc_target_export_check(struct obd_export *exp,
1933                                 struct ptlrpc_request *req)
1934 {
1935         struct sptlrpc_flavor   flavor;
1936
1937         if (exp == NULL)
1938                 return 0;
1939
1940         /*
1941          * client side export has no imp_reverse, skip
1942          * FIXME maybe we should check the flavor here as well?
1943          */
1944         if (exp->exp_imp_reverse == NULL)
1945                 return 0;
1946
1947         /* don't care about ctx fini rpc */
1948         if (req->rq_ctx_fini)
1949                 return 0;
1950
1951         spin_lock(&exp->exp_lock);
1952
1953         /*
1954          * if the flavor has just changed (exp->exp_flvr_changed != 0), we wait
1955          * for the first req with the new flavor, then treat it as the current
1956          * flavor and adapt the reverse sec accordingly.
1957          * note the first rpc with the new flavor might not carry a root ctx, in
1958          * which case the sec_adapt is delayed by leaving exp_flvr_adapt == 1.
1959          */
1960         if (unlikely(exp->exp_flvr_changed) &&
1961             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1962                 /*
1963                  * mark the new flavor as "current", and the old ones as
1964                  * about-to-expire
1965                  */
1966                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1967                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1968                 flavor = exp->exp_flvr_old[1];
1969                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1970                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1971                 exp->exp_flvr_old[0] = exp->exp_flvr;
1972                 exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
1973                                           EXP_FLVR_UPDATE_EXPIRE;
1974                 exp->exp_flvr = flavor;
1975
1976                 /* flavor change finished */
1977                 exp->exp_flvr_changed = 0;
1978                 LASSERT(exp->exp_flvr_adapt == 1);
1979
1980                 /* if it's gss, we are only interested in root ctx init */
1981                 if (req->rq_auth_gss &&
1982                     !(req->rq_ctx_init &&
1983                     (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1984                     req->rq_auth_usr_ost))) {
1985                         spin_unlock(&exp->exp_lock);
1986                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1987                                req->rq_auth_gss, req->rq_ctx_init,
1988                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1989                                req->rq_auth_usr_ost);
1990                         return 0;
1991                 }
1992
1993                 exp->exp_flvr_adapt = 0;
1994                 spin_unlock(&exp->exp_lock);
1995
1996                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1997                                                 req->rq_svc_ctx, &flavor);
1998         }
1999
2000         /*
2001          * if it equals the current flavor, we accept it, but still need to
2002          * deal with the reverse sec/ctx
2003          */
2004         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
2005                 /*
2006                  * most cases should return here, we are only interested in
2007                  * gss root ctx init
2008                  */
2009                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
2010                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2011                      !req->rq_auth_usr_ost)) {
2012                         spin_unlock(&exp->exp_lock);
2013                         return 0;
2014                 }
2015
2016                 /*
2017                  * if the flavor has just changed, we should not proceed; just
2018                  * leave it, the current flavor will be discovered and replaced
2019                  * shortly, and let _this_ rpc pass through
2020                  */
2021                 if (exp->exp_flvr_changed) {
2022                         LASSERT(exp->exp_flvr_adapt);
2023                         spin_unlock(&exp->exp_lock);
2024                         return 0;
2025                 }
2026
2027                 if (exp->exp_flvr_adapt) {
2028                         exp->exp_flvr_adapt = 0;
2029                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
2030                                exp, exp->exp_flvr.sf_rpc,
2031                                exp->exp_flvr_old[0].sf_rpc,
2032                                exp->exp_flvr_old[1].sf_rpc);
2033                         flavor = exp->exp_flvr;
2034                         spin_unlock(&exp->exp_lock);
2035
2036                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
2037                                                         req->rq_svc_ctx,
2038                                                         &flavor);
2039                 } else {
2040                         CDEBUG(D_SEC,
2041                                "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
2042                                exp, exp->exp_flvr.sf_rpc,
2043                                exp->exp_flvr_old[0].sf_rpc,
2044                                exp->exp_flvr_old[1].sf_rpc);
2045                         spin_unlock(&exp->exp_lock);
2046
2047                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
2048                                                            req->rq_svc_ctx);
2049                 }
2050         }
2051
2052         if (exp->exp_flvr_expire[0]) {
2053                 if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
2054                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
2055                                 CDEBUG(D_SEC,
2056                                        "exp %p (%x|%x|%x): match the middle one (%lld)\n",
2057                                        exp, exp->exp_flvr.sf_rpc,
2058                                        exp->exp_flvr_old[0].sf_rpc,
2059                                        exp->exp_flvr_old[1].sf_rpc,
2060                                        (s64)(exp->exp_flvr_expire[0] -
2061                                              ktime_get_real_seconds()));
2062                                 spin_unlock(&exp->exp_lock);
2063                                 return 0;
2064                         }
2065                 } else {
2066                         CDEBUG(D_SEC, "mark middle expired\n");
2067                         exp->exp_flvr_expire[0] = 0;
2068                 }
2069                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
2070                        exp->exp_flvr.sf_rpc,
2071                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2072                        req->rq_flvr.sf_rpc);
2073         }
2074
2075         /*
2076          * now it doesn't match the current flavor; the only chance we can
2077          * accept it is if it matches an old flavor which has not expired.
2078          */
2079         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
2080                 if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
2081                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
2082                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
2083                                        exp,
2084                                        exp->exp_flvr.sf_rpc,
2085                                        exp->exp_flvr_old[0].sf_rpc,
2086                                        exp->exp_flvr_old[1].sf_rpc,
2087                                        (s64)(exp->exp_flvr_expire[1] -
2088                                        ktime_get_real_seconds()));
2089                                 spin_unlock(&exp->exp_lock);
2090                                 return 0;
2091                         }
2092                 } else {
2093                         CDEBUG(D_SEC, "mark oldest expired\n");
2094                         exp->exp_flvr_expire[1] = 0;
2095                 }
2096                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
2097                        exp, exp->exp_flvr.sf_rpc,
2098                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2099                        req->rq_flvr.sf_rpc);
2100         } else {
2101                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
2102                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
2103                        exp->exp_flvr_old[1].sf_rpc);
2104         }
2105
2106         spin_unlock(&exp->exp_lock);
2107
2108         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
2109               exp, exp->exp_obd->obd_name,
2110               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
2111               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
2112               req->rq_flvr.sf_rpc,
2113               exp->exp_flvr.sf_rpc,
2114               exp->exp_flvr_old[0].sf_rpc,
2115               exp->exp_flvr_expire[0] ?
2116               (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
2117               exp->exp_flvr_old[1].sf_rpc,
2118               exp->exp_flvr_expire[1] ?
2119               (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
2120         return -EACCES;
2121 }
2122 EXPORT_SYMBOL(sptlrpc_target_export_check);
2123
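     /* Recompute the flavor of every export of \a obd against rule set \a rset
      * and flag exports whose flavor changed (exp_flvr_changed/exp_flvr_adapt)
      * for later adaptation. */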
2124 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
2125                                       struct sptlrpc_rule_set *rset)
2126 {
2127         struct obd_export *exp;
2128         struct sptlrpc_flavor new_flvr;
2129
2130         LASSERT(obd);
2131
2132         spin_lock(&obd->obd_dev_lock);
2133
2134         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2135                 if (exp->exp_connection == NULL)
2136                         continue;
2137
2138                 /*
2139                  * note if this export's flavor had just been updated
2140                  * (exp_flvr_changed == 1), this will override the
2141                  * previous update.
2142                  */
2143                 spin_lock(&exp->exp_lock);
2144                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
2145                                              &exp->exp_connection->c_peer.nid,
2146                                              &new_flvr);
2147                 if (exp->exp_flvr_changed ||
2148                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
2149                         exp->exp_flvr_old[1] = new_flvr;
2150                         exp->exp_flvr_expire[1] = 0;
2151                         exp->exp_flvr_changed = 1;
2152                         exp->exp_flvr_adapt = 1;
2153
2154                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
2155                                exp, sptlrpc_part2name(exp->exp_sp_peer),
2156                                exp->exp_flvr.sf_rpc,
2157                                exp->exp_flvr_old[1].sf_rpc);
2158                 }
2159                 spin_unlock(&exp->exp_lock);
2160         }
2161
2162         spin_unlock(&obd->obd_dev_lock);
2163 }
2164 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
2165
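     /* Verify that the source claimed in req->rq_sp_from is consistent with
      * what the gss authentication established; drop the request otherwise. */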
2166 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2167 {
2168         /* peer's claim is unreliable unless gss is being used */
2169         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2170                 return svc_rc;
2171
2172         switch (req->rq_sp_from) {
2173         case LUSTRE_SP_CLI:
2174                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2175                         /* The below message is checked in sanity-sec test_33 */
2176                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2177                         svc_rc = SECSVC_DROP;
2178                 }
2179                 break;
2180         case LUSTRE_SP_MDT:
2181                 if (!req->rq_auth_usr_mdt) {
2182                         /* The below message is checked in sanity-sec test_33 */
2183                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2184                         svc_rc = SECSVC_DROP;
2185                 }
2186                 break;
2187         case LUSTRE_SP_OST:
2188                 if (!req->rq_auth_usr_ost) {
2189                         /* The below message is checked in sanity-sec test_33 */
2190                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2191                         svc_rc = SECSVC_DROP;
2192                 }
2193                 break;
2194         case LUSTRE_SP_MGS:
2195         case LUSTRE_SP_MGC:
2196                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2197                     !req->rq_auth_usr_ost) {
2198                         /* The below message is checked in sanity-sec test_33 */
2199                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2200                         svc_rc = SECSVC_DROP;
2201                 }
2202                 break;
2203         case LUSTRE_SP_ANY:
2204         default:
2205                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2206                 svc_rc = SECSVC_DROP;
2207         }
2208
2209         return svc_rc;
2210 }
2211
2212 /**
2213  * Used by ptlrpc server, to perform the security transformation upon the
2214  * request message of incoming \a req. This must be the first thing done with
2215  * an incoming request in the ptlrpc layer.
2216  *
2217  * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
2218  * in clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
2219  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2220  * the reply message has been prepared.
2221  * \retval SECSVC_DROP failed, this request should be dropped.
2222  */
2223 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2224 {
2225         struct ptlrpc_sec_policy *policy;
2226         struct lustre_msg *msg = req->rq_reqbuf;
2227         int rc;
2228
2229         ENTRY;
2230
2231         LASSERT(msg);
2232         LASSERT(req->rq_reqmsg == NULL);
2233         LASSERT(req->rq_repmsg == NULL);
2234         LASSERT(req->rq_svc_ctx == NULL);
2235
2236         req->rq_req_swab_mask = 0;
2237
2238         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2239         switch (rc) {
2240         case 1:
2241                 req_capsule_set_req_swabbed(&req->rq_pill,
2242                                             MSG_PTLRPC_HEADER_OFF);
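                     /* fallthrough */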
2243         case 0:
2244                 break;
2245         default:
2246                 CERROR("error unpacking request from %s x%llu\n",
2247                        libcfs_idstr(&req->rq_peer), req->rq_xid);
2248                 RETURN(SECSVC_DROP);
2249         }
2250
2251         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2252         req->rq_sp_from = LUSTRE_SP_ANY;
2253         req->rq_auth_uid = -1; /* set to INVALID_UID */
2254         req->rq_auth_mapped_uid = -1;
2255
2256         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2257         if (!policy) {
2258                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2259                 RETURN(SECSVC_DROP);
2260         }
2261
2262         LASSERT(policy->sp_sops->accept);
2263         rc = policy->sp_sops->accept(req);
2264         sptlrpc_policy_put(policy);
2265         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2266         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2267
2268         /*
2269          * if it's not null flavor (which means embedded packing msg),
2270          * reset the swab mask for the coming inner msg unpacking.
2271          */
2272         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2273                 req->rq_req_swab_mask = 0;
2274
2275         /* sanity check for the request source */
2276         rc = sptlrpc_svc_check_from(req, rc);
2277         RETURN(rc);
2278 }
2279
2280 /**
2281  * Used by ptlrpc server, to allocate a reply buffer for \a req. On success,
2282  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2283  * a buffer of \a msglen size.
2284  */
2285 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2286 {
2287         struct ptlrpc_sec_policy *policy;
2288         struct ptlrpc_reply_state *rs;
2289         int rc;
2290
2291         ENTRY;
2292
2293         LASSERT(req->rq_svc_ctx);
2294         LASSERT(req->rq_svc_ctx->sc_policy);
2295
2296         policy = req->rq_svc_ctx->sc_policy;
2297         LASSERT(policy->sp_sops->alloc_rs);
2298
2299         rc = policy->sp_sops->alloc_rs(req, msglen);
2300         if (unlikely(rc == -ENOMEM)) {
2301                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2302
2303                 if (svcpt->scp_service->srv_max_reply_size <
2304                    msglen + sizeof(struct ptlrpc_reply_state)) {
2305                         /* Just return failure if the size is too big */
2306                         CERROR("size of message is too big (%zd), %d allowed\n",
2307                                 msglen + sizeof(struct ptlrpc_reply_state),
2308                                 svcpt->scp_service->srv_max_reply_size);
2309                         RETURN(-ENOMEM);
2310                 }
2311
2312                 /* failed alloc, try emergency pool */
2313                 rs = lustre_get_emerg_rs(svcpt);
2314                 if (rs == NULL)
2315                         RETURN(-ENOMEM);
2316
2317                 req->rq_reply_state = rs;
2318                 rc = policy->sp_sops->alloc_rs(req, msglen);
2319                 if (rc) {
2320                         lustre_put_emerg_rs(rs);
2321                         req->rq_reply_state = NULL;
2322                 }
2323         }
2324
2325         LASSERT(rc != 0 ||
2326                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2327
2328         RETURN(rc);
2329 }
2330
2331 /**
2332  * Used by ptlrpc server, to perform transformation upon the reply message.
2333  *
2334  * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
2335  * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2336  */
2337 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2338 {
2339         struct ptlrpc_sec_policy *policy;
2340         int rc;
2341
2342         ENTRY;
2343
2344         LASSERT(req->rq_svc_ctx);
2345         LASSERT(req->rq_svc_ctx->sc_policy);
2346
2347         policy = req->rq_svc_ctx->sc_policy;
2348         LASSERT(policy->sp_sops->authorize);
2349
2350         rc = policy->sp_sops->authorize(req);
2351         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2352
2353         RETURN(rc);
2354 }
2355
2356 /**
2357  * Used by ptlrpc server, to free the reply state \a rs.
2358  */
2359 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2360 {
2361         struct ptlrpc_sec_policy *policy;
2362         unsigned int prealloc;
2363
2364         ENTRY;
2365
2366         LASSERT(rs->rs_svc_ctx);
2367         LASSERT(rs->rs_svc_ctx->sc_policy);
2368
2369         policy = rs->rs_svc_ctx->sc_policy;
2370         LASSERT(policy->sp_sops->free_rs);
2371
2372         prealloc = rs->rs_prealloc;
2373         policy->sp_sops->free_rs(rs);
2374
2375         if (prealloc)
2376                 lustre_put_emerg_rs(rs);
2377         EXIT;
2378 }
2379
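     /* Take an extra reference on the request's service context, if any. */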
2380 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2381 {
2382         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2383
2384         if (ctx != NULL)
2385                 atomic_inc(&ctx->sc_refcount);
2386 }
2387
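     /* Drop a reference on the request's service context, freeing it through
      * the policy's free_ctx hook on the last put, and clear req->rq_svc_ctx. */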
2388 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2389 {
2390         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2391
2392         if (ctx == NULL)
2393                 return;
2394
2395         LASSERT(atomic_read(&(ctx)->sc_refcount) > 0);
2396         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2397                 if (ctx->sc_policy->sp_sops->free_ctx)
2398                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2399         }
2400         req->rq_svc_ctx = NULL;
2401 }
2402
2403 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2404 {
2405         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2406
2407         if (ctx == NULL)
2408                 return;
2409
2410         LASSERT(atomic_read(&(ctx)->sc_refcount) > 0);
2411         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2412                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2413 }
2414 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2415
2416 /*
2417  * bulk security
2418  */
2419
2420 /**
2421  * Perform transformation upon bulk data pointed to by \a desc. This is called
2422  * before transforming the request message.
2423  */
2424 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2425                           struct ptlrpc_bulk_desc *desc)
2426 {
2427         struct ptlrpc_cli_ctx *ctx;
2428
2429         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2430
2431         if (!req->rq_pack_bulk)
2432                 return 0;
2433
2434         ctx = req->rq_cli_ctx;
2435         if (ctx->cc_ops->wrap_bulk)
2436                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2437         return 0;
2438 }
2439 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2440
2441 /**
2442  * This is called after unwrapping the reply message.
2443  * Returns the number of bytes of plain text actually received, or an error code.
2444  */
2445 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2446                                  struct ptlrpc_bulk_desc *desc,
2447                                  int nob)
2448 {
2449         struct ptlrpc_cli_ctx *ctx;
2450         int rc;
2451
2452         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2453
2454         if (!req->rq_pack_bulk)
2455                 return desc->bd_nob_transferred;
2456
2457         ctx = req->rq_cli_ctx;
2458         if (ctx->cc_ops->unwrap_bulk) {
2459                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2460                 if (rc < 0)
2461                         return rc;
2462         }
2463         return desc->bd_nob_transferred;
2464 }
2465 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2466
2467 /**
2468  * This is called after unwrapping the reply message.
2469  * Returns 0 on success or an error code.
2470  */
2471 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2472                                   struct ptlrpc_bulk_desc *desc)
2473 {
2474         struct ptlrpc_cli_ctx *ctx;
2475         int rc;
2476
2477         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2478
2479         if (!req->rq_pack_bulk)
2480                 return 0;
2481
2482         ctx = req->rq_cli_ctx;
2483         if (ctx->cc_ops->unwrap_bulk) {
2484                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2485                 if (rc < 0)
2486                         return rc;
2487         }
2488
2489         /*
2490          * if everything is going right, nob should equal nob_transferred.
2491          * in case of privacy mode, nob_transferred needs to be adjusted.
2492          */
2493         if (desc->bd_nob != desc->bd_nob_transferred) {
2494                 CERROR("nob %d doesn't match transferred nob %d\n",
2495                        desc->bd_nob, desc->bd_nob_transferred);
2496                 return -EPROTO;
2497         }
2498
2499         return 0;
2500 }
2501 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2502
2503 #ifdef HAVE_SERVER_SUPPORT
2504 /**
2505  * Perform transformation upon outgoing bulk read.
2506  */
2507 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2508                           struct ptlrpc_bulk_desc *desc)
2509 {
2510         struct ptlrpc_svc_ctx *ctx;
2511
2512         LASSERT(req->rq_bulk_read);
2513
2514         if (!req->rq_pack_bulk)
2515                 return 0;
2516
2517         ctx = req->rq_svc_ctx;
2518         if (ctx->sc_policy->sp_sops->wrap_bulk)
2519                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2520
2521         return 0;
2522 }
2523 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2524
2525 /**
2526  * Perform transformation upon incoming bulk write.
2527  */
2528 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2529                             struct ptlrpc_bulk_desc *desc)
2530 {
2531         struct ptlrpc_svc_ctx *ctx;
2532         int rc;
2533
2534         LASSERT(req->rq_bulk_write);
2535
2536         /*
2537          * if it's in privacy mode, transferred should be >= expected; otherwise
2538          * transferred should be == expected.
2539          */
2540         if (desc->bd_nob_transferred < desc->bd_nob ||
2541             (desc->bd_nob_transferred > desc->bd_nob &&
2542              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2543              SPTLRPC_BULK_SVC_PRIV)) {
2544                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2545                           desc->bd_nob_transferred, desc->bd_nob);
2546                 return -ETIMEDOUT;
2547         }
2548
2549         if (!req->rq_pack_bulk)
2550                 return 0;
2551
2552         ctx = req->rq_svc_ctx;
2553         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2554                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2555                 if (rc)
2556                         CERROR("error unwrap bulk: %d\n", rc);
2557         }
2558
2559         /* return 0 to allow the reply to be sent */
2560         return 0;
2561 }
2562 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2563
2564 /**
2565  * Prepare buffers for incoming bulk write.
2566  */
2567 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2568                           struct ptlrpc_bulk_desc *desc)
2569 {
2570         struct ptlrpc_svc_ctx *ctx;
2571
2572         LASSERT(req->rq_bulk_write);
2573
2574         if (!req->rq_pack_bulk)
2575                 return 0;
2576
2577         ctx = req->rq_svc_ctx;
2578         if (ctx->sc_policy->sp_sops->prep_bulk)
2579                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2580
2581         return 0;
2582 }
2583 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2584
2585 #endif /* HAVE_SERVER_SUPPORT */
2586
2587 /*
2588  * user descriptor helpers
2589  */
2590
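     /* Size of a user descriptor for the current process, with the group
      * count capped at LUSTRE_MAX_GROUPS. */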
2591 int sptlrpc_current_user_desc_size(void)
2592 {
2593         int ngroups;
2594
2595         ngroups = current_cred()->group_info->ngroups;
2596
2597         if (ngroups > LUSTRE_MAX_GROUPS)
2598                 ngroups = LUSTRE_MAX_GROUPS;
2599         return sptlrpc_user_desc_size(ngroups);
2600 }
2601 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2602
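     /* Fill the user descriptor at \a offset of \a msg with the current
      * process's uid/gid/fsuid/fsgid, capabilities and supplementary groups. */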
2603 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2604 {
2605         struct ptlrpc_user_desc *pud;
2606         int ngroups;
2607
2608         pud = lustre_msg_buf(msg, offset, 0);
2609
2610         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2611         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2612         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2613         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2614         pud->pud_cap = ll_capability_u32(current_cap());
2615         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2616
2617         task_lock(current);
2618         ngroups = current_cred()->group_info->ngroups;
2619         if (pud->pud_ngroups > ngroups)
2620                 pud->pud_ngroups = ngroups;
2621 #ifdef HAVE_GROUP_INFO_GID
2622         memcpy(pud->pud_groups, current_cred()->group_info->gid,
2623                pud->pud_ngroups * sizeof(__u32));
2624 #else /* !HAVE_GROUP_INFO_GID */
2625         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2626                pud->pud_ngroups * sizeof(__u32));
2627 #endif /* HAVE_GROUP_INFO_GID */
2628         task_unlock(current);
2629
2630         return 0;
2631 }
2632 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2633
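     /* Byte-swap (if \a swabbed) and validate the user descriptor at
      * \a offset of \a msg. */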
2634 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2635 {
2636         struct ptlrpc_user_desc *pud;
2637         int i;
2638
2639         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2640         if (!pud)
2641                 return -EINVAL;
2642
2643         if (swabbed) {
2644                 __swab32s(&pud->pud_uid);
2645                 __swab32s(&pud->pud_gid);
2646                 __swab32s(&pud->pud_fsuid);
2647                 __swab32s(&pud->pud_fsgid);
2648                 __swab32s(&pud->pud_cap);
2649                 __swab32s(&pud->pud_ngroups);
2650         }
2651
2652         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2653                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2654                 return -EINVAL;
2655         }
2656
2657         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2658             msg->lm_buflens[offset]) {
2659                 CERROR("%u groups are claimed but bufsize only %u\n",
2660                        pud->pud_ngroups, msg->lm_buflens[offset]);
2661                 return -EINVAL;
2662         }
2663
2664         if (swabbed) {
2665                 for (i = 0; i < pud->pud_ngroups; i++)
2666                         __swab32s(&pud->pud_groups[i]);
2667         }
2668
2669         return 0;
2670 }
2671 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2672
2673 /*
2674  * misc helpers
2675  */
2676
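     /* Return a short string naming the target of \a sec: "*" if unknown,
      * "c" for a reverse sec, otherwise the target UUID. */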
2677 const char *sec2target_str(struct ptlrpc_sec *sec)
2678 {
2679         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2680                 return "*";
2681         if (sec_is_reverse(sec))
2682                 return "c";
2683         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2684 }
2685 EXPORT_SYMBOL(sec2target_str);
2686
2687 /*
2688  * return true if the bulk data is protected
2689  */
2690 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2691 {
2692         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2693         case SPTLRPC_BULK_SVC_INTG:
2694         case SPTLRPC_BULK_SVC_PRIV:
2695                 return 1;
2696         default:
2697                 return 0;
2698         }
2699 }
2700 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2701
2702 /*
2703  * crypto API helper / alloc blkcipher
2704  */
2705
2706 /*
2707  * initialize/finalize
2708  */
2709
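     /* Initialize the sptlrpc layer: gc, config, enc pool, null and plain
      * policies, and lprocfs entries; cleaned up in reverse order on error. */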
2710 int sptlrpc_init(void)
2711 {
2712         int rc;
2713
2714         rwlock_init(&policy_lock);
2715
2716         rc = sptlrpc_gc_init();
2717         if (rc)
2718                 goto out;
2719
2720         rc = sptlrpc_conf_init();
2721         if (rc)
2722                 goto out_gc;
2723
2724         rc = sptlrpc_enc_pool_init();
2725         if (rc)
2726                 goto out_conf;
2727
2728         rc = sptlrpc_null_init();
2729         if (rc)
2730                 goto out_pool;
2731
2732         rc = sptlrpc_plain_init();
2733         if (rc)
2734                 goto out_null;
2735
2736         rc = sptlrpc_lproc_init();
2737         if (rc)
2738                 goto out_plain;
2739
2740         return 0;
2741
2742 out_plain:
2743         sptlrpc_plain_fini();
2744 out_null:
2745         sptlrpc_null_fini();
2746 out_pool:
2747         sptlrpc_enc_pool_fini();
2748 out_conf:
2749         sptlrpc_conf_fini();
2750 out_gc:
2751         sptlrpc_gc_fini();
2752 out:
2753         return rc;
2754 }
2755
2756 void sptlrpc_fini(void)
2757 {
2758         sptlrpc_lproc_fini();
2759         sptlrpc_plain_fini();
2760         sptlrpc_null_fini();
2761         sptlrpc_enc_pool_fini();
2762         sptlrpc_conf_fini();
2763         sptlrpc_gc_fini();
2764 }