1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/ptlrpc/sec.c
32  *
33  * Author: Eric Mei <ericm@clusterfs.com>
34  */
35
36 #define DEBUG_SUBSYSTEM S_SEC
37
38 #include <linux/user_namespace.h>
39 #include <linux/uidgid.h>
40 #include <linux/crypto.h>
41 #include <linux/key.h>
42
43 #include <libcfs/libcfs.h>
44 #include <obd.h>
45 #include <obd_class.h>
46 #include <obd_support.h>
47 #include <lustre_net.h>
48 #include <lustre_import.h>
49 #include <lustre_dlm.h>
50 #include <lustre_sec.h>
51
52 #include "ptlrpc_internal.h"
53
54 static int send_sepol;
55 module_param(send_sepol, int, 0644);
56 MODULE_PARM_DESC(send_sepol, "Client sends SELinux policy status");
57
58 /*
59  * security policy registry
60  */
61
62 static rwlock_t policy_lock;
63 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
64         NULL,
65 };
66
67 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
68 {
69         __u16 number = policy->sp_policy;
70
71         LASSERT(policy->sp_name);
72         LASSERT(policy->sp_cops);
73         LASSERT(policy->sp_sops);
74
75         if (number >= SPTLRPC_POLICY_MAX)
76                 return -EINVAL;
77
78         write_lock(&policy_lock);
79         if (unlikely(policies[number])) {
80                 write_unlock(&policy_lock);
81                 return -EALREADY;
82         }
83         policies[number] = policy;
84         write_unlock(&policy_lock);
85
86         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
87         return 0;
88 }
89 EXPORT_SYMBOL(sptlrpc_register_policy);
90
91 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
92 {
93         __u16 number = policy->sp_policy;
94
95         LASSERT(number < SPTLRPC_POLICY_MAX);
96
97         write_lock(&policy_lock);
98         if (unlikely(policies[number] == NULL)) {
99                 write_unlock(&policy_lock);
100                 CERROR("%s: already unregistered\n", policy->sp_name);
101                 return -EINVAL;
102         }
103
104         LASSERT(policies[number] == policy);
105         policies[number] = NULL;
106         write_unlock(&policy_lock);
107
108         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
109         return 0;
110 }
111 EXPORT_SYMBOL(sptlrpc_unregister_policy);
112
113 static
114 struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
115 {
116         static DEFINE_MUTEX(load_mutex);
117         struct ptlrpc_sec_policy *policy;
118         __u16 number = SPTLRPC_FLVR_POLICY(flavor);
119         int rc;
120
121         if (number >= SPTLRPC_POLICY_MAX)
122                 return NULL;
123
124         while (1) {
125                 read_lock(&policy_lock);
126                 policy = policies[number];
127                 if (policy && !try_module_get(policy->sp_owner))
128                         policy = NULL;
129                 read_unlock(&policy_lock);
130
131                 if (policy != NULL || number != SPTLRPC_POLICY_GSS)
132                         break;
133
134                 /* try to load gss module, happens only if policy at index
135                  * SPTLRPC_POLICY_GSS is not already referenced in
136                  * global array policies[]
137                  */
138                 mutex_lock(&load_mutex);
139                 /* The fact that request_module() returns 0 does not guarantee
140                  * the module has done its job. So we must check that the
141                  * requested policy is now available. This is done by checking
142                  * again for policies[number] in the loop.
143                  */
144                 rc = request_module("ptlrpc_gss");
145                 if (rc == 0)
146                         CDEBUG(D_SEC, "module ptlrpc_gss loaded on demand\n");
147                 else
148                         CERROR("Unable to load module ptlrpc_gss: rc %d\n", rc);
149                 mutex_unlock(&load_mutex);
150         }
151
152         return policy;
153 }
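/*
 * Note (informal summary of the lookup above): the policy number is taken
 * from the wire flavor via SPTLRPC_FLVR_POLICY(); only the GSS policy slot
 * triggers the on-demand request_module("ptlrpc_gss") retry loop, every
 * other slot is looked up exactly once.
 */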
154
155 __u32 sptlrpc_name2flavor_base(const char *name)
156 {
157         if (!strcmp(name, "null"))
158                 return SPTLRPC_FLVR_NULL;
159         if (!strcmp(name, "plain"))
160                 return SPTLRPC_FLVR_PLAIN;
161         if (!strcmp(name, "gssnull"))
162                 return SPTLRPC_FLVR_GSSNULL;
163         if (!strcmp(name, "krb5n"))
164                 return SPTLRPC_FLVR_KRB5N;
165         if (!strcmp(name, "krb5a"))
166                 return SPTLRPC_FLVR_KRB5A;
167         if (!strcmp(name, "krb5i"))
168                 return SPTLRPC_FLVR_KRB5I;
169         if (!strcmp(name, "krb5p"))
170                 return SPTLRPC_FLVR_KRB5P;
171         if (!strcmp(name, "skn"))
172                 return SPTLRPC_FLVR_SKN;
173         if (!strcmp(name, "ska"))
174                 return SPTLRPC_FLVR_SKA;
175         if (!strcmp(name, "ski"))
176                 return SPTLRPC_FLVR_SKI;
177         if (!strcmp(name, "skpi"))
178                 return SPTLRPC_FLVR_SKPI;
179
180         return SPTLRPC_FLVR_INVALID;
181 }
182 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
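/*
 * Usage sketch (illustrative only): sptlrpc_name2flavor_base("krb5i")
 * returns SPTLRPC_FLVR_KRB5I, while an unrecognized name such as "bogus"
 * returns SPTLRPC_FLVR_INVALID.
 */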
183
184 const char *sptlrpc_flavor2name_base(__u32 flvr)
185 {
186         __u32   base = SPTLRPC_FLVR_BASE(flvr);
187
188         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
189                 return "null";
190         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
191                 return "plain";
192         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
193                 return "gssnull";
194         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
195                 return "krb5n";
196         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
197                 return "krb5a";
198         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
199                 return "krb5i";
200         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
201                 return "krb5p";
202         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
203                 return "skn";
204         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
205                 return "ska";
206         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
207                 return "ski";
208         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
209                 return "skpi";
210
211         CERROR("invalid wire flavor 0x%x\n", flvr);
212         return "invalid";
213 }
214 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
215
216 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
217                                char *buf, int bufsize)
218 {
219         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
220                 snprintf(buf, bufsize, "hash:%s",
221                         sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
222         else
223                 snprintf(buf, bufsize, "%s",
224                         sptlrpc_flavor2name_base(sf->sf_rpc));
225
226         buf[bufsize - 1] = '\0';
227         return buf;
228 }
229 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
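/*
 * Example (derived from the branches above): for a PLAIN-policy flavor the
 * buffer is filled with "hash:<alg>", where <alg> is the configured bulk
 * hash algorithm name; for any other policy it falls back to the base RPC
 * flavor name, e.g. "krb5p".
 */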
230
231 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
232 {
233         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
234
235         /*
236          * currently we don't support customized bulk specification for
237          * flavors other than plain
238          */
239         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
240                 char bspec[16];
241
242                 bspec[0] = '-';
243                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
244                 strlcat(buf, bspec, bufsize);
245         }
246
247         buf[bufsize - 1] = '\0';
248         return buf;
249 }
250 EXPORT_SYMBOL(sptlrpc_flavor2name);
251
252 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
253 {
254         buf[0] = '\0';
255
256         if (flags & PTLRPC_SEC_FL_REVERSE)
257                 strlcat(buf, "reverse,", bufsize);
258         if (flags & PTLRPC_SEC_FL_ROOTONLY)
259                 strlcat(buf, "rootonly,", bufsize);
260         if (flags & PTLRPC_SEC_FL_UDESC)
261                 strlcat(buf, "udesc,", bufsize);
262         if (flags & PTLRPC_SEC_FL_BULK)
263                 strlcat(buf, "bulk,", bufsize);
264         if (buf[0] == '\0')
265                 strlcat(buf, "-,", bufsize);
266
267         return buf;
268 }
269 EXPORT_SYMBOL(sptlrpc_secflags2str);
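/*
 * Example (informal, following the flag handling above): flags with
 * PTLRPC_SEC_FL_REVERSE and PTLRPC_SEC_FL_BULK set are rendered as
 * "reverse,bulk," (the trailing comma is kept), and flags == 0 yields "-,".
 *
 *	char buf[32];
 *	sptlrpc_secflags2str(PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_BULK,
 *			     buf, sizeof(buf));
 */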
270
271 /*
272  * client context APIs
273  */
274
275 static
276 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
277 {
278         struct vfs_cred vcred;
279         int create = 1, remove_dead = 1;
280
281         LASSERT(sec);
282         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
283
284         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
285                                      PTLRPC_SEC_FL_ROOTONLY)) {
286                 vcred.vc_uid = 0;
287                 vcred.vc_gid = 0;
288                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
289                         create = 0;
290                         remove_dead = 0;
291                 }
292         } else {
293                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
294                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
295         }
296
297         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
298                                                    remove_dead);
299 }
300
301 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
302 {
303         atomic_inc(&ctx->cc_refcount);
304         return ctx;
305 }
306 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
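/*
 * Reference counting note: every sptlrpc_cli_ctx_get() must be balanced by
 * a sptlrpc_cli_ctx_put(); dropping the last reference hands the context
 * back to its policy through release_ctx() (see sptlrpc_cli_ctx_put()
 * below).
 */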
307
308 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
309 {
310         struct ptlrpc_sec *sec = ctx->cc_sec;
311
312         LASSERT(sec);
313         LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
314
315         if (!atomic_dec_and_test(&ctx->cc_refcount))
316                 return;
317
318         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
319 }
320 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
321
322 /**
323  * Expire the client context immediately.
324  *
325  * \pre Caller must hold at least 1 reference on the \a ctx.
326  */
327 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
328 {
329         LASSERT(ctx->cc_ops->die);
330         ctx->cc_ops->die(ctx, 0);
331 }
332 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
333
334 /**
335  * Wake up any threads that are waiting for this client context. Called
336  * after a status change on \a ctx.
337  */
338 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
339 {
340         struct ptlrpc_request *req, *next;
341
342         spin_lock(&ctx->cc_lock);
343         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
344                                      rq_ctx_chain) {
345                 list_del_init(&req->rq_ctx_chain);
346                 ptlrpc_client_wake_req(req);
347         }
348         spin_unlock(&ctx->cc_lock);
349 }
350 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
351
352 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
353 {
354         LASSERT(ctx->cc_ops);
355
356         if (ctx->cc_ops->display == NULL)
357                 return 0;
358
359         return ctx->cc_ops->display(ctx, buf, bufsize);
360 }
361
362 static int import_sec_check_expire(struct obd_import *imp)
363 {
364         int adapt = 0;
365
366         write_lock(&imp->imp_sec_lock);
367         if (imp->imp_sec_expire &&
368             imp->imp_sec_expire < ktime_get_real_seconds()) {
369                 adapt = 1;
370                 imp->imp_sec_expire = 0;
371         }
372         write_unlock(&imp->imp_sec_lock);
373
374         if (!adapt)
375                 return 0;
376
377         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
378         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
379 }
380
381 /**
382  * Get and validate the client side ptlrpc security facilities from
383  * \a imp. There is a race condition on client reconnect when the import is
384  * being destroyed while there are outstanding client bound requests. In
385  * this case do not output any error messages if import security is not
386  * found.
387  *
388  * \param[in] imp obd import associated with client
389  * \param[out] sec client side ptlrpc security
390  *
391  * \retval 0 if security retrieved successfully
392  * \retval -ve errno if there was a problem
393  */
394 static int import_sec_validate_get(struct obd_import *imp,
395                                    struct ptlrpc_sec **sec)
396 {
397         int rc;
398
399         if (unlikely(imp->imp_sec_expire)) {
400                 rc = import_sec_check_expire(imp);
401                 if (rc)
402                         return rc;
403         }
404
405         *sec = sptlrpc_import_sec_ref(imp);
406         if (*sec == NULL) {
407                 /* Only output an error when the import is still active */
408                 if (!test_bit(WORK_STRUCT_PENDING_BIT,
409                               work_data_bits(&imp->imp_zombie_work)))
410                         CERROR("import %p (%s) with no sec\n",
411                                imp, ptlrpc_import_state_name(imp->imp_state));
412                 return -EACCES;
413         }
414
415         if (unlikely((*sec)->ps_dying)) {
416                 CERROR("attempt to use dying sec %p\n", *sec);
417                 sptlrpc_sec_put(*sec);
418                 return -EACCES;
419         }
420
421         return 0;
422 }
423
424 /**
425  * Given a \a req, find or allocate an appropriate context for it.
426  * \pre req->rq_cli_ctx == NULL.
427  *
428  * \retval 0 on success, and req->rq_cli_ctx is set.
429  * \retval -ve error number, and req->rq_cli_ctx == NULL.
430  */
431 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
432 {
433         struct obd_import *imp = req->rq_import;
434         struct ptlrpc_sec *sec;
435         int rc;
436
437         ENTRY;
438
439         LASSERT(!req->rq_cli_ctx);
440         LASSERT(imp);
441
442         rc = import_sec_validate_get(imp, &sec);
443         if (rc)
444                 RETURN(rc);
445
446         req->rq_cli_ctx = get_my_ctx(sec);
447
448         sptlrpc_sec_put(sec);
449
450         if (!req->rq_cli_ctx) {
451                 CERROR("req %p: fail to get context\n", req);
452                 RETURN(-ECONNREFUSED);
453         }
454
455         RETURN(0);
456 }
457
458 /**
459  * Drop the context for \a req.
460  * \pre req->rq_cli_ctx != NULL.
461  * \post req->rq_cli_ctx == NULL.
462  *
463  * If \a sync == 0, this function should return quickly without sleeping;
464  * otherwise it might trigger and wait for the whole process of sending
465  * a context-destroying RPC to the server.
466  */
467 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
468 {
469         ENTRY;
470
471         LASSERT(req);
472         LASSERT(req->rq_cli_ctx);
473
474         /*
475          * the request might be asked to release its context early while
476          * it is still on the context waiting list.
477          */
478         if (!list_empty(&req->rq_ctx_chain)) {
479                 spin_lock(&req->rq_cli_ctx->cc_lock);
480                 list_del_init(&req->rq_ctx_chain);
481                 spin_unlock(&req->rq_cli_ctx->cc_lock);
482         }
483
484         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
485         req->rq_cli_ctx = NULL;
486         EXIT;
487 }
488
489 static
490 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
491                            struct ptlrpc_cli_ctx *oldctx,
492                            struct ptlrpc_cli_ctx *newctx)
493 {
494         struct sptlrpc_flavor old_flvr;
495         char *reqmsg = NULL; /* to work around old gcc */
496         int reqmsg_size;
497         int rc = 0;
498
499         CDEBUG(D_SEC,
500                "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
501                req, oldctx, oldctx->cc_vcred.vc_uid,
502                sec2target_str(oldctx->cc_sec), newctx, newctx->cc_vcred.vc_uid,
503                sec2target_str(newctx->cc_sec), oldctx->cc_sec,
504                oldctx->cc_sec->ps_policy->sp_name, newctx->cc_sec,
505                newctx->cc_sec->ps_policy->sp_name);
506
507         /* save flavor */
508         old_flvr = req->rq_flvr;
509
510         /* save request message */
511         reqmsg_size = req->rq_reqlen;
512         if (reqmsg_size != 0) {
513                 LASSERT(req->rq_reqmsg);
514                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
515                 if (reqmsg == NULL)
516                         return -ENOMEM;
517                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
518         }
519
520         /* release old req/rep buf */
521         req->rq_cli_ctx = oldctx;
522         sptlrpc_cli_free_reqbuf(req);
523         sptlrpc_cli_free_repbuf(req);
524         req->rq_cli_ctx = newctx;
525
526         /* recalculate the flavor */
527         sptlrpc_req_set_flavor(req, 0);
528
529         /*
530          * alloc new request buffer
531          * we don't need to alloc reply buffer here, leave it to the
532          * rest procedure of ptlrpc
533          */
534         if (reqmsg_size != 0) {
535                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
536                 if (!rc) {
537                         LASSERT(req->rq_reqmsg);
538                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
539                 } else {
540                         CWARN("failed to alloc reqbuf: %d\n", rc);
541                         req->rq_flvr = old_flvr;
542                 }
543
544                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
545         }
546         return rc;
547 }
548
549 /**
550  * If the current context of \a req is dead somehow, e.g. we just switched
551  * flavor and thereby marked the original contexts dead, find a new context
552  * for it. If no switch is needed, \a req ends up with the same context.
553  *
554  * \note A request must always have a context, to keep other parts of the
555  * code happy. On any failure during the switch, the old one is restored.
556  */
557 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
558 {
559         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
560         struct ptlrpc_cli_ctx *newctx;
561         int rc;
562
563         ENTRY;
564
565         LASSERT(oldctx);
566
567         sptlrpc_cli_ctx_get(oldctx);
568         sptlrpc_req_put_ctx(req, 0);
569
570         rc = sptlrpc_req_get_ctx(req);
571         if (unlikely(rc)) {
572                 LASSERT(!req->rq_cli_ctx);
573
574                 /* restore old ctx */
575                 req->rq_cli_ctx = oldctx;
576                 RETURN(rc);
577         }
578
579         newctx = req->rq_cli_ctx;
580         LASSERT(newctx);
581
582         if (unlikely(newctx == oldctx &&
583                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
584                 /*
585                  * still got the old dead ctx, which usually means the system is too busy
586                  */
587                 CDEBUG(D_SEC,
588                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
589                        newctx, newctx->cc_flags);
590
591                 schedule_timeout_interruptible(cfs_time_seconds(1));
592         } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
593                             == 0)) {
594                 /*
595                  * new ctx not up to date yet
596                  */
597                 CDEBUG(D_SEC,
598                        "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
599                        newctx, newctx->cc_flags);
600         } else {
601                 /*
602                  * it's possible newctx == oldctx if we're switching
603                  * subflavor with the same sec.
604                  */
605                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
606                 if (rc) {
607                         /* restore old ctx */
608                         sptlrpc_req_put_ctx(req, 0);
609                         req->rq_cli_ctx = oldctx;
610                         RETURN(rc);
611                 }
612
613                 LASSERT(req->rq_cli_ctx == newctx);
614         }
615
616         sptlrpc_cli_ctx_put(oldctx, 1);
617         RETURN(0);
618 }
619 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
620
621 static
622 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
623 {
624         if (cli_ctx_is_refreshed(ctx))
625                 return 1;
626         return 0;
627 }
628
629 static
630 void ctx_refresh_interrupt(struct ptlrpc_request *req)
631 {
632
633         spin_lock(&req->rq_lock);
634         req->rq_intr = 1;
635         spin_unlock(&req->rq_lock);
636 }
637
638 static
639 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
640 {
641         spin_lock(&ctx->cc_lock);
642         if (!list_empty(&req->rq_ctx_chain))
643                 list_del_init(&req->rq_ctx_chain);
644         spin_unlock(&ctx->cc_lock);
645 }
646
647 /**
648  * Refresh the context of \a req, if it's not up to date.
649  * \param timeout
650  * - == 0: do not wait
651  * - == MAX_SCHEDULE_TIMEOUT: wait indefinitely
652  * - > 0: not supported
653  *
654  * The status of the context may be changed by other threads at any time.
655  * We allow this race, but once we return 0, the caller will assume the
656  * context is up to date and keep using it until the owning RPC is done.
657  *
658  * \retval 0 only if the context is up to date.
659  * \retval -ve error number.
660  */
661 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
662 {
663         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
664         struct ptlrpc_sec *sec;
665         int rc;
666
667         ENTRY;
668
669         LASSERT(ctx);
670
671         if (req->rq_ctx_init || req->rq_ctx_fini)
672                 RETURN(0);
673
674         if (timeout != 0 && timeout != MAX_SCHEDULE_TIMEOUT) {
675                 CERROR("req %p: invalid timeout %lu\n", req, timeout);
676                 RETURN(-EINVAL);
677         }
678
679         /*
680          * during the process a request's context might even change type
681          * (e.g. from gss ctx to null ctx), so on each loop we need to re-check
682          * everything
683          */
684 again:
685         rc = import_sec_validate_get(req->rq_import, &sec);
686         if (rc)
687                 RETURN(rc);
688
689         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
690                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
691                        req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
692                 req_off_ctx_list(req, ctx);
693                 sptlrpc_req_replace_dead_ctx(req);
694                 ctx = req->rq_cli_ctx;
695         }
696         sptlrpc_sec_put(sec);
697
698         if (cli_ctx_is_eternal(ctx))
699                 RETURN(0);
700
701         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
702                 if (ctx->cc_ops->refresh)
703                         ctx->cc_ops->refresh(ctx);
704         }
705         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
706
707         LASSERT(ctx->cc_ops->validate);
708         if (ctx->cc_ops->validate(ctx) == 0) {
709                 req_off_ctx_list(req, ctx);
710                 RETURN(0);
711         }
712
713         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
714                 spin_lock(&req->rq_lock);
715                 req->rq_err = 1;
716                 spin_unlock(&req->rq_lock);
717                 req_off_ctx_list(req, ctx);
718                 RETURN(-EPERM);
719         }
720
721         /*
722          * There's a subtle issue with resending RPCs. Consider the following
723          * situation:
724          *  1. the request was sent to the server.
725          *  2. recovery kicked in; after it finished, the request was
726          *     marked as resent.
727          *  3. the request is resent.
728          *  4. the old reply from the server is received; we accept and verify
729          *     it. This has to succeed, otherwise the error becomes visible
730          *     to the application.
731          *  5. the new reply from the server is received and dropped by LNet.
732          *
733          * Note the xid of the old & new request is the same. We can't simply
734          * change the xid of the resent request because the server relies on
735          * it for reply reconstruction.
736          *
737          * Commonly the original context should be up to date because we
738          * have a nice expiry margin; the server will keep its context because
739          * we at least hold a ref on the old context, which prevents the
740          * context-destroying RPC from being sent. So the server can still
741          * accept the request and finish the RPC. But if that's not the case:
742          *  1. If the server side context has been trimmed, NO_CONTEXT will
743          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
744          *     new context by force.
745          *  2. If the current context was never refreshed, then we are fine:
746          *     we never really sent a request with the old context before.
747          */
748         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
749             unlikely(req->rq_reqmsg) &&
750             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
751                 req_off_ctx_list(req, ctx);
752                 RETURN(0);
753         }
754
755         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
756                 req_off_ctx_list(req, ctx);
757                 /*
758                  * don't switch ctx if import was deactivated
759                  */
760                 if (req->rq_import->imp_deactive) {
761                         spin_lock(&req->rq_lock);
762                         req->rq_err = 1;
763                         spin_unlock(&req->rq_lock);
764                         RETURN(-EINTR);
765                 }
766
767                 rc = sptlrpc_req_replace_dead_ctx(req);
768                 if (rc) {
769                         LASSERT(ctx == req->rq_cli_ctx);
770                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
771                                req, ctx, rc);
772                         spin_lock(&req->rq_lock);
773                         req->rq_err = 1;
774                         spin_unlock(&req->rq_lock);
775                         RETURN(rc);
776                 }
777
778                 ctx = req->rq_cli_ctx;
779                 goto again;
780         }
781
782         /*
783          * Now we're sure this context is in the middle of an upcall; add
784          * this request to the waiting list
785          */
786         spin_lock(&ctx->cc_lock);
787         if (list_empty(&req->rq_ctx_chain))
788                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
789         spin_unlock(&ctx->cc_lock);
790
791         if (timeout == 0)
792                 RETURN(-EAGAIN);
793
794         /* Clear any flags that may be present from previous sends */
795         LASSERT(req->rq_receiving_reply == 0);
796         spin_lock(&req->rq_lock);
797         req->rq_err = 0;
798         req->rq_timedout = 0;
799         req->rq_resend = 0;
800         req->rq_restart = 0;
801         spin_unlock(&req->rq_lock);
802
803         /* by now we know that timeout value is MAX_SCHEDULE_TIMEOUT,
804          * so wait indefinitely with non-fatal signals blocked
805          */
806         if (l_wait_event_abortable(req->rq_reply_waitq,
807                                    ctx_check_refresh(ctx)) == -ERESTARTSYS) {
808                 rc = -EINTR;
809                 ctx_refresh_interrupt(req);
810         }
811
812         /*
813          * the following cases could lead us here:
814          * - successfully refreshed;
815          * - interrupted;
816          * - timed out, and we don't want to recover from the failure;
817          * - timed out, and woken up when recovery finished;
818          * - someone else marked this ctx dead by force;
819          * - someone invalidated the req and called ptlrpc_client_wake_req(),
820          *   e.g. ptlrpc_abort_inflight();
821          */
822         if (!cli_ctx_is_refreshed(ctx)) {
823                 /* timed out or interrupted */
824                 req_off_ctx_list(req, ctx);
825
826                 LASSERT(rc != 0);
827                 RETURN(rc);
828         }
829
830         goto again;
831 }
832
833 /* Bring ptlrpc_sec context up-to-date */
834 int sptlrpc_export_update_ctx(struct obd_export *exp)
835 {
836         struct obd_import *imp = exp ? exp->exp_imp_reverse : NULL;
837         struct ptlrpc_sec *sec = NULL;
838         struct ptlrpc_cli_ctx *ctx = NULL;
839         int rc = 0;
840
841         if (imp)
842                 sec = sptlrpc_import_sec_ref(imp);
843         if (sec) {
844                 ctx = get_my_ctx(sec);
845                 sptlrpc_sec_put(sec);
846         }
847
848         if (ctx) {
849                 if (ctx->cc_ops->refresh)
850                         rc = ctx->cc_ops->refresh(ctx);
851                 sptlrpc_cli_ctx_put(ctx, 1);
852         }
853         return rc;
854 }
855
856 /**
857  * Initialize flavor settings for \a req, according to \a opcode.
858  *
859  * \note this could be called in two situations:
860  * - a new request from ptlrpc_pre_req(), with a proper \a opcode
861  * - an old request which changed ctx in the middle, with \a opcode == 0
862  */
863 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
864 {
865         struct ptlrpc_sec *sec;
866
867         LASSERT(req->rq_import);
868         LASSERT(req->rq_cli_ctx);
869         LASSERT(req->rq_cli_ctx->cc_sec);
870         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
871
872         /* special security flags according to opcode */
873         switch (opcode) {
874         case OST_READ:
875         case MDS_READPAGE:
876         case MGS_CONFIG_READ:
877         case OBD_IDX_READ:
878                 req->rq_bulk_read = 1;
879                 break;
880         case OST_WRITE:
881         case MDS_WRITEPAGE:
882                 req->rq_bulk_write = 1;
883                 break;
884         case SEC_CTX_INIT:
885                 req->rq_ctx_init = 1;
886                 break;
887         case SEC_CTX_FINI:
888                 req->rq_ctx_fini = 1;
889                 break;
890         case 0:
891                 /* init/fini rpcs won't be resent, so they can't get here */
892                 LASSERT(req->rq_ctx_init == 0);
893                 LASSERT(req->rq_ctx_fini == 0);
894
895                 /* clear flags which will be recalculated below */
896                 req->rq_pack_udesc = 0;
897                 req->rq_pack_bulk = 0;
898                 break;
899         }
900
901         sec = req->rq_cli_ctx->cc_sec;
902
903         spin_lock(&sec->ps_lock);
904         req->rq_flvr = sec->ps_flvr;
905         spin_unlock(&sec->ps_lock);
906
907         /*
908          * force SVC_NULL for context initiation rpc, SVC_INTG for context
909          * destruction rpc
910          */
911         if (unlikely(req->rq_ctx_init))
912                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
913         else if (unlikely(req->rq_ctx_fini))
914                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
915
916         /* user descriptor flag, null security can't do it anyway */
917         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
918             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
919                 req->rq_pack_udesc = 1;
920
921         /* bulk security flag */
922         if ((req->rq_bulk_read || req->rq_bulk_write) &&
923             sptlrpc_flavor_has_bulk(&req->rq_flvr))
924                 req->rq_pack_bulk = 1;
925 }
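/*
 * Example (informal): an OST_WRITE request under a bulk-capable flavor ends
 * up with rq_bulk_write = 1 and rq_pack_bulk = 1, while a SEC_CTX_INIT
 * request is forced to SPTLRPC_SVC_NULL service regardless of the
 * configured flavor.
 */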
926
927 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
928 {
929         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
930                 return;
931
932         LASSERT(req->rq_clrbuf);
933         if (req->rq_pool || !req->rq_reqbuf)
934                 return;
935
936         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
937         req->rq_reqbuf = NULL;
938         req->rq_reqbuf_len = 0;
939 }
940
941 /**
942  * Given an import \a imp, check whether the current user has a valid context
943  * or not. We may create a new context and try to refresh it, retrying
944  * repeatedly in case of non-fatal errors. Returns 0 on success.
945  */
946 int sptlrpc_import_check_ctx(struct obd_import *imp)
947 {
948         struct ptlrpc_sec     *sec;
949         struct ptlrpc_cli_ctx *ctx;
950         struct ptlrpc_request *req = NULL;
951         int rc;
952
953         ENTRY;
954
955         might_sleep();
956
957         sec = sptlrpc_import_sec_ref(imp);
958         ctx = get_my_ctx(sec);
959         sptlrpc_sec_put(sec);
960
961         if (!ctx)
962                 RETURN(-ENOMEM);
963
964         if (cli_ctx_is_eternal(ctx) ||
965             ctx->cc_ops->validate(ctx) == 0) {
966                 sptlrpc_cli_ctx_put(ctx, 1);
967                 RETURN(0);
968         }
969
970         if (cli_ctx_is_error(ctx)) {
971                 sptlrpc_cli_ctx_put(ctx, 1);
972                 RETURN(-EACCES);
973         }
974
975         req = ptlrpc_request_cache_alloc(GFP_NOFS);
976         if (!req)
977                 RETURN(-ENOMEM);
978
979         ptlrpc_cli_req_init(req);
980         atomic_set(&req->rq_refcount, 10000);
981
982         req->rq_import = imp;
983         req->rq_flvr = sec->ps_flvr;
984         req->rq_cli_ctx = ctx;
985
986         rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);
987         LASSERT(list_empty(&req->rq_ctx_chain));
988         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
989         ptlrpc_request_cache_free(req);
990
991         RETURN(rc);
992 }
993
994 /**
995  * Used by ptlrpc client, to perform the pre-defined security transformation
996  * upon the request message of \a req. After this function is called,
997  * req->rq_reqmsg is still accessible as clear text.
998  */
999 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
1000 {
1001         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1002         int rc = 0;
1003
1004         ENTRY;
1005
1006         LASSERT(ctx);
1007         LASSERT(ctx->cc_sec);
1008         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1009
1010         /*
1011          * we wrap bulk request here because now we can be sure
1012          * the context is uptodate.
1013          */
1014         if (req->rq_bulk) {
1015                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
1016                 if (rc)
1017                         RETURN(rc);
1018         }
1019
1020         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1021         case SPTLRPC_SVC_NULL:
1022         case SPTLRPC_SVC_AUTH:
1023         case SPTLRPC_SVC_INTG:
1024                 LASSERT(ctx->cc_ops->sign);
1025                 rc = ctx->cc_ops->sign(ctx, req);
1026                 break;
1027         case SPTLRPC_SVC_PRIV:
1028                 LASSERT(ctx->cc_ops->seal);
1029                 rc = ctx->cc_ops->seal(ctx, req);
1030                 break;
1031         default:
1032                 LBUG();
1033         }
1034
1035         if (rc == 0) {
1036                 LASSERT(req->rq_reqdata_len);
1037                 LASSERT(req->rq_reqdata_len % 8 == 0);
1038                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1039         }
1040
1041         RETURN(rc);
1042 }
1043
1044 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1045 {
1046         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1047         int rc;
1048
1049         ENTRY;
1050
1051         LASSERT(ctx);
1052         LASSERT(ctx->cc_sec);
1053         LASSERT(req->rq_repbuf);
1054         LASSERT(req->rq_repdata);
1055         LASSERT(req->rq_repmsg == NULL);
1056
1057         req->rq_rep_swab_mask = 0;
1058
1059         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1060         switch (rc) {
1061         case 1:
1062                 req_capsule_set_rep_swabbed(&req->rq_pill,
1063                                             MSG_PTLRPC_HEADER_OFF);
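                /* fall through */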
1064         case 0:
1065                 break;
1066         default:
1067                 CERROR("failed unpack reply: x%llu\n", req->rq_xid);
1068                 RETURN(-EPROTO);
1069         }
1070
1071         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1072                 CERROR("replied data length %d too small\n",
1073                        req->rq_repdata_len);
1074                 RETURN(-EPROTO);
1075         }
1076
1077         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1078             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1079                 CERROR("reply policy %u doesn't match request policy %u\n",
1080                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1081                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1082                 RETURN(-EPROTO);
1083         }
1084
1085         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1086         case SPTLRPC_SVC_NULL:
1087         case SPTLRPC_SVC_AUTH:
1088         case SPTLRPC_SVC_INTG:
1089                 LASSERT(ctx->cc_ops->verify);
1090                 rc = ctx->cc_ops->verify(ctx, req);
1091                 break;
1092         case SPTLRPC_SVC_PRIV:
1093                 LASSERT(ctx->cc_ops->unseal);
1094                 rc = ctx->cc_ops->unseal(ctx, req);
1095                 break;
1096         default:
1097                 LBUG();
1098         }
1099         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1100
1101         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1102             !req->rq_ctx_init)
1103                 req->rq_rep_swab_mask = 0;
1104         RETURN(rc);
1105 }
1106
1107 /**
1108  * Used by ptlrpc client, to perform security transformation upon the reply
1109  * message of \a req. After return successfully, req->rq_repmsg points to
1110  * the reply message in clear text.
1111  *
1112  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1113  * going to change.
1114  */
1115 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1116 {
1117         LASSERT(req->rq_repbuf);
1118         LASSERT(req->rq_repdata == NULL);
1119         LASSERT(req->rq_repmsg == NULL);
1120         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1121
1122         if (req->rq_reply_off == 0 &&
1123             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1124                 CERROR("real reply with offset 0\n");
1125                 return -EPROTO;
1126         }
1127
1128         if (req->rq_reply_off % 8 != 0) {
1129                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1130                 return -EPROTO;
1131         }
1132
1133         req->rq_repdata = (struct lustre_msg *)
1134                                 (req->rq_repbuf + req->rq_reply_off);
1135         req->rq_repdata_len = req->rq_nob_received;
1136
1137         return do_cli_unwrap_reply(req);
1138 }
1139
1140 /**
1141  * Used by ptlrpc client, to perform security transformation upon the early
1142  * reply message of \a req. We expect the rq_reply_off is 0, and
1143  * rq_nob_received is the early reply size.
1144  *
1145  * Because the receive buffer might still be posted, the reply data might
1146  * change at any time, whether we hold rq_lock or not. For this reason
1147  * we allocate a separate ptlrpc_request and reply buffer for early reply
1148  * processing.
1149  *
1150  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1151  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1152  * \a *req_ret to release it.
1153  * \retval -ve error number, and \a req_ret will not be set.
1154  */
1155 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1156                                    struct ptlrpc_request **req_ret)
1157 {
1158         struct ptlrpc_request *early_req;
1159         char *early_buf;
1160         int early_bufsz, early_size;
1161         int rc;
1162
1163         ENTRY;
1164
1165         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1166         if (early_req == NULL)
1167                 RETURN(-ENOMEM);
1168
1169         ptlrpc_cli_req_init(early_req);
1170
1171         early_size = req->rq_nob_received;
1172         early_bufsz = size_roundup_power2(early_size);
1173         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1174         if (early_buf == NULL)
1175                 GOTO(err_req, rc = -ENOMEM);
1176
1177         /* sanity checks and data copy-out, done under the spinlock */
1178         spin_lock(&req->rq_lock);
1179
1180         if (req->rq_replied) {
1181                 spin_unlock(&req->rq_lock);
1182                 GOTO(err_buf, rc = -EALREADY);
1183         }
1184
1185         LASSERT(req->rq_repbuf);
1186         LASSERT(req->rq_repdata == NULL);
1187         LASSERT(req->rq_repmsg == NULL);
1188
1189         if (req->rq_reply_off != 0) {
1190                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1191                 spin_unlock(&req->rq_lock);
1192                 GOTO(err_buf, rc = -EPROTO);
1193         }
1194
1195         if (req->rq_nob_received != early_size) {
1196                 /* even if another early reply arrived, the size should be the same */
1197                 CERROR("data size has changed from %u to %u\n",
1198                        early_size, req->rq_nob_received);
1199                 spin_unlock(&req->rq_lock);
1200                 GOTO(err_buf, rc = -EINVAL);
1201         }
1202
1203         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1204                 CERROR("early reply length %d too small\n",
1205                        req->rq_nob_received);
1206                 spin_unlock(&req->rq_lock);
1207                 GOTO(err_buf, rc = -EALREADY);
1208         }
1209
1210         memcpy(early_buf, req->rq_repbuf, early_size);
1211         spin_unlock(&req->rq_lock);
1212
1213         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1214         early_req->rq_flvr = req->rq_flvr;
1215         early_req->rq_repbuf = early_buf;
1216         early_req->rq_repbuf_len = early_bufsz;
1217         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1218         early_req->rq_repdata_len = early_size;
1219         early_req->rq_early = 1;
1220         early_req->rq_reqmsg = req->rq_reqmsg;
1221
1222         rc = do_cli_unwrap_reply(early_req);
1223         if (rc) {
1224                 DEBUG_REQ(D_ADAPTTO, early_req,
1225                           "unwrap early reply: rc = %d", rc);
1226                 GOTO(err_ctx, rc);
1227         }
1228
1229         LASSERT(early_req->rq_repmsg);
1230         *req_ret = early_req;
1231         RETURN(0);
1232
1233 err_ctx:
1234         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1235 err_buf:
1236         OBD_FREE_LARGE(early_buf, early_bufsz);
1237 err_req:
1238         ptlrpc_request_cache_free(early_req);
1239         RETURN(rc);
1240 }
1241
1242 /**
1243  * Used by ptlrpc client, to release a processed early reply \a early_req.
1244  *
1245  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1246  */
1247 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1248 {
1249         LASSERT(early_req->rq_repbuf);
1250         LASSERT(early_req->rq_repdata);
1251         LASSERT(early_req->rq_repmsg);
1252
1253         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1254         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1255         ptlrpc_request_cache_free(early_req);
1256 }
1257
1258 /**************************************************
1259  * sec ID                                         *
1260  **************************************************/
1261
1262 /*
1263  * "fixed" sec (e.g. null) use sec_id < 0
1264  */
1265 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1266
1267 int sptlrpc_get_next_secid(void)
1268 {
1269         return atomic_inc_return(&sptlrpc_sec_id);
1270 }
1271 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1272
1273 /*
1274  * client side high-level security APIs
1275  */
1276
1277 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1278                                    int grace, int force)
1279 {
1280         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1281
1282         LASSERT(policy->sp_cops);
1283         LASSERT(policy->sp_cops->flush_ctx_cache);
1284
1285         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1286 }
1287
1288 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1289 {
1290         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1291
1292         LASSERT(atomic_read(&sec->ps_refcount) == 0);
1293         LASSERT(atomic_read(&sec->ps_nctx) == 0);
1294         LASSERT(policy->sp_cops->destroy_sec);
1295
1296         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1297
1298         policy->sp_cops->destroy_sec(sec);
1299         sptlrpc_policy_put(policy);
1300 }
1301
1302 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1303 {
1304         sec_cop_destroy_sec(sec);
1305 }
1306 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1307
1308 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1309 {
1310         LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
1311
1312         if (sec->ps_policy->sp_cops->kill_sec) {
1313                 sec->ps_policy->sp_cops->kill_sec(sec);
1314
1315                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1316         }
1317 }
1318
1319 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1320 {
1321         if (sec)
1322                 atomic_inc(&sec->ps_refcount);
1323
1324         return sec;
1325 }
1326 EXPORT_SYMBOL(sptlrpc_sec_get);
1327
1328 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1329 {
1330         if (sec) {
1331                 LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
1332
1333                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1334                         sptlrpc_gc_del_sec(sec);
1335                         sec_cop_destroy_sec(sec);
1336                 }
1337         }
1338 }
1339 EXPORT_SYMBOL(sptlrpc_sec_put);
1340
1341 /*
1342  * the policy module is responsible for taking a reference on the import
1343  */
1344 static
1345 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1346                                        struct ptlrpc_svc_ctx *svc_ctx,
1347                                        struct sptlrpc_flavor *sf,
1348                                        enum lustre_sec_part sp)
1349 {
1350         struct ptlrpc_sec_policy *policy;
1351         struct ptlrpc_sec *sec;
1352         char str[32];
1353
1354         ENTRY;
1355
1356         if (svc_ctx) {
1357                 LASSERT(imp->imp_dlm_fake == 1);
1358
1359                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1360                        imp->imp_obd->obd_type->typ_name,
1361                        imp->imp_obd->obd_name,
1362                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1363
1364                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1365                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1366         } else {
1367                 LASSERT(imp->imp_dlm_fake == 0);
1368
1369                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1370                        imp->imp_obd->obd_type->typ_name,
1371                        imp->imp_obd->obd_name,
1372                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1373
1374                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1375                 if (!policy) {
1376                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1377                         RETURN(NULL);
1378                 }
1379         }
1380
1381         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1382         if (sec) {
1383                 atomic_inc(&sec->ps_refcount);
1384
1385                 sec->ps_part = sp;
1386
1387                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1388                         sptlrpc_gc_add_sec(sec);
1389         } else {
1390                 sptlrpc_policy_put(policy);
1391         }
1392
1393         RETURN(sec);
1394 }
1395
1396 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1397 {
1398         struct ptlrpc_sec *sec;
1399
1400         read_lock(&imp->imp_sec_lock);
1401         sec = sptlrpc_sec_get(imp->imp_sec);
1402         read_unlock(&imp->imp_sec_lock);
1403
1404         return sec;
1405 }
1406 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1407
1408 static void sptlrpc_import_sec_install(struct obd_import *imp,
1409                                        struct ptlrpc_sec *sec)
1410 {
1411         struct ptlrpc_sec *old_sec;
1412
1413         LASSERT(atomic_read(&(sec)->ps_refcount) > 0);
1414
1415         write_lock(&imp->imp_sec_lock);
1416         old_sec = imp->imp_sec;
1417         imp->imp_sec = sec;
1418         write_unlock(&imp->imp_sec_lock);
1419
1420         if (old_sec) {
1421                 sptlrpc_sec_kill(old_sec);
1422
1423                 /* balance the ref taken by this import */
1424                 sptlrpc_sec_put(old_sec);
1425         }
1426 }
1427
1428 static inline
1429 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1430 {
1431         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1432 }
1433
1434 static inline
1435 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1436 {
1437         *dst = *src;
1438 }
1439
1440 /**
1441  * Get an appropriate ptlrpc_sec for \a imp, according to the current
1442  * configuration. When called, imp->imp_sec may or may not be NULL.
1443  *
1444  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1445  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1446  */
1447 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1448                              struct ptlrpc_svc_ctx *svc_ctx,
1449                              struct sptlrpc_flavor *flvr)
1450 {
1451         struct ptlrpc_connection *conn;
1452         struct sptlrpc_flavor sf;
1453         struct ptlrpc_sec *sec, *newsec;
1454         enum lustre_sec_part sp;
1455         char str[24];
1456         int rc = 0;
1457
1458         ENTRY;
1459
1460         might_sleep();
1461
1462         if (imp == NULL)
1463                 RETURN(0);
1464
1465         conn = imp->imp_connection;
1466
1467         if (svc_ctx == NULL) {
1468                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1469                 /*
1470                  * normal import: determine the flavor from the rule set,
1471                  * except for the MGC, whose flavor is predetermined.
1472                  */
1473                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1474                         sf = cliobd->cl_flvr_mgc;
1475                 else
1476                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1477                                                    cliobd->cl_sp_to,
1478                                                    &cliobd->cl_target_uuid,
1479                                                    &conn->c_self, &sf);
1480
1481                 sp = imp->imp_obd->u.cli.cl_sp_me;
1482         } else {
1483                 /* reverse import: determine flavor from the incoming request */
1484                 sf = *flvr;
1485
1486                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1487                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1488                                       PTLRPC_SEC_FL_ROOTONLY;
1489
1490                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1491         }
1492
1493         sec = sptlrpc_import_sec_ref(imp);
1494         if (sec) {
1495                 char str2[24];
1496
1497                 if (flavor_equal(&sf, &sec->ps_flvr))
1498                         GOTO(out, rc);
1499
1500                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1501                        imp->imp_obd->obd_name,
1502                        obd_uuid2str(&conn->c_remote_uuid),
1503                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1504                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1505         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1506                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1507                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1508                        imp->imp_obd->obd_name,
1509                        obd_uuid2str(&conn->c_remote_uuid),
1510                        LNET_NID_NET(&conn->c_self),
1511                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1512         }
1513
1514         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1515         if (newsec) {
1516                 sptlrpc_import_sec_install(imp, newsec);
1517         } else {
1518                 CERROR("import %s->%s: failed to create new sec\n",
1519                        imp->imp_obd->obd_name,
1520                        obd_uuid2str(&conn->c_remote_uuid));
1521                 rc = -EPERM;
1522         }
1523
1524 out:
1525         sptlrpc_sec_put(sec);
1526         RETURN(rc);
1527 }
1528
1529 void sptlrpc_import_sec_put(struct obd_import *imp)
1530 {
1531         if (imp->imp_sec) {
1532                 sptlrpc_sec_kill(imp->imp_sec);
1533
1534                 sptlrpc_sec_put(imp->imp_sec);
1535                 imp->imp_sec = NULL;
1536         }
1537 }
1538
1539 static void import_flush_ctx_common(struct obd_import *imp,
1540                                     uid_t uid, int grace, int force)
1541 {
1542         struct ptlrpc_sec *sec;
1543
1544         if (imp == NULL)
1545                 return;
1546
1547         sec = sptlrpc_import_sec_ref(imp);
1548         if (sec == NULL)
1549                 return;
1550
1551         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1552         sptlrpc_sec_put(sec);
1553 }
1554
1555 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1556 {
1557         /*
1558          * it's important to use grace mode, see the explanation in
1559          * sptlrpc_req_refresh_ctx()
1560          */
1561         import_flush_ctx_common(imp, 0, 1, 1);
1562 }
1563
1564 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1565 {
1566         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1567                                 1, 1);
1568 }
1569 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1570
1571 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1572 {
1573         import_flush_ctx_common(imp, -1, 1, 1);
1574 }
1575 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
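/*
 * Summary of the flush helpers above (informal): uid 0 flushes only root's
 * contexts, the current user's uid flushes that user's contexts, and
 * uid -1 flushes every cached context; all three flush in grace mode and
 * with force set.
 */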
1576
1577 /**
1578  * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1579  * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1580  */
1581 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1582 {
1583         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1584         struct ptlrpc_sec_policy *policy;
1585         int rc;
1586
1587         LASSERT(ctx);
1588         LASSERT(ctx->cc_sec);
1589         LASSERT(ctx->cc_sec->ps_policy);
1590         LASSERT(req->rq_reqmsg == NULL);
1591         LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
1592
1593         policy = ctx->cc_sec->ps_policy;
1594         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1595         if (!rc) {
1596                 LASSERT(req->rq_reqmsg);
1597                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1598
1599                 /* zeroing preallocated buffer */
1600                 if (req->rq_pool)
1601                         memset(req->rq_reqmsg, 0, msgsize);
1602         }
1603
1604         return rc;
1605 }
1606
1607 /**
1608  * Used by ptlrpc client to free the request buffer of \a req. After this,
1609  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1610  */
1611 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1612 {
1613         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1614         struct ptlrpc_sec_policy *policy;
1615
1616         LASSERT(ctx);
1617         LASSERT(ctx->cc_sec);
1618         LASSERT(ctx->cc_sec->ps_policy);
1619         LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
1620
1621         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1622                 return;
1623
1624         policy = ctx->cc_sec->ps_policy;
1625         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1626         req->rq_reqmsg = NULL;
1627 }
1628
1629 /*
1630  * NOTE caller must guarantee the buffer size is enough for the enlargement
1631  */
1632 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1633                                   int segment, int newsize)
1634 {
1635         void *src, *dst;
1636         int oldsize, oldmsg_size, movesize;
1637
1638         LASSERT(segment < msg->lm_bufcount);
1639         LASSERT(msg->lm_buflens[segment] <= newsize);
1640
1641         if (msg->lm_buflens[segment] == newsize)
1642                 return;
1643
1644         /* nothing to do if we are enlarging the last segment */
1645         if (segment == msg->lm_bufcount - 1) {
1646                 msg->lm_buflens[segment] = newsize;
1647                 return;
1648         }
1649
1650         oldsize = msg->lm_buflens[segment];
1651
1652         src = lustre_msg_buf(msg, segment + 1, 0);
1653         msg->lm_buflens[segment] = newsize;
1654         dst = lustre_msg_buf(msg, segment + 1, 0);
1655         msg->lm_buflens[segment] = oldsize;
1656
1657         /* move data from segment + 1 through the last segment */
1658         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1659         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1660         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1661         LASSERT(movesize >= 0);
1662
1663         if (movesize)
1664                 memmove(dst, src, movesize);
1665
1666         /* note we don't clear the area where the old data lived; not secret */
1667
1668         /* finally set new segment size */
1669         msg->lm_buflens[segment] = newsize;
1670 }
1671 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
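     /*
      * A quick illustration of the in-place enlargement (numbers are made up):
      * with lm_bufcount == 3 and 8-byte-aligned buflens {128, 8, 64}, enlarging
      * segment 1 to 16 moves segment 2 forward by 8 bytes, giving buflens
      * {128, 16, 64}; only the data behind the enlarged segment is moved.
      */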
1672
1673 /**
1674  * Used by ptlrpc client to enlarge the segment of the request message
1675  * (pointed to by req->rq_reqmsg) corresponding to \a field to size \a newsize.
1676  * All previously filled-in data is preserved after the enlargement. This must
1677  * be called after the original request buffer has been allocated.
1678  *
1679  * \note After this is called, rq_reqmsg and rq_reqlen might have changed, so
1680  * the caller should refresh its local pointers if needed.
1681  */
1682 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1683                                const struct req_msg_field *field,
1684                                int newsize)
1685 {
1686         struct req_capsule *pill = &req->rq_pill;
1687         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1688         struct ptlrpc_sec_cops *cops;
1689         struct lustre_msg *msg = req->rq_reqmsg;
1690         int segment = __req_capsule_offset(pill, field, RCL_CLIENT);
1691
1692         LASSERT(ctx);
1693         LASSERT(msg);
1694         LASSERT(msg->lm_bufcount > segment);
1695         LASSERT(msg->lm_buflens[segment] <= newsize);
1696
1697         if (msg->lm_buflens[segment] == newsize)
1698                 return 0;
1699
1700         cops = ctx->cc_sec->ps_policy->sp_cops;
1701         LASSERT(cops->enlarge_reqbuf);
1702         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1703 }
1704 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
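     /*
      * Hypothetical caller sketch (not from this file; RMF_SOMEFIELD stands in
      * for a real req_msg_field): after a successful enlargement the request
      * buffer may have been reallocated, so cached pointers into rq_reqmsg
      * should be re-fetched, e.g. through req_capsule_client_get():
      *
      *      rc = sptlrpc_cli_enlarge_reqbuf(req, &RMF_SOMEFIELD, newsize);
      *      if (rc == 0)
      *              ptr = req_capsule_client_get(&req->rq_pill, &RMF_SOMEFIELD);
      */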
1705
1706 /**
1707  * Used by ptlrpc client to allocate reply buffer of \a req.
1708  *
1709  * \note After this, req->rq_repmsg is still not accessible.
1710  */
1711 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1712 {
1713         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1714         struct ptlrpc_sec_policy *policy;
1715
1716         ENTRY;
1717
1718         LASSERT(ctx);
1719         LASSERT(ctx->cc_sec);
1720         LASSERT(ctx->cc_sec->ps_policy);
1721
1722         if (req->rq_repbuf)
1723                 RETURN(0);
1724
1725         policy = ctx->cc_sec->ps_policy;
1726         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1727 }
1728
1729 /**
1730  * Used by ptlrpc client to free the reply buffer of \a req. After this,
1731  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1732  */
1733 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1734 {
1735         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1736         struct ptlrpc_sec_policy *policy;
1737
1738         ENTRY;
1739
1740         LASSERT(ctx);
1741         LASSERT(ctx->cc_sec);
1742         LASSERT(ctx->cc_sec->ps_policy);
1743         LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);
1744
1745         if (req->rq_repbuf == NULL)
1746                 return;
1747         LASSERT(req->rq_repbuf_len);
1748
1749         policy = ctx->cc_sec->ps_policy;
1750         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1751         req->rq_repmsg = NULL;
1752         EXIT;
1753 }
1754 EXPORT_SYMBOL(sptlrpc_cli_free_repbuf);
1755
1756 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1757                                 struct ptlrpc_cli_ctx *ctx)
1758 {
1759         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1760
1761         if (!policy->sp_cops->install_rctx)
1762                 return 0;
1763         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1764 }
1765
1766 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1767                                 struct ptlrpc_svc_ctx *ctx)
1768 {
1769         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1770
1771         if (!policy->sp_sops->install_rctx)
1772                 return 0;
1773         return policy->sp_sops->install_rctx(imp, ctx);
1774 }
1775
1776 /* Get SELinux policy info from userspace */
1777 static int sepol_helper(struct obd_import *imp)
1778 {
1779         char mtime_str[21] = { 0 }, mode_str[2] = { 0 };
1780         char *argv[] = {
1781                 [0] = "/usr/sbin/l_getsepol",
1782                 [1] = "-o",
1783                 [2] = NULL,         /* obd type */
1784                 [3] = "-n",
1785                 [4] = NULL,         /* obd name */
1786                 [5] = "-t",
1787                 [6] = mtime_str,    /* policy mtime */
1788                 [7] = "-m",
1789                 [8] = mode_str,     /* enforcing mode */
1790                 [9] = NULL
1791         };
1792         char *envp[] = {
1793                 [0] = "HOME=/",
1794                 [1] = "PATH=/sbin:/usr/sbin",
1795                 [2] = NULL
1796         };
1797         signed short ret;
1798         int rc = 0;
1799
1800         if (imp == NULL || imp->imp_obd == NULL ||
1801             imp->imp_obd->obd_type == NULL) {
1802                 rc = -EINVAL;
1803         } else {
1804                 argv[2] = (char *)imp->imp_obd->obd_type->typ_name;
1805                 argv[4] = imp->imp_obd->obd_name;
1806                 spin_lock(&imp->imp_sec->ps_lock);
1807                 if (ktime_to_ns(imp->imp_sec->ps_sepol_mtime) == 0 &&
1808                     imp->imp_sec->ps_sepol[0] == '\0') {
1809                         /* ps_sepol has not been initialized */
1810                         argv[5] = NULL;
1811                         argv[7] = NULL;
1812                 } else {
1813                         time64_t mtime_ms;
1814
1815                         mtime_ms = ktime_to_ms(imp->imp_sec->ps_sepol_mtime);
1816                         snprintf(mtime_str, sizeof(mtime_str), "%lld",
1817                                  mtime_ms / MSEC_PER_SEC);
1818                         mode_str[0] = imp->imp_sec->ps_sepol[0];
1819                 }
1820                 spin_unlock(&imp->imp_sec->ps_lock);
1821                 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
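                     /* UMH_WAIT_PROC yields the wait() status of l_getsepol;
                      * its exit code is in bits 15:8, hence the shift below */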
1822                 rc = ret >> 8;
1823         }
1824
1825         return rc;
1826 }
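     /*
      * The helper above ends up running roughly:
      *   /usr/sbin/l_getsepol -o <obd type> -n <obd name> [-t <mtime> -m <mode>]
      * where -t/-m are only passed once ps_sepol has been initialized.
      */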
1827
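     /*
      * send_sepol semantics as used here: 0 never fetches the SELinux status,
      * -1 fetches it before every request, and a positive value N re-fetches
      * it at most every N seconds.
      */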
1828 static inline int sptlrpc_sepol_needs_check(struct ptlrpc_sec *imp_sec)
1829 {
1830         ktime_t checknext;
1831
1832         if (send_sepol == 0)
1833                 return 0;
1834
1835         if (send_sepol == -1)
1836                 /* send_sepol == -1 means fetch sepol status every time */
1837                 return 1;
1838
1839         spin_lock(&imp_sec->ps_lock);
1840         checknext = imp_sec->ps_sepol_checknext;
1841         spin_unlock(&imp_sec->ps_lock);
1842
1843         /* the next check is too far in the future, reset it */
1844         if (ktime_after(checknext,
1845                         ktime_add(ktime_get(), ktime_set(send_sepol, 0))))
1846                 goto setnext;
1847
1848         if (ktime_before(ktime_get(), checknext))
1849                 /* too early to fetch sepol status */
1850                 return 0;
1851
1852 setnext:
1853         /* define new sepol_checknext time */
1854         spin_lock(&imp_sec->ps_lock);
1855         imp_sec->ps_sepol_checknext = ktime_add(ktime_get(),
1856                                                 ktime_set(send_sepol, 0));
1857         spin_unlock(&imp_sec->ps_lock);
1858
1859         return 1;
1860 }
1861
1862 int sptlrpc_get_sepol(struct ptlrpc_request *req)
1863 {
1864         struct ptlrpc_sec *imp_sec = req->rq_import->imp_sec;
1865         int rc = 0;
1866
1867         ENTRY;
1868
1869         (req->rq_sepol)[0] = '\0';
1870
1871 #ifndef HAVE_SELINUX
1872         if (unlikely(send_sepol != 0))
1873                 CDEBUG(D_SEC,
1874                        "Client cannot report SELinux status, it was not built against libselinux.\n");
1875         RETURN(0);
1876 #endif
1877
1878         if (send_sepol == 0)
1879                 RETURN(0);
1880
1881         if (imp_sec == NULL)
1882                 RETURN(-EINVAL);
1883
1884         /* Retrieve SELinux status info */
1885         if (sptlrpc_sepol_needs_check(imp_sec))
1886                 rc = sepol_helper(req->rq_import);
1887         if (likely(rc == 0)) {
1888                 spin_lock(&imp_sec->ps_lock);
1889                 memcpy(req->rq_sepol, imp_sec->ps_sepol,
1890                        sizeof(req->rq_sepol));
1891                 spin_unlock(&imp_sec->ps_lock);
1892         } else if (rc == -ENODEV) {
1893                 CDEBUG(D_SEC,
1894                        "Client cannot report SELinux status, SELinux is disabled.\n");
1895                 rc = 0;
1896         }
1897
1898         RETURN(rc);
1899 }
1900 EXPORT_SYMBOL(sptlrpc_get_sepol);
1901
1902 /*
1903  * server side security
1904  */
1905
1906 static int flavor_allowed(struct sptlrpc_flavor *exp,
1907                           struct ptlrpc_request *req)
1908 {
1909         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1910
1911         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1912                 return 1;
1913
1914         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1915             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1916             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1917             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1918                 return 1;
1919
1920         return 0;
1921 }
1922
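     /*
      * After a flavor change, requests using an old flavor are still accepted
      * for this many seconds (see the exp_flvr_expire[] handling below).
      */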
1923 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1924
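     /*
      * An export tracks several flavors: exp_flvr is the current one; while a
      * configuration change is pending (exp_flvr_changed != 0) the new flavor
      * sits in exp_flvr_old[1]; once the change completes, exp_flvr_old[0]/[1]
      * hold the previous flavors and exp_flvr_expire[0]/[1] record until when
      * each of them is still accepted.
      */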
1925 /**
1926  * Given an export \a exp, check whether the flavor of incoming \a req
1927  * is allowed by the export \a exp. The main logic deals with flavor
1928  * configuration changes. Returns 0 on success.
1929  */
1930 int sptlrpc_target_export_check(struct obd_export *exp,
1931                                 struct ptlrpc_request *req)
1932 {
1933         struct sptlrpc_flavor   flavor;
1934
1935         if (exp == NULL)
1936                 return 0;
1937
1938         /*
1939          * client side export has no imp_reverse, skip
1940          * FIXME maybe we should check the flavor in this case as well???
1941          */
1942         if (exp->exp_imp_reverse == NULL)
1943                 return 0;
1944
1945         /* don't care about ctx fini rpc */
1946         if (req->rq_ctx_fini)
1947                 return 0;
1948
1949         spin_lock(&exp->exp_lock);
1950
1951         /*
1952          * if the flavor has just changed (exp->exp_flvr_changed != 0), we wait
1953          * for the first req with the new flavor, then treat it as the current
1954          * flavor and adapt the reverse sec accordingly.
1955          * note the first rpc with the new flavor might not carry a root ctx, in
1956          * which case we delay the sec_adapt by leaving exp_flvr_adapt == 1.
1957          */
1958         if (unlikely(exp->exp_flvr_changed) &&
1959             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1960                 /*
1961                  * make the new flavor the "current" one, and the old ones
1962                  * about-to-expire
1963                  */
1964                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1965                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1966                 flavor = exp->exp_flvr_old[1];
1967                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1968                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1969                 exp->exp_flvr_old[0] = exp->exp_flvr;
1970                 exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
1971                                           EXP_FLVR_UPDATE_EXPIRE;
1972                 exp->exp_flvr = flavor;
1973
1974                 /* flavor change finished */
1975                 exp->exp_flvr_changed = 0;
1976                 LASSERT(exp->exp_flvr_adapt == 1);
1977
1978                 /* if it's gss, we are only interested in root ctx init */
1979                 if (req->rq_auth_gss &&
1980                     !(req->rq_ctx_init &&
1981                     (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1982                     req->rq_auth_usr_ost))) {
1983                         spin_unlock(&exp->exp_lock);
1984                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1985                                req->rq_auth_gss, req->rq_ctx_init,
1986                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1987                                req->rq_auth_usr_ost);
1988                         return 0;
1989                 }
1990
1991                 exp->exp_flvr_adapt = 0;
1992                 spin_unlock(&exp->exp_lock);
1993
1994                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1995                                                 req->rq_svc_ctx, &flavor);
1996         }
1997
1998         /*
1999          * if it equals the current flavor, we accept it, but we still need
2000          * to deal with the reverse sec/ctx
2001          */
2002         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
2003                 /*
2004                  * most cases should return here; we are only interested in
2005                  * gss root ctx init
2006                  */
2007                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
2008                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2009                      !req->rq_auth_usr_ost)) {
2010                         spin_unlock(&exp->exp_lock);
2011                         return 0;
2012                 }
2013
2014                 /*
2015                  * if the flavor has just changed, we should not proceed; just
2016                  * leave it, the current flavor will be discovered and replaced
2017                  * shortly, and let _this_ rpc pass through
2018                  */
2019                 if (exp->exp_flvr_changed) {
2020                         LASSERT(exp->exp_flvr_adapt);
2021                         spin_unlock(&exp->exp_lock);
2022                         return 0;
2023                 }
2024
2025                 if (exp->exp_flvr_adapt) {
2026                         exp->exp_flvr_adapt = 0;
2027                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
2028                                exp, exp->exp_flvr.sf_rpc,
2029                                exp->exp_flvr_old[0].sf_rpc,
2030                                exp->exp_flvr_old[1].sf_rpc);
2031                         flavor = exp->exp_flvr;
2032                         spin_unlock(&exp->exp_lock);
2033
2034                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
2035                                                         req->rq_svc_ctx,
2036                                                         &flavor);
2037                 } else {
2038                         CDEBUG(D_SEC,
2039                                "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
2040                                exp, exp->exp_flvr.sf_rpc,
2041                                exp->exp_flvr_old[0].sf_rpc,
2042                                exp->exp_flvr_old[1].sf_rpc);
2043                         spin_unlock(&exp->exp_lock);
2044
2045                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
2046                                                            req->rq_svc_ctx);
2047                 }
2048         }
2049
2050         if (exp->exp_flvr_expire[0]) {
2051                 if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
2052                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
2053                                 CDEBUG(D_SEC,
2054                                        "exp %p (%x|%x|%x): match the middle one (%lld)\n",
2055                                        exp, exp->exp_flvr.sf_rpc,
2056                                        exp->exp_flvr_old[0].sf_rpc,
2057                                        exp->exp_flvr_old[1].sf_rpc,
2058                                        (s64)(exp->exp_flvr_expire[0] -
2059                                              ktime_get_real_seconds()));
2060                                 spin_unlock(&exp->exp_lock);
2061                                 return 0;
2062                         }
2063                 } else {
2064                         CDEBUG(D_SEC, "mark middle expired\n");
2065                         exp->exp_flvr_expire[0] = 0;
2066                 }
2067                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
2068                        exp->exp_flvr.sf_rpc,
2069                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2070                        req->rq_flvr.sf_rpc);
2071         }
2072
2073         /*
2074          * now it doesn't match the current flavor; the only way we can
2075          * accept it is if it matches an old flavor which has not expired.
2076          */
2077         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
2078                 if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
2079                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
2080                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
2081                                        exp,
2082                                        exp->exp_flvr.sf_rpc,
2083                                        exp->exp_flvr_old[0].sf_rpc,
2084                                        exp->exp_flvr_old[1].sf_rpc,
2085                                        (s64)(exp->exp_flvr_expire[1] -
2086                                        ktime_get_real_seconds()));
2087                                 spin_unlock(&exp->exp_lock);
2088                                 return 0;
2089                         }
2090                 } else {
2091                         CDEBUG(D_SEC, "mark oldest expired\n");
2092                         exp->exp_flvr_expire[1] = 0;
2093                 }
2094                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
2095                        exp, exp->exp_flvr.sf_rpc,
2096                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2097                        req->rq_flvr.sf_rpc);
2098         } else {
2099                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
2100                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
2101                        exp->exp_flvr_old[1].sf_rpc);
2102         }
2103
2104         spin_unlock(&exp->exp_lock);
2105
2106         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
2107               exp, exp->exp_obd->obd_name,
2108               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
2109               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
2110               req->rq_flvr.sf_rpc,
2111               exp->exp_flvr.sf_rpc,
2112               exp->exp_flvr_old[0].sf_rpc,
2113               exp->exp_flvr_expire[0] ?
2114               (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
2115               exp->exp_flvr_old[1].sf_rpc,
2116               exp->exp_flvr_expire[1] ?
2117               (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
2118         return -EACCES;
2119 }
2120 EXPORT_SYMBOL(sptlrpc_target_export_check);
2121
2122 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
2123                                       struct sptlrpc_rule_set *rset)
2124 {
2125         struct obd_export *exp;
2126         struct sptlrpc_flavor new_flvr;
2127
2128         LASSERT(obd);
2129
2130         spin_lock(&obd->obd_dev_lock);
2131
2132         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2133                 if (exp->exp_connection == NULL)
2134                         continue;
2135
2136                 /*
2137                  * note if this export's flavor had just been updated
2138                  * (exp_flvr_changed == 1), this will override the
2139                  * previous update.
2140                  */
2141                 spin_lock(&exp->exp_lock);
2142                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
2143                                              &exp->exp_connection->c_peer.nid,
2144                                              &new_flvr);
2145                 if (exp->exp_flvr_changed ||
2146                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
2147                         exp->exp_flvr_old[1] = new_flvr;
2148                         exp->exp_flvr_expire[1] = 0;
2149                         exp->exp_flvr_changed = 1;
2150                         exp->exp_flvr_adapt = 1;
2151
2152                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
2153                                exp, sptlrpc_part2name(exp->exp_sp_peer),
2154                                exp->exp_flvr.sf_rpc,
2155                                exp->exp_flvr_old[1].sf_rpc);
2156                 }
2157                 spin_unlock(&exp->exp_lock);
2158         }
2159
2160         spin_unlock(&obd->obd_dev_lock);
2161 }
2162 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
2163
2164 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2165 {
2166         /* peer's claim is unreliable unless gss is being used */
2167         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2168                 return svc_rc;
2169
2170         switch (req->rq_sp_from) {
2171         case LUSTRE_SP_CLI:
2172                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2173                         /* The below message is checked in sanity-sec test_33 */
2174                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2175                         svc_rc = SECSVC_DROP;
2176                 }
2177                 break;
2178         case LUSTRE_SP_MDT:
2179                 if (!req->rq_auth_usr_mdt) {
2180                         /* The below message is checked in sanity-sec test_33 */
2181                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2182                         svc_rc = SECSVC_DROP;
2183                 }
2184                 break;
2185         case LUSTRE_SP_OST:
2186                 if (!req->rq_auth_usr_ost) {
2187                         /* The below message is checked in sanity-sec test_33 */
2188                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2189                         svc_rc = SECSVC_DROP;
2190                 }
2191                 break;
2192         case LUSTRE_SP_MGS:
2193         case LUSTRE_SP_MGC:
2194                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2195                     !req->rq_auth_usr_ost) {
2196                         /* The below message is checked in sanity-sec test_33 */
2197                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2198                         svc_rc = SECSVC_DROP;
2199                 }
2200                 break;
2201         case LUSTRE_SP_ANY:
2202         default:
2203                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2204                 svc_rc = SECSVC_DROP;
2205         }
2206
2207         return svc_rc;
2208 }
2209
2210 /**
2211  * Used by ptlrpc server to perform the security transformation on the
2212  * request message of incoming \a req. This must be the first thing done
2213  * with an incoming request in the ptlrpc layer.
2214  *
2215  * \retval SECSVC_OK success, req->rq_reqmsg points to the request message in
2216  * clear text, its size is req->rq_reqlen, and req->rq_svc_ctx is set.
2217  * \retval SECSVC_COMPLETE success, the request has been fully processed and
2218  * the reply message has been prepared.
2219  * \retval SECSVC_DROP failed, this request should be dropped.
2220  */
2221 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2222 {
2223         struct ptlrpc_sec_policy *policy;
2224         struct lustre_msg *msg = req->rq_reqbuf;
2225         int rc;
2226
2227         ENTRY;
2228
2229         LASSERT(msg);
2230         LASSERT(req->rq_reqmsg == NULL);
2231         LASSERT(req->rq_repmsg == NULL);
2232         LASSERT(req->rq_svc_ctx == NULL);
2233
2234         req->rq_req_swab_mask = 0;
2235
2236         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2237         switch (rc) {
2238         case 1:
2239                 req_capsule_set_req_swabbed(&req->rq_pill,
2240                                             MSG_PTLRPC_HEADER_OFF);
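                     /* fall through: a swabbed header is still a valid unpack */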
2241         case 0:
2242                 break;
2243         default:
2244                 CERROR("error unpacking request from %s x%llu\n",
2245                        libcfs_idstr(&req->rq_peer), req->rq_xid);
2246                 RETURN(SECSVC_DROP);
2247         }
2248
2249         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2250         req->rq_sp_from = LUSTRE_SP_ANY;
2251         req->rq_auth_uid = -1; /* set to INVALID_UID */
2252         req->rq_auth_mapped_uid = -1;
2253
2254         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2255         if (!policy) {
2256                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2257                 RETURN(SECSVC_DROP);
2258         }
2259
2260         LASSERT(policy->sp_sops->accept);
2261         rc = policy->sp_sops->accept(req);
2262         sptlrpc_policy_put(policy);
2263         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2264         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2265
2266         /*
2267          * if it's not the null flavor (which means the real msg is embedded
2268          * inside), reset the swab mask for the coming inner msg unpacking.
2269          */
2270         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2271                 req->rq_req_swab_mask = 0;
2272
2273         /* sanity check for the request source */
2274         rc = sptlrpc_svc_check_from(req, rc);
2275         RETURN(rc);
2276 }
2277
2278 /**
2279  * Used by ptlrpc server to allocate the reply buffer for \a req. On success,
2280  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2281  * a buffer of \a msglen size.
2282  */
2283 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2284 {
2285         struct ptlrpc_sec_policy *policy;
2286         struct ptlrpc_reply_state *rs;
2287         int rc;
2288
2289         ENTRY;
2290
2291         LASSERT(req->rq_svc_ctx);
2292         LASSERT(req->rq_svc_ctx->sc_policy);
2293
2294         policy = req->rq_svc_ctx->sc_policy;
2295         LASSERT(policy->sp_sops->alloc_rs);
2296
2297         rc = policy->sp_sops->alloc_rs(req, msglen);
2298         if (unlikely(rc == -ENOMEM)) {
2299                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2300
2301                 if (svcpt->scp_service->srv_max_reply_size <
2302                    msglen + sizeof(struct ptlrpc_reply_state)) {
2303                         /* Just return failure if the size is too big */
2304                         CERROR("size of message is too big (%zd), %d allowed\n",
2305                                 msglen + sizeof(struct ptlrpc_reply_state),
2306                                 svcpt->scp_service->srv_max_reply_size);
2307                         RETURN(-ENOMEM);
2308                 }
2309
2310                 /* failed alloc, try emergency pool */
2311                 rs = lustre_get_emerg_rs(svcpt);
2312                 if (rs == NULL)
2313                         RETURN(-ENOMEM);
2314
2315                 req->rq_reply_state = rs;
2316                 rc = policy->sp_sops->alloc_rs(req, msglen);
2317                 if (rc) {
2318                         lustre_put_emerg_rs(rs);
2319                         req->rq_reply_state = NULL;
2320                 }
2321         }
2322
2323         LASSERT(rc != 0 ||
2324                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2325
2326         RETURN(rc);
2327 }
2328
2329 /**
2330  * Used by ptlrpc server to perform transformation upon the reply message.
2331  *
2332  * \post req->rq_reply_off is set to the appropriate server-controlled offset.
2333  * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2334  */
2335 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2336 {
2337         struct ptlrpc_sec_policy *policy;
2338         int rc;
2339
2340         ENTRY;
2341
2342         LASSERT(req->rq_svc_ctx);
2343         LASSERT(req->rq_svc_ctx->sc_policy);
2344
2345         policy = req->rq_svc_ctx->sc_policy;
2346         LASSERT(policy->sp_sops->authorize);
2347
2348         rc = policy->sp_sops->authorize(req);
2349         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2350
2351         RETURN(rc);
2352 }
2353
2354 /**
2355  * Used by ptlrpc server to free the reply_state.
2356  */
2357 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2358 {
2359         struct ptlrpc_sec_policy *policy;
2360         unsigned int prealloc;
2361
2362         ENTRY;
2363
2364         LASSERT(rs->rs_svc_ctx);
2365         LASSERT(rs->rs_svc_ctx->sc_policy);
2366
2367         policy = rs->rs_svc_ctx->sc_policy;
2368         LASSERT(policy->sp_sops->free_rs);
2369
2370         prealloc = rs->rs_prealloc;
2371         policy->sp_sops->free_rs(rs);
2372
2373         if (prealloc)
2374                 lustre_put_emerg_rs(rs);
2375         EXIT;
2376 }
2377
2378 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2379 {
2380         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2381
2382         if (ctx != NULL)
2383                 atomic_inc(&ctx->sc_refcount);
2384 }
2385
2386 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2387 {
2388         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2389
2390         if (ctx == NULL)
2391                 return;
2392
2393         LASSERT(atomic_read(&(ctx)->sc_refcount) > 0);
2394         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2395                 if (ctx->sc_policy->sp_sops->free_ctx)
2396                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2397         }
2398         req->rq_svc_ctx = NULL;
2399 }
2400
2401 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2402 {
2403         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2404
2405         if (ctx == NULL)
2406                 return;
2407
2408         LASSERT(atomic_read(&(ctx)->sc_refcount) > 0);
2409         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2410                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2411 }
2412 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2413
2414 /*
2415  * bulk security
2416  */
2417
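     /*
      * All of the bulk hooks below are no-ops unless rq_pack_bulk is set,
      * which happens when the chosen flavor protects bulk data (see
      * sptlrpc_flavor_has_bulk()).
      */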
2418 /**
2419  * Perform transformation upon the bulk data pointed to by \a desc. This is
2420  * called before transforming the request message.
2421  */
2422 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2423                           struct ptlrpc_bulk_desc *desc)
2424 {
2425         struct ptlrpc_cli_ctx *ctx;
2426
2427         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2428
2429         if (!req->rq_pack_bulk)
2430                 return 0;
2431
2432         ctx = req->rq_cli_ctx;
2433         if (ctx->cc_ops->wrap_bulk)
2434                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2435         return 0;
2436 }
2437 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2438
2439 /**
2440  * This is called after unwrapping the reply message.
2441  * Return the number of bytes of plain text actually received, or an error code.
2442  */
2443 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2444                                  struct ptlrpc_bulk_desc *desc,
2445                                  int nob)
2446 {
2447         struct ptlrpc_cli_ctx *ctx;
2448         int rc;
2449
2450         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2451
2452         if (!req->rq_pack_bulk)
2453                 return desc->bd_nob_transferred;
2454
2455         ctx = req->rq_cli_ctx;
2456         if (ctx->cc_ops->unwrap_bulk) {
2457                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2458                 if (rc < 0)
2459                         return rc;
2460         }
2461         return desc->bd_nob_transferred;
2462 }
2463 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2464
2465 /**
2466  * This is called after unwrapping the reply message.
2467  * Return 0 on success or an error code.
2468  */
2469 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2470                                   struct ptlrpc_bulk_desc *desc)
2471 {
2472         struct ptlrpc_cli_ctx *ctx;
2473         int rc;
2474
2475         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2476
2477         if (!req->rq_pack_bulk)
2478                 return 0;
2479
2480         ctx = req->rq_cli_ctx;
2481         if (ctx->cc_ops->unwrap_bulk) {
2482                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2483                 if (rc < 0)
2484                         return rc;
2485         }
2486
2487         /*
2488          * if everything is going right, nob should equal nob_transferred.
2489          * in case of privacy mode, nob_transferred needs to be adjusted.
2490          */
2491         if (desc->bd_nob != desc->bd_nob_transferred) {
2492                 CERROR("nob %d doesn't match transferred nob %d\n",
2493                        desc->bd_nob, desc->bd_nob_transferred);
2494                 return -EPROTO;
2495         }
2496
2497         return 0;
2498 }
2499 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2500
2501 #ifdef HAVE_SERVER_SUPPORT
2502 /**
2503  * Perform transformation upon outgoing bulk read.
2504  */
2505 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2506                           struct ptlrpc_bulk_desc *desc)
2507 {
2508         struct ptlrpc_svc_ctx *ctx;
2509
2510         LASSERT(req->rq_bulk_read);
2511
2512         if (!req->rq_pack_bulk)
2513                 return 0;
2514
2515         ctx = req->rq_svc_ctx;
2516         if (ctx->sc_policy->sp_sops->wrap_bulk)
2517                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2518
2519         return 0;
2520 }
2521 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2522
2523 /**
2524  * Perform transformation upon incoming bulk write.
2525  */
2526 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2527                             struct ptlrpc_bulk_desc *desc)
2528 {
2529         struct ptlrpc_svc_ctx *ctx;
2530         int rc;
2531
2532         LASSERT(req->rq_bulk_write);
2533
2534         /*
2535          * in privacy mode, transferred should be >= expected; otherwise
2536          * transferred should be == expected.
2537          */
2538         if (desc->bd_nob_transferred < desc->bd_nob ||
2539             (desc->bd_nob_transferred > desc->bd_nob &&
2540              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2541              SPTLRPC_BULK_SVC_PRIV)) {
2542                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2543                           desc->bd_nob_transferred, desc->bd_nob);
2544                 return -ETIMEDOUT;
2545         }
2546
2547         if (!req->rq_pack_bulk)
2548                 return 0;
2549
2550         ctx = req->rq_svc_ctx;
2551         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2552                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2553                 if (rc)
2554                         CERROR("error unwrap bulk: %d\n", rc);
2555         }
2556
2557         /* return 0 to allow the reply to be sent */
2558         return 0;
2559 }
2560 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2561
2562 /**
2563  * Prepare buffers for incoming bulk write.
2564  */
2565 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2566                           struct ptlrpc_bulk_desc *desc)
2567 {
2568         struct ptlrpc_svc_ctx *ctx;
2569
2570         LASSERT(req->rq_bulk_write);
2571
2572         if (!req->rq_pack_bulk)
2573                 return 0;
2574
2575         ctx = req->rq_svc_ctx;
2576         if (ctx->sc_policy->sp_sops->prep_bulk)
2577                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2578
2579         return 0;
2580 }
2581 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2582
2583 #endif /* HAVE_SERVER_SUPPORT */
2584
2585 /*
2586  * user descriptor helpers
2587  */
2588
2589 int sptlrpc_current_user_desc_size(void)
2590 {
2591         int ngroups;
2592
2593         ngroups = current_cred()->group_info->ngroups;
2594
2595         if (ngroups > LUSTRE_MAX_GROUPS)
2596                 ngroups = LUSTRE_MAX_GROUPS;
2597         return sptlrpc_user_desc_size(ngroups);
2598 }
2599 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2600
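     /*
      * The descriptor buffer at offset is presumably sized via
      * sptlrpc_current_user_desc_size(), i.e. sizeof(struct ptlrpc_user_desc)
      * plus 4 bytes per group, which is how pud_ngroups is derived below.
      */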
2601 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2602 {
2603         struct ptlrpc_user_desc *pud;
2604         int ngroups;
2605
2606         pud = lustre_msg_buf(msg, offset, 0);
2607
2608         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2609         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2610         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2611         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2612         pud->pud_cap = ll_capability_u32(current_cap());
2613         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2614
2615         task_lock(current);
2616         ngroups = current_cred()->group_info->ngroups;
2617         if (pud->pud_ngroups > ngroups)
2618                 pud->pud_ngroups = ngroups;
2619 #ifdef HAVE_GROUP_INFO_GID
2620         memcpy(pud->pud_groups, current_cred()->group_info->gid,
2621                pud->pud_ngroups * sizeof(__u32));
2622 #else /* !HAVE_GROUP_INFO_GID */
2623         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2624                pud->pud_ngroups * sizeof(__u32));
2625 #endif /* HAVE_GROUP_INFO_GID */
2626         task_unlock(current);
2627
2628         return 0;
2629 }
2630 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2631
2632 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2633 {
2634         struct ptlrpc_user_desc *pud;
2635         int i;
2636
2637         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2638         if (!pud)
2639                 return -EINVAL;
2640
2641         if (swabbed) {
2642                 __swab32s(&pud->pud_uid);
2643                 __swab32s(&pud->pud_gid);
2644                 __swab32s(&pud->pud_fsuid);
2645                 __swab32s(&pud->pud_fsgid);
2646                 __swab32s(&pud->pud_cap);
2647                 __swab32s(&pud->pud_ngroups);
2648         }
2649
2650         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2651                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2652                 return -EINVAL;
2653         }
2654
2655         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2656             msg->lm_buflens[offset]) {
2657                 CERROR("%u groups are claimed but bufsize only %u\n",
2658                        pud->pud_ngroups, msg->lm_buflens[offset]);
2659                 return -EINVAL;
2660         }
2661
2662         if (swabbed) {
2663                 for (i = 0; i < pud->pud_ngroups; i++)
2664                         __swab32s(&pud->pud_groups[i]);
2665         }
2666
2667         return 0;
2668 }
2669 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2670
2671 /*
2672  * misc helpers
2673  */
2674
2675 const char *sec2target_str(struct ptlrpc_sec *sec)
2676 {
2677         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2678                 return "*";
2679         if (sec_is_reverse(sec))
2680                 return "c";
2681         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2682 }
2683 EXPORT_SYMBOL(sec2target_str);
2684
2685 /*
2686  * return true if the bulk data is protected
2687  */
2688 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2689 {
2690         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2691         case SPTLRPC_BULK_SVC_INTG:
2692         case SPTLRPC_BULK_SVC_PRIV:
2693                 return 1;
2694         default:
2695                 return 0;
2696         }
2697 }
2698 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2699
2700 /*
2701  * crypto API helper/alloc blkcipher
2702  */
2703
2704 /*
2705  * initialize/finalize
2706  */
2707
2708 int sptlrpc_init(void)
2709 {
2710         int rc;
2711
2712         rwlock_init(&policy_lock);
2713
2714         rc = sptlrpc_gc_init();
2715         if (rc)
2716                 goto out;
2717
2718         rc = sptlrpc_conf_init();
2719         if (rc)
2720                 goto out_gc;
2721
2722         rc = sptlrpc_enc_pool_init();
2723         if (rc)
2724                 goto out_conf;
2725
2726         rc = sptlrpc_null_init();
2727         if (rc)
2728                 goto out_pool;
2729
2730         rc = sptlrpc_plain_init();
2731         if (rc)
2732                 goto out_null;
2733
2734         rc = sptlrpc_lproc_init();
2735         if (rc)
2736                 goto out_plain;
2737
2738         return 0;
2739
2740 out_plain:
2741         sptlrpc_plain_fini();
2742 out_null:
2743         sptlrpc_null_fini();
2744 out_pool:
2745         sptlrpc_enc_pool_fini();
2746 out_conf:
2747         sptlrpc_conf_fini();
2748 out_gc:
2749         sptlrpc_gc_fini();
2750 out:
2751         return rc;
2752 }
2753
2754 void sptlrpc_fini(void)
2755 {
2756         sptlrpc_lproc_fini();
2757         sptlrpc_plain_fini();
2758         sptlrpc_null_fini();
2759         sptlrpc_enc_pool_fini();
2760         sptlrpc_conf_fini();
2761         sptlrpc_gc_fini();
2762 }