LU-14355 ptlrpc: do not output error when imp_sec is freed
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/ptlrpc/sec.c
33  *
34  * Author: Eric Mei <ericm@clusterfs.com>
35  */
36
37 #define DEBUG_SUBSYSTEM S_SEC
38
39 #include <linux/user_namespace.h>
40 #include <linux/uidgid.h>
41 #include <linux/crypto.h>
42 #include <linux/key.h>
43
44 #include <libcfs/libcfs.h>
45 #include <obd.h>
46 #include <obd_class.h>
47 #include <obd_support.h>
48 #include <lustre_net.h>
49 #include <lustre_import.h>
50 #include <lustre_dlm.h>
51 #include <lustre_sec.h>
52
53 #include "ptlrpc_internal.h"
54
55 static int send_sepol;
56 module_param(send_sepol, int, 0644);
57 MODULE_PARM_DESC(send_sepol, "Client sends SELinux policy status");
58
59 /*
60  * policy registers
61  */
62
63 static rwlock_t policy_lock;
64 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
65         NULL,
66 };
67
68 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
69 {
70         __u16 number = policy->sp_policy;
71
72         LASSERT(policy->sp_name);
73         LASSERT(policy->sp_cops);
74         LASSERT(policy->sp_sops);
75
76         if (number >= SPTLRPC_POLICY_MAX)
77                 return -EINVAL;
78
79         write_lock(&policy_lock);
80         if (unlikely(policies[number])) {
81                 write_unlock(&policy_lock);
82                 return -EALREADY;
83         }
84         policies[number] = policy;
85         write_unlock(&policy_lock);
86
87         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
88         return 0;
89 }
90 EXPORT_SYMBOL(sptlrpc_register_policy);
91
92 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
93 {
94         __u16 number = policy->sp_policy;
95
96         LASSERT(number < SPTLRPC_POLICY_MAX);
97
98         write_lock(&policy_lock);
99         if (unlikely(policies[number] == NULL)) {
100                 write_unlock(&policy_lock);
101                 CERROR("%s: already unregistered\n", policy->sp_name);
102                 return -EINVAL;
103         }
104
105         LASSERT(policies[number] == policy);
106         policies[number] = NULL;
107         write_unlock(&policy_lock);
108
109         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
110         return 0;
111 }
112 EXPORT_SYMBOL(sptlrpc_unregister_policy);
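
/*
 * Illustrative sketch, not part of the original file: a security policy
 * module would typically register itself on load and unregister on unload.
 * The module name, ops tables and the policy slot below are placeholders;
 * only the struct fields and the register/unregister calls come from this
 * file.
 *
 *	static struct ptlrpc_sec_policy mypolicy = {
 *		.sp_owner  = THIS_MODULE,
 *		.sp_name   = "mypolicy",
 *		.sp_policy = SPTLRPC_POLICY_PLAIN,
 *		.sp_cops   = &mypolicy_cli_ops,
 *		.sp_sops   = &mypolicy_svc_ops,
 *	};
 *
 *	static int __init mypolicy_init(void)
 *	{
 *		return sptlrpc_register_policy(&mypolicy);
 *	}
 *
 *	static void __exit mypolicy_exit(void)
 *	{
 *		sptlrpc_unregister_policy(&mypolicy);
 *	}
 */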
113
114 static
115 struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
116 {
117         static DEFINE_MUTEX(load_mutex);
118         static atomic_t           loaded = ATOMIC_INIT(0);
119         struct ptlrpc_sec_policy *policy;
120         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
121         __u16                     flag = 0;
122
123         if (number >= SPTLRPC_POLICY_MAX)
124                 return NULL;
125
126         while (1) {
127                 read_lock(&policy_lock);
128                 policy = policies[number];
129                 if (policy && !try_module_get(policy->sp_owner))
130                         policy = NULL;
131                 if (policy == NULL)
132                         flag = atomic_read(&loaded);
133                 read_unlock(&policy_lock);
134
135                 if (policy != NULL || flag != 0 ||
136                     number != SPTLRPC_POLICY_GSS)
137                         break;
138
139                 /* try to load gss module, once */
140                 mutex_lock(&load_mutex);
141                 if (atomic_read(&loaded) == 0) {
142                         if (request_module("ptlrpc_gss") == 0)
143                                 CDEBUG(D_SEC,
144                                        "module ptlrpc_gss loaded on demand\n");
145                         else
146                                 CERROR("Unable to load module ptlrpc_gss\n");
147
148                         atomic_set(&loaded, 1);
149                 }
150                 mutex_unlock(&load_mutex);
151         }
152
153         return policy;
154 }
155
156 __u32 sptlrpc_name2flavor_base(const char *name)
157 {
158         if (!strcmp(name, "null"))
159                 return SPTLRPC_FLVR_NULL;
160         if (!strcmp(name, "plain"))
161                 return SPTLRPC_FLVR_PLAIN;
162         if (!strcmp(name, "gssnull"))
163                 return SPTLRPC_FLVR_GSSNULL;
164         if (!strcmp(name, "krb5n"))
165                 return SPTLRPC_FLVR_KRB5N;
166         if (!strcmp(name, "krb5a"))
167                 return SPTLRPC_FLVR_KRB5A;
168         if (!strcmp(name, "krb5i"))
169                 return SPTLRPC_FLVR_KRB5I;
170         if (!strcmp(name, "krb5p"))
171                 return SPTLRPC_FLVR_KRB5P;
172         if (!strcmp(name, "skn"))
173                 return SPTLRPC_FLVR_SKN;
174         if (!strcmp(name, "ska"))
175                 return SPTLRPC_FLVR_SKA;
176         if (!strcmp(name, "ski"))
177                 return SPTLRPC_FLVR_SKI;
178         if (!strcmp(name, "skpi"))
179                 return SPTLRPC_FLVR_SKPI;
180
181         return SPTLRPC_FLVR_INVALID;
182 }
183 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
184
185 const char *sptlrpc_flavor2name_base(__u32 flvr)
186 {
187         __u32   base = SPTLRPC_FLVR_BASE(flvr);
188
189         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
190                 return "null";
191         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
192                 return "plain";
193         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
194                 return "gssnull";
195         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
196                 return "krb5n";
197         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
198                 return "krb5a";
199         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
200                 return "krb5i";
201         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
202                 return "krb5p";
203         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
204                 return "skn";
205         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
206                 return "ska";
207         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
208                 return "ski";
209         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
210                 return "skpi";
211
212         CERROR("invalid wire flavor 0x%x\n", flvr);
213         return "invalid";
214 }
215 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
216
217 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
218                                char *buf, int bufsize)
219 {
220         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
221                 snprintf(buf, bufsize, "hash:%s",
222                         sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
223         else
224                 snprintf(buf, bufsize, "%s",
225                         sptlrpc_flavor2name_base(sf->sf_rpc));
226
227         buf[bufsize - 1] = '\0';
228         return buf;
229 }
230 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
231
232 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
233 {
234         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
235
236         /*
237          * currently we don't support customized bulk specification for
238          * flavors other than plain
239          */
240         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
241                 char bspec[16];
242
243                 bspec[0] = '-';
244                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
245                 strncat(buf, bspec, bufsize);
246         }
247
248         buf[bufsize - 1] = '\0';
249         return buf;
250 }
251 EXPORT_SYMBOL(sptlrpc_flavor2name);
252
253 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
254 {
255         buf[0] = '\0';
256
257         if (flags & PTLRPC_SEC_FL_REVERSE)
258                 strlcat(buf, "reverse,", bufsize);
259         if (flags & PTLRPC_SEC_FL_ROOTONLY)
260                 strlcat(buf, "rootonly,", bufsize);
261         if (flags & PTLRPC_SEC_FL_UDESC)
262                 strlcat(buf, "udesc,", bufsize);
263         if (flags & PTLRPC_SEC_FL_BULK)
264                 strlcat(buf, "bulk,", bufsize);
265         if (buf[0] == '\0')
266                 strlcat(buf, "-,", bufsize);
267
268         return buf;
269 }
270 EXPORT_SYMBOL(sptlrpc_secflags2str);
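
/*
 * Illustrative usage, not part of the original file: for a flavor that has
 * the REVERSE and BULK flags set, the helper above leaves "reverse,bulk,"
 * in the caller-provided buffer, e.g.:
 *
 *	char buf[32];
 *
 *	sptlrpc_secflags2str(PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_BULK,
 *			     buf, sizeof(buf));
 *	CDEBUG(D_SEC, "sec flags: %s\n", buf);
 */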
271
272 /*
273  * client context APIs
274  */
275
276 static
277 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
278 {
279         struct vfs_cred vcred;
280         int create = 1, remove_dead = 1;
281
282         LASSERT(sec);
283         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
284
285         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
286                                      PTLRPC_SEC_FL_ROOTONLY)) {
287                 vcred.vc_uid = 0;
288                 vcred.vc_gid = 0;
289                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
290                         create = 0;
291                         remove_dead = 0;
292                 }
293         } else {
294                 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
295                 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
296         }
297
298         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
299                                                    remove_dead);
300 }
301
302 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
303 {
304         atomic_inc(&ctx->cc_refcount);
305         return ctx;
306 }
307 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
308
309 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
310 {
311         struct ptlrpc_sec *sec = ctx->cc_sec;
312
313         LASSERT(sec);
314         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
315
316         if (!atomic_dec_and_test(&ctx->cc_refcount))
317                 return;
318
319         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
320 }
321 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
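
/*
 * Illustrative reference handling, not part of the original file: a caller
 * that needs a context beyond the lifetime of the object it came from takes
 * an extra reference and drops it when done, e.g.:
 *
 *	struct ptlrpc_cli_ctx *ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
 *
 *	... use ctx independently of req ...
 *
 *	sptlrpc_cli_ctx_put(ctx, 1);
 *
 * Passing sync == 1 releases the context synchronously if this was the
 * last reference.
 */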
322
323 /**
324  * Expire the client context immediately.
325  *
326  * \pre Caller must hold at least 1 reference on the \a ctx.
327  */
328 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
329 {
330         LASSERT(ctx->cc_ops->die);
331         ctx->cc_ops->die(ctx, 0);
332 }
333 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
334
335 /**
336  * Wake up the threads that are waiting for this client context. Called
337  * after some status change happened on \a ctx.
338  */
339 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
340 {
341         struct ptlrpc_request *req, *next;
342
343         spin_lock(&ctx->cc_lock);
344         list_for_each_entry_safe(req, next, &ctx->cc_req_list,
345                                      rq_ctx_chain) {
346                 list_del_init(&req->rq_ctx_chain);
347                 ptlrpc_client_wake_req(req);
348         }
349         spin_unlock(&ctx->cc_lock);
350 }
351 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
352
353 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
354 {
355         LASSERT(ctx->cc_ops);
356
357         if (ctx->cc_ops->display == NULL)
358                 return 0;
359
360         return ctx->cc_ops->display(ctx, buf, bufsize);
361 }
362
363 static int import_sec_check_expire(struct obd_import *imp)
364 {
365         int adapt = 0;
366
367         write_lock(&imp->imp_sec_lock);
368         if (imp->imp_sec_expire &&
369             imp->imp_sec_expire < ktime_get_real_seconds()) {
370                 adapt = 1;
371                 imp->imp_sec_expire = 0;
372         }
373         write_unlock(&imp->imp_sec_lock);
374
375         if (!adapt)
376                 return 0;
377
378         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
379         return sptlrpc_import_sec_adapt(imp, NULL, NULL);
380 }
381
382 /**
383  * Get and validate the client side ptlrpc security facilities from
384  * \a imp. There is a race condition on client reconnect when the import is
385  * being destroyed while there are outstanding client-bound requests. In
386  * this case do not output any error messages if the import security is not
387  * found.
388  *
389  * \param[in] imp obd import associated with client
390  * \param[out] sec client side ptlrpc security
391  *
392  * \retval 0 if security retrieved successfully
393  * \retval -ve errno if there was a problem
394  */
395 static int import_sec_validate_get(struct obd_import *imp,
396                                    struct ptlrpc_sec **sec)
397 {
398         int rc;
399
400         if (unlikely(imp->imp_sec_expire)) {
401                 rc = import_sec_check_expire(imp);
402                 if (rc)
403                         return rc;
404         }
405
406         *sec = sptlrpc_import_sec_ref(imp);
407         if (*sec == NULL) {
408                 /* Only output an error when the import is still active */
409                 if (!test_bit(WORK_STRUCT_PENDING_BIT,
410                               work_data_bits(&imp->imp_zombie_work)))
411                         CERROR("import %p (%s) with no sec\n",
412                                imp, ptlrpc_import_state_name(imp->imp_state));
413                 return -EACCES;
414         }
415
416         if (unlikely((*sec)->ps_dying)) {
417                 CERROR("attempt to use dying sec %p\n", sec);
418                 sptlrpc_sec_put(*sec);
419                 return -EACCES;
420         }
421
422         return 0;
423 }
424
425 /**
426  * Given a \a req, find or allocate an appropriate context for it.
427  * \pre req->rq_cli_ctx == NULL.
428  *
429  * \retval 0 succeed, and req->rq_cli_ctx is set.
430  * \retval -ve error number, and req->rq_cli_ctx == NULL.
431  */
432 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
433 {
434         struct obd_import *imp = req->rq_import;
435         struct ptlrpc_sec *sec;
436         int rc;
437
438         ENTRY;
439
440         LASSERT(!req->rq_cli_ctx);
441         LASSERT(imp);
442
443         rc = import_sec_validate_get(imp, &sec);
444         if (rc)
445                 RETURN(rc);
446
447         req->rq_cli_ctx = get_my_ctx(sec);
448
449         sptlrpc_sec_put(sec);
450
451         if (!req->rq_cli_ctx) {
452                 CERROR("req %p: fail to get context\n", req);
453                 RETURN(-ECONNREFUSED);
454         }
455
456         RETURN(0);
457 }
458
459 /**
460  * Drop the context for \a req.
461  * \pre req->rq_cli_ctx != NULL.
462  * \post req->rq_cli_ctx == NULL.
463  *
464  * If \a sync == 0, this function should return quickly without sleep;
465  * otherwise it might trigger and wait for the whole process of sending
466  * a context-destroying rpc to the server.
467  */
468 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
469 {
470         ENTRY;
471
472         LASSERT(req);
473         LASSERT(req->rq_cli_ctx);
474
475         /*
476          * the request might be asked to release its context early while
477          * still on the context waiting list.
478          */
479         if (!list_empty(&req->rq_ctx_chain)) {
480                 spin_lock(&req->rq_cli_ctx->cc_lock);
481                 list_del_init(&req->rq_ctx_chain);
482                 spin_unlock(&req->rq_cli_ctx->cc_lock);
483         }
484
485         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
486         req->rq_cli_ctx = NULL;
487         EXIT;
488 }
489
490 static
491 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
492                            struct ptlrpc_cli_ctx *oldctx,
493                            struct ptlrpc_cli_ctx *newctx)
494 {
495         struct sptlrpc_flavor   old_flvr;
496         char *reqmsg = NULL; /* to work around old gcc */
497         int reqmsg_size;
498         int rc = 0;
499
500         LASSERT(req->rq_reqmsg);
501         LASSERT(req->rq_reqlen);
502         LASSERT(req->rq_replen);
503
504         CDEBUG(D_SEC,
505                "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
506                req, oldctx, oldctx->cc_vcred.vc_uid,
507                sec2target_str(oldctx->cc_sec), newctx, newctx->cc_vcred.vc_uid,
508                sec2target_str(newctx->cc_sec), oldctx->cc_sec,
509                oldctx->cc_sec->ps_policy->sp_name, newctx->cc_sec,
510                newctx->cc_sec->ps_policy->sp_name);
511
512         /* save flavor */
513         old_flvr = req->rq_flvr;
514
515         /* save request message */
516         reqmsg_size = req->rq_reqlen;
517         if (reqmsg_size != 0) {
518                 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
519                 if (reqmsg == NULL)
520                         return -ENOMEM;
521                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
522         }
523
524         /* release old req/rep buf */
525         req->rq_cli_ctx = oldctx;
526         sptlrpc_cli_free_reqbuf(req);
527         sptlrpc_cli_free_repbuf(req);
528         req->rq_cli_ctx = newctx;
529
530         /* recalculate the flavor */
531         sptlrpc_req_set_flavor(req, 0);
532
533         /*
534          * alloc a new request buffer
535          * we don't need to alloc a reply buffer here; leave that to the
536          * rest of the ptlrpc processing
537          */
538         if (reqmsg_size != 0) {
539                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
540                 if (!rc) {
541                         LASSERT(req->rq_reqmsg);
542                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
543                 } else {
544                         CWARN("failed to alloc reqbuf: %d\n", rc);
545                         req->rq_flvr = old_flvr;
546                 }
547
548                 OBD_FREE_LARGE(reqmsg, reqmsg_size);
549         }
550         return rc;
551 }
552
553 /**
554  * If the current context of \a req is dead somehow, e.g. we just switched flavor
555  * and thus marked the original contexts dead, we'll find a new context for it. If
556  * no switch is needed, \a req will end up with the same context.
557  *
558  * \note a request must have a context, to keep other parts of the code happy.
559  * On any failure during the switch, we must restore the old one.
560  */
561 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
562 {
563         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
564         struct ptlrpc_cli_ctx *newctx;
565         int rc;
566
567         ENTRY;
568
569         LASSERT(oldctx);
570
571         sptlrpc_cli_ctx_get(oldctx);
572         sptlrpc_req_put_ctx(req, 0);
573
574         rc = sptlrpc_req_get_ctx(req);
575         if (unlikely(rc)) {
576                 LASSERT(!req->rq_cli_ctx);
577
578                 /* restore old ctx */
579                 req->rq_cli_ctx = oldctx;
580                 RETURN(rc);
581         }
582
583         newctx = req->rq_cli_ctx;
584         LASSERT(newctx);
585
586         if (unlikely(newctx == oldctx &&
587                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
588                 /*
589                  * still got the old dead ctx, which usually means the system is too busy
590                  */
591                 CDEBUG(D_SEC,
592                        "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
593                        newctx, newctx->cc_flags);
594
595                 schedule_timeout_interruptible(cfs_time_seconds(1));
596         } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
597                             == 0)) {
598                 /*
599                  * new ctx not up to date yet
600                  */
601                 CDEBUG(D_SEC,
602                        "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
603                        newctx, newctx->cc_flags);
604         } else {
605                 /*
606                  * it's possible newctx == oldctx if we're switching
607                  * subflavor with the same sec.
608                  */
609                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
610                 if (rc) {
611                         /* restore old ctx */
612                         sptlrpc_req_put_ctx(req, 0);
613                         req->rq_cli_ctx = oldctx;
614                         RETURN(rc);
615                 }
616
617                 LASSERT(req->rq_cli_ctx == newctx);
618         }
619
620         sptlrpc_cli_ctx_put(oldctx, 1);
621         RETURN(0);
622 }
623 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
624
625 static
626 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
627 {
628         if (cli_ctx_is_refreshed(ctx))
629                 return 1;
630         return 0;
631 }
632
633 static
634 void ctx_refresh_interrupt(struct ptlrpc_request *req)
635 {
636
637         spin_lock(&req->rq_lock);
638         req->rq_intr = 1;
639         spin_unlock(&req->rq_lock);
640 }
641
642 static
643 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
644 {
645         spin_lock(&ctx->cc_lock);
646         if (!list_empty(&req->rq_ctx_chain))
647                 list_del_init(&req->rq_ctx_chain);
648         spin_unlock(&ctx->cc_lock);
649 }
650
651 /**
652  * Refresh the context of \a req, if it's not up-to-date.
653  * \param timeout
654  * - == 0: do not wait
655  * - == MAX_SCHEDULE_TIMEOUT: wait indefinitely
656  * - > 0: not supported
657  *
658  * The status of the context may be changed by other threads
659  * at any time. We allow this race, but once we return with 0, the caller will
660  * assume it's up to date and keep using it until the owning rpc is done.
661  *
662  * \retval 0 only if the context is up to date.
663  * \retval -ve error number.
664  */
665 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
666 {
667         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
668         struct ptlrpc_sec *sec;
669         int rc;
670
671         ENTRY;
672
673         LASSERT(ctx);
674
675         if (req->rq_ctx_init || req->rq_ctx_fini)
676                 RETURN(0);
677
678         if (timeout != 0 && timeout != MAX_SCHEDULE_TIMEOUT) {
679                 CERROR("req %p: invalid timeout %lu\n", req, timeout);
680                 RETURN(-EINVAL);
681         }
682
683         /*
684  * during the process a request's context might even change type
685  * (e.g. from gss ctx to null ctx), so on each loop we need to re-check
686          * everything
687          */
688 again:
689         rc = import_sec_validate_get(req->rq_import, &sec);
690         if (rc)
691                 RETURN(rc);
692
693         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
694                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
695                        req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
696                 req_off_ctx_list(req, ctx);
697                 sptlrpc_req_replace_dead_ctx(req);
698                 ctx = req->rq_cli_ctx;
699         }
700         sptlrpc_sec_put(sec);
701
702         if (cli_ctx_is_eternal(ctx))
703                 RETURN(0);
704
705         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
706                 if (ctx->cc_ops->refresh)
707                         ctx->cc_ops->refresh(ctx);
708         }
709         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
710
711         LASSERT(ctx->cc_ops->validate);
712         if (ctx->cc_ops->validate(ctx) == 0) {
713                 req_off_ctx_list(req, ctx);
714                 RETURN(0);
715         }
716
717         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
718                 spin_lock(&req->rq_lock);
719                 req->rq_err = 1;
720                 spin_unlock(&req->rq_lock);
721                 req_off_ctx_list(req, ctx);
722                 RETURN(-EPERM);
723         }
724
725         /*
726          * There's a subtle issue for resending RPCs, suppose following
727          * situation:
728          *  1. the request was sent to server.
729          *  2. recovery was kicked off; after it finished, the request was
730          *     marked as resent.
731          *  3. resend the request.
732          *  4. old reply from server received, we accept and verify the reply.
733          *     this has to succeed, otherwise the error will be visible
734          *     to the application.
735          *  5. new reply from server received, dropped by LNet.
736          *
737          * Note the xid of the old & new request is the same. We can't simply
738          * change the xid for the resent request because the server relies on
739          * it for reply reconstruction.
740          *
741          * Commonly the original context should be uptodate because we
742          * have a nice expiry time; the server will keep its context because
743          * we at least hold a ref on the old context, which prevents the
744          * context-destroying RPC from being sent. So the server can still
745          * accept the request and finish the RPC. But if that's not the case:
746          *  1. If the server side context has been trimmed, a NO_CONTEXT will
747          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
748          *     new context by force.
749          *  2. The current context was never refreshed, then we are fine: we
750          *     never really sent a request with the old context before.
751          */
752         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
753             unlikely(req->rq_reqmsg) &&
754             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
755                 req_off_ctx_list(req, ctx);
756                 RETURN(0);
757         }
758
759         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
760                 req_off_ctx_list(req, ctx);
761                 /*
762                  * don't switch ctx if import was deactivated
763                  */
764                 if (req->rq_import->imp_deactive) {
765                         spin_lock(&req->rq_lock);
766                         req->rq_err = 1;
767                         spin_unlock(&req->rq_lock);
768                         RETURN(-EINTR);
769                 }
770
771                 rc = sptlrpc_req_replace_dead_ctx(req);
772                 if (rc) {
773                         LASSERT(ctx == req->rq_cli_ctx);
774                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
775                                req, ctx, rc);
776                         spin_lock(&req->rq_lock);
777                         req->rq_err = 1;
778                         spin_unlock(&req->rq_lock);
779                         RETURN(rc);
780                 }
781
782                 ctx = req->rq_cli_ctx;
783                 goto again;
784         }
785
786         /*
787          * Now we're sure this context is in the middle of an upcall; add
788          * ourselves to the waiting list
789          */
790         spin_lock(&ctx->cc_lock);
791         if (list_empty(&req->rq_ctx_chain))
792                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
793         spin_unlock(&ctx->cc_lock);
794
795         if (timeout == 0)
796                 RETURN(-EWOULDBLOCK);
797
798         /* Clear any flags that may be present from previous sends */
799         LASSERT(req->rq_receiving_reply == 0);
800         spin_lock(&req->rq_lock);
801         req->rq_err = 0;
802         req->rq_timedout = 0;
803         req->rq_resend = 0;
804         req->rq_restart = 0;
805         spin_unlock(&req->rq_lock);
806
807         /* by now we know that timeout value is MAX_SCHEDULE_TIMEOUT,
808          * so wait indefinitely with non-fatal signals blocked
809          */
810         if (l_wait_event_abortable(req->rq_reply_waitq,
811                                    ctx_check_refresh(ctx)) == -ERESTARTSYS) {
812                 rc = -EINTR;
813                 ctx_refresh_interrupt(req);
814         }
815
816         /*
817          * following cases could lead us here:
818          * - successfully refreshed;
819          * - interrupted;
820          * - timed out, and we don't want to recover from the failure;
821          * - timed out, and woken up when recovery finished;
822          * - someone else marked this ctx dead by force;
823          * - someone invalidated the req and called ptlrpc_client_wake_req(),
824          *   e.g. ptlrpc_abort_inflight();
825          */
826         if (!cli_ctx_is_refreshed(ctx)) {
827                 /* timed out or interrupted */
828                 req_off_ctx_list(req, ctx);
829
830                 LASSERT(rc != 0);
831                 RETURN(rc);
832         }
833
834         goto again;
835 }
836
837 /* Bring ptlrpc_sec context up-to-date */
838 int sptlrpc_export_update_ctx(struct obd_export *exp)
839 {
840         struct obd_import *imp = exp ? exp->exp_imp_reverse : NULL;
841         struct ptlrpc_sec *sec = NULL;
842         struct ptlrpc_cli_ctx *ctx = NULL;
843         int rc = 0;
844
845         if (imp)
846                 sec = sptlrpc_import_sec_ref(imp);
847         if (sec) {
848                 ctx = get_my_ctx(sec);
849                 sptlrpc_sec_put(sec);
850         }
851
852         if (ctx) {
853                 if (ctx->cc_ops->refresh)
854                         rc = ctx->cc_ops->refresh(ctx);
855                 sptlrpc_cli_ctx_put(ctx, 1);
856         }
857         return rc;
858 }
859
860 /**
861  * Initialize flavor settings for \a req, according to \a opcode.
862  *
863  * \note this could be called in two situations:
864  * - new request from ptlrpc_pre_req(), with proper @opcode
865  * - old request which changed ctx in the middle, with @opcode == 0
866  */
867 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
868 {
869         struct ptlrpc_sec *sec;
870
871         LASSERT(req->rq_import);
872         LASSERT(req->rq_cli_ctx);
873         LASSERT(req->rq_cli_ctx->cc_sec);
874         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
875
876         /* special security flags according to opcode */
877         switch (opcode) {
878         case OST_READ:
879         case MDS_READPAGE:
880         case MGS_CONFIG_READ:
881         case OBD_IDX_READ:
882                 req->rq_bulk_read = 1;
883                 break;
884         case OST_WRITE:
885         case MDS_WRITEPAGE:
886                 req->rq_bulk_write = 1;
887                 break;
888         case SEC_CTX_INIT:
889                 req->rq_ctx_init = 1;
890                 break;
891         case SEC_CTX_FINI:
892                 req->rq_ctx_fini = 1;
893                 break;
894         case 0:
895                 /* init/fini rpc won't be resent, so can't be here */
896                 LASSERT(req->rq_ctx_init == 0);
897                 LASSERT(req->rq_ctx_fini == 0);
898
899                 /* cleanup flags, which should be recalculated */
900                 req->rq_pack_udesc = 0;
901                 req->rq_pack_bulk = 0;
902                 break;
903         }
904
905         sec = req->rq_cli_ctx->cc_sec;
906
907         spin_lock(&sec->ps_lock);
908         req->rq_flvr = sec->ps_flvr;
909         spin_unlock(&sec->ps_lock);
910
911         /*
912          * force SVC_NULL for context initiation rpc, SVC_INTG for context
913          * destruction rpc
914          */
915         if (unlikely(req->rq_ctx_init))
916                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
917         else if (unlikely(req->rq_ctx_fini))
918                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
919
920         /* user descriptor flag, null security can't do it anyway */
921         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
922             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
923                 req->rq_pack_udesc = 1;
924
925         /* bulk security flag */
926         if ((req->rq_bulk_read || req->rq_bulk_write) &&
927             sptlrpc_flavor_has_bulk(&req->rq_flvr))
928                 req->rq_pack_bulk = 1;
929 }
930
931 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
932 {
933         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
934                 return;
935
936         LASSERT(req->rq_clrbuf);
937         if (req->rq_pool || !req->rq_reqbuf)
938                 return;
939
940         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
941         req->rq_reqbuf = NULL;
942         req->rq_reqbuf_len = 0;
943 }
944
945 /**
946  * Given an import \a imp, check whether the current user has a valid context
947  * or not. We may create a new context and try to refresh it, retrying
948  * repeatedly in case of non-fatal errors. A return of 0 means success.
949  */
950 int sptlrpc_import_check_ctx(struct obd_import *imp)
951 {
952         struct ptlrpc_sec     *sec;
953         struct ptlrpc_cli_ctx *ctx;
954         struct ptlrpc_request *req = NULL;
955         int rc;
956
957         ENTRY;
958
959         might_sleep();
960
961         sec = sptlrpc_import_sec_ref(imp);
962         ctx = get_my_ctx(sec);
963         sptlrpc_sec_put(sec);
964
965         if (!ctx)
966                 RETURN(-ENOMEM);
967
968         if (cli_ctx_is_eternal(ctx) ||
969             ctx->cc_ops->validate(ctx) == 0) {
970                 sptlrpc_cli_ctx_put(ctx, 1);
971                 RETURN(0);
972         }
973
974         if (cli_ctx_is_error(ctx)) {
975                 sptlrpc_cli_ctx_put(ctx, 1);
976                 RETURN(-EACCES);
977         }
978
979         req = ptlrpc_request_cache_alloc(GFP_NOFS);
980         if (!req)
981                 RETURN(-ENOMEM);
982
983         ptlrpc_cli_req_init(req);
984         atomic_set(&req->rq_refcount, 10000);
985
986         req->rq_import = imp;
987         req->rq_flvr = sec->ps_flvr;
988         req->rq_cli_ctx = ctx;
989
990         rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);
991         LASSERT(list_empty(&req->rq_ctx_chain));
992         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
993         ptlrpc_request_cache_free(req);
994
995         RETURN(rc);
996 }
997
998 /**
999  * Used by ptlrpc client, to perform the pre-defined security transformation
1000  * upon the request message of \a req. After this function is called,
1001  * req->rq_reqmsg is still accessible as clear text.
1002  */
1003 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
1004 {
1005         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1006         int rc = 0;
1007
1008         ENTRY;
1009
1010         LASSERT(ctx);
1011         LASSERT(ctx->cc_sec);
1012         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1013
1014         /*
1015          * we wrap the bulk request here because now we can be sure
1016          * the context is up to date.
1017          */
1018         if (req->rq_bulk) {
1019                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
1020                 if (rc)
1021                         RETURN(rc);
1022         }
1023
1024         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1025         case SPTLRPC_SVC_NULL:
1026         case SPTLRPC_SVC_AUTH:
1027         case SPTLRPC_SVC_INTG:
1028                 LASSERT(ctx->cc_ops->sign);
1029                 rc = ctx->cc_ops->sign(ctx, req);
1030                 break;
1031         case SPTLRPC_SVC_PRIV:
1032                 LASSERT(ctx->cc_ops->seal);
1033                 rc = ctx->cc_ops->seal(ctx, req);
1034                 break;
1035         default:
1036                 LBUG();
1037         }
1038
1039         if (rc == 0) {
1040                 LASSERT(req->rq_reqdata_len);
1041                 LASSERT(req->rq_reqdata_len % 8 == 0);
1042                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1043         }
1044
1045         RETURN(rc);
1046 }
1047
1048 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1049 {
1050         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1051         int rc;
1052
1053         ENTRY;
1054
1055         LASSERT(ctx);
1056         LASSERT(ctx->cc_sec);
1057         LASSERT(req->rq_repbuf);
1058         LASSERT(req->rq_repdata);
1059         LASSERT(req->rq_repmsg == NULL);
1060
1061         req->rq_rep_swab_mask = 0;
1062
1063         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1064         switch (rc) {
1065         case 1:
1066                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1067         case 0:
1068                 break;
1069         default:
1070                 CERROR("failed unpack reply: x%llu\n", req->rq_xid);
1071                 RETURN(-EPROTO);
1072         }
1073
1074         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1075                 CERROR("replied data length %d too small\n",
1076                        req->rq_repdata_len);
1077                 RETURN(-EPROTO);
1078         }
1079
1080         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1081             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1082                 CERROR("reply policy %u doesn't match request policy %u\n",
1083                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1084                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1085                 RETURN(-EPROTO);
1086         }
1087
1088         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1089         case SPTLRPC_SVC_NULL:
1090         case SPTLRPC_SVC_AUTH:
1091         case SPTLRPC_SVC_INTG:
1092                 LASSERT(ctx->cc_ops->verify);
1093                 rc = ctx->cc_ops->verify(ctx, req);
1094                 break;
1095         case SPTLRPC_SVC_PRIV:
1096                 LASSERT(ctx->cc_ops->unseal);
1097                 rc = ctx->cc_ops->unseal(ctx, req);
1098                 break;
1099         default:
1100                 LBUG();
1101         }
1102         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1103
1104         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1105             !req->rq_ctx_init)
1106                 req->rq_rep_swab_mask = 0;
1107         RETURN(rc);
1108 }
1109
1110 /**
1111  * Used by ptlrpc client, to perform security transformation upon the reply
1112  * message of \a req. After return successfully, req->rq_repmsg points to
1113  * the reply message in clear text.
1114  *
1115  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1116  * going to change.
1117  */
1118 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1119 {
1120         LASSERT(req->rq_repbuf);
1121         LASSERT(req->rq_repdata == NULL);
1122         LASSERT(req->rq_repmsg == NULL);
1123         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1124
1125         if (req->rq_reply_off == 0 &&
1126             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1127                 CERROR("real reply with offset 0\n");
1128                 return -EPROTO;
1129         }
1130
1131         if (req->rq_reply_off % 8 != 0) {
1132                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1133                 return -EPROTO;
1134         }
1135
1136         req->rq_repdata = (struct lustre_msg *)
1137                                 (req->rq_repbuf + req->rq_reply_off);
1138         req->rq_repdata_len = req->rq_nob_received;
1139
1140         return do_cli_unwrap_reply(req);
1141 }
1142
1143 /**
1144  * Used by ptlrpc client, to perform security transformation upon the early
1145  * reply message of \a req. We expect the rq_reply_off is 0, and
1146  * rq_nob_received is the early reply size.
1147  *
1148  * Because the receive buffer might still be posted, the reply data might be
1149  * changed at any time, whether we're holding rq_lock or not. For this reason
1150  * we allocate a separate ptlrpc_request and reply buffer for early reply
1151  * processing.
1152  *
1153  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1154  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1155  * \a *req_ret to release it.
1156  * \retval -ve error number, and \a req_ret will not be set.
1157  */
1158 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1159                                    struct ptlrpc_request **req_ret)
1160 {
1161         struct ptlrpc_request *early_req;
1162         char *early_buf;
1163         int early_bufsz, early_size;
1164         int rc;
1165
1166         ENTRY;
1167
1168         early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1169         if (early_req == NULL)
1170                 RETURN(-ENOMEM);
1171
1172         ptlrpc_cli_req_init(early_req);
1173
1174         early_size = req->rq_nob_received;
1175         early_bufsz = size_roundup_power2(early_size);
1176         OBD_ALLOC_LARGE(early_buf, early_bufsz);
1177         if (early_buf == NULL)
1178                 GOTO(err_req, rc = -ENOMEM);
1179
1180         /* sanity checks and copy data out; do it inside the spinlock */
1181         spin_lock(&req->rq_lock);
1182
1183         if (req->rq_replied) {
1184                 spin_unlock(&req->rq_lock);
1185                 GOTO(err_buf, rc = -EALREADY);
1186         }
1187
1188         LASSERT(req->rq_repbuf);
1189         LASSERT(req->rq_repdata == NULL);
1190         LASSERT(req->rq_repmsg == NULL);
1191
1192         if (req->rq_reply_off != 0) {
1193                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1194                 spin_unlock(&req->rq_lock);
1195                 GOTO(err_buf, rc = -EPROTO);
1196         }
1197
1198         if (req->rq_nob_received != early_size) {
1199                 /* even if another early reply arrived, the size should be the same */
1200                 CERROR("data size has changed from %u to %u\n",
1201                        early_size, req->rq_nob_received);
1202                 spin_unlock(&req->rq_lock);
1203                 GOTO(err_buf, rc = -EINVAL);
1204         }
1205
1206         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1207                 CERROR("early reply length %d too small\n",
1208                        req->rq_nob_received);
1209                 spin_unlock(&req->rq_lock);
1210                 GOTO(err_buf, rc = -EALREADY);
1211         }
1212
1213         memcpy(early_buf, req->rq_repbuf, early_size);
1214         spin_unlock(&req->rq_lock);
1215
1216         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1217         early_req->rq_flvr = req->rq_flvr;
1218         early_req->rq_repbuf = early_buf;
1219         early_req->rq_repbuf_len = early_bufsz;
1220         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1221         early_req->rq_repdata_len = early_size;
1222         early_req->rq_early = 1;
1223         early_req->rq_reqmsg = req->rq_reqmsg;
1224
1225         rc = do_cli_unwrap_reply(early_req);
1226         if (rc) {
1227                 DEBUG_REQ(D_ADAPTTO, early_req,
1228                           "unwrap early reply: rc = %d", rc);
1229                 GOTO(err_ctx, rc);
1230         }
1231
1232         LASSERT(early_req->rq_repmsg);
1233         *req_ret = early_req;
1234         RETURN(0);
1235
1236 err_ctx:
1237         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1238 err_buf:
1239         OBD_FREE_LARGE(early_buf, early_bufsz);
1240 err_req:
1241         ptlrpc_request_cache_free(early_req);
1242         RETURN(rc);
1243 }
1244
1245 /**
1246  * Used by ptlrpc client, to release a processed early reply \a early_req.
1247  *
1248  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1249  */
1250 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1251 {
1252         LASSERT(early_req->rq_repbuf);
1253         LASSERT(early_req->rq_repdata);
1254         LASSERT(early_req->rq_repmsg);
1255
1256         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1257         OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1258         ptlrpc_request_cache_free(early_req);
1259 }
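
/*
 * Illustrative pairing, not part of the original file: a caller that wants
 * to peek at an early reply unwraps it into a private copy and must later
 * release that copy, e.g.:
 *
 *	struct ptlrpc_request *early_req;
 *	int rc;
 *
 *	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *	if (rc == 0) {
 *		... inspect early_req->rq_repmsg ...
 *		sptlrpc_cli_finish_early_reply(early_req);
 *	}
 */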
1260
1261 /**************************************************
1262  * sec ID                                         *
1263  **************************************************/
1264
1265 /*
1266  * "fixed" sec (e.g. null) uses sec_id < 0
1267  */
1268 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1269
1270 int sptlrpc_get_next_secid(void)
1271 {
1272         return atomic_inc_return(&sptlrpc_sec_id);
1273 }
1274 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1275
1276 /*
1277  * client side high-level security APIs
1278  */
1279
1280 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1281                                    int grace, int force)
1282 {
1283         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1284
1285         LASSERT(policy->sp_cops);
1286         LASSERT(policy->sp_cops->flush_ctx_cache);
1287
1288         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1289 }
1290
1291 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1292 {
1293         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1294
1295         LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1296         LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1297         LASSERT(policy->sp_cops->destroy_sec);
1298
1299         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1300
1301         policy->sp_cops->destroy_sec(sec);
1302         sptlrpc_policy_put(policy);
1303 }
1304
1305 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1306 {
1307         sec_cop_destroy_sec(sec);
1308 }
1309 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1310
1311 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1312 {
1313         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1314
1315         if (sec->ps_policy->sp_cops->kill_sec) {
1316                 sec->ps_policy->sp_cops->kill_sec(sec);
1317
1318                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1319         }
1320 }
1321
1322 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1323 {
1324         if (sec)
1325                 atomic_inc(&sec->ps_refcount);
1326
1327         return sec;
1328 }
1329 EXPORT_SYMBOL(sptlrpc_sec_get);
1330
1331 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1332 {
1333         if (sec) {
1334                 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1335
1336                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1337                         sptlrpc_gc_del_sec(sec);
1338                         sec_cop_destroy_sec(sec);
1339                 }
1340         }
1341 }
1342 EXPORT_SYMBOL(sptlrpc_sec_put);
1343
1344 /*
1345  * policy module is responsible for taking a reference on the import
1346  */
1347 static
1348 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1349                                        struct ptlrpc_svc_ctx *svc_ctx,
1350                                        struct sptlrpc_flavor *sf,
1351                                        enum lustre_sec_part sp)
1352 {
1353         struct ptlrpc_sec_policy *policy;
1354         struct ptlrpc_sec *sec;
1355         char str[32];
1356
1357         ENTRY;
1358
1359         if (svc_ctx) {
1360                 LASSERT(imp->imp_dlm_fake == 1);
1361
1362                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1363                        imp->imp_obd->obd_type->typ_name,
1364                        imp->imp_obd->obd_name,
1365                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1366
1367                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1368                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1369         } else {
1370                 LASSERT(imp->imp_dlm_fake == 0);
1371
1372                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1373                        imp->imp_obd->obd_type->typ_name,
1374                        imp->imp_obd->obd_name,
1375                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1376
1377                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1378                 if (!policy) {
1379                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1380                         RETURN(NULL);
1381                 }
1382         }
1383
1384         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1385         if (sec) {
1386                 atomic_inc(&sec->ps_refcount);
1387
1388                 sec->ps_part = sp;
1389
1390                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1391                         sptlrpc_gc_add_sec(sec);
1392         } else {
1393                 sptlrpc_policy_put(policy);
1394         }
1395
1396         RETURN(sec);
1397 }
1398
1399 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1400 {
1401         struct ptlrpc_sec *sec;
1402
1403         read_lock(&imp->imp_sec_lock);
1404         sec = sptlrpc_sec_get(imp->imp_sec);
1405         read_unlock(&imp->imp_sec_lock);
1406
1407         return sec;
1408 }
1409 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1410
1411 static void sptlrpc_import_sec_install(struct obd_import *imp,
1412                                        struct ptlrpc_sec *sec)
1413 {
1414         struct ptlrpc_sec *old_sec;
1415
1416         LASSERT_ATOMIC_POS(&sec->ps_refcount);
1417
1418         write_lock(&imp->imp_sec_lock);
1419         old_sec = imp->imp_sec;
1420         imp->imp_sec = sec;
1421         write_unlock(&imp->imp_sec_lock);
1422
1423         if (old_sec) {
1424                 sptlrpc_sec_kill(old_sec);
1425
1426                 /* balance the ref taken by this import */
1427                 sptlrpc_sec_put(old_sec);
1428         }
1429 }
1430
1431 static inline
1432 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1433 {
1434         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1435 }
1436
1437 static inline
1438 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1439 {
1440         *dst = *src;
1441 }
1442
1443 /**
1444  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1445  * configuration. When called, imp->imp_sec may or may not be NULL.
1446  *
1447  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1448  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1449  */
1450 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1451                              struct ptlrpc_svc_ctx *svc_ctx,
1452                              struct sptlrpc_flavor *flvr)
1453 {
1454         struct ptlrpc_connection *conn;
1455         struct sptlrpc_flavor sf;
1456         struct ptlrpc_sec *sec, *newsec;
1457         enum lustre_sec_part sp;
1458         char str[24];
1459         int rc = 0;
1460
1461         ENTRY;
1462
1463         might_sleep();
1464
1465         if (imp == NULL)
1466                 RETURN(0);
1467
1468         conn = imp->imp_connection;
1469
1470         if (svc_ctx == NULL) {
1471                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1472                 /*
1473                  * normal import: determine the flavor from the rule set,
1474                  * except for the mgc, whose flavor is predetermined.
1475                  */
1476                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1477                         sf = cliobd->cl_flvr_mgc;
1478                 else
1479                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1480                                                    cliobd->cl_sp_to,
1481                                                    &cliobd->cl_target_uuid,
1482                                                    conn->c_self, &sf);
1483
1484                 sp = imp->imp_obd->u.cli.cl_sp_me;
1485         } else {
1486                 /* reverse import, determine flavor from incoming request */
1487                 sf = *flvr;
1488
1489                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1490                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1491                                       PTLRPC_SEC_FL_ROOTONLY;
1492
1493                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1494         }
1495
1496         sec = sptlrpc_import_sec_ref(imp);
1497         if (sec) {
1498                 char str2[24];
1499
1500                 if (flavor_equal(&sf, &sec->ps_flvr))
1501                         GOTO(out, rc);
1502
1503                 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1504                        imp->imp_obd->obd_name,
1505                        obd_uuid2str(&conn->c_remote_uuid),
1506                        sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1507                        sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1508         } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1509                    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1510                 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1511                        imp->imp_obd->obd_name,
1512                        obd_uuid2str(&conn->c_remote_uuid),
1513                        LNET_NIDNET(conn->c_self),
1514                        sptlrpc_flavor2name(&sf, str, sizeof(str)));
1515         }
1516
1517         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1518         if (newsec) {
1519                 sptlrpc_import_sec_install(imp, newsec);
1520         } else {
1521                 CERROR("import %s->%s: failed to create new sec\n",
1522                        imp->imp_obd->obd_name,
1523                        obd_uuid2str(&conn->c_remote_uuid));
1524                 rc = -EPERM;
1525         }
1526
1527 out:
1528         sptlrpc_sec_put(sec);
1529         RETURN(rc);
1530 }
1531
1532 void sptlrpc_import_sec_put(struct obd_import *imp)
1533 {
1534         if (imp->imp_sec) {
1535                 sptlrpc_sec_kill(imp->imp_sec);
1536
1537                 sptlrpc_sec_put(imp->imp_sec);
1538                 imp->imp_sec = NULL;
1539         }
1540 }
1541
1542 static void import_flush_ctx_common(struct obd_import *imp,
1543                                     uid_t uid, int grace, int force)
1544 {
1545         struct ptlrpc_sec *sec;
1546
1547         if (imp == NULL)
1548                 return;
1549
1550         sec = sptlrpc_import_sec_ref(imp);
1551         if (sec == NULL)
1552                 return;
1553
1554         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1555         sptlrpc_sec_put(sec);
1556 }
1557
1558 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1559 {
1560         /*
1561          * it's important to use grace mode, see the explanation in
1562          * sptlrpc_req_refresh_ctx()
1563          */
1564         import_flush_ctx_common(imp, 0, 1, 1);
1565 }
1566
1567 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1568 {
1569         import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1570                                 1, 1);
1571 }
1572 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1573
1574 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1575 {
1576         import_flush_ctx_common(imp, -1, 1, 1);
1577 }
1578 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1579
1580 /**
1581  * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1582  * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1583  */
1584 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1585 {
1586         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1587         struct ptlrpc_sec_policy *policy;
1588         int rc;
1589
1590         LASSERT(ctx);
1591         LASSERT(ctx->cc_sec);
1592         LASSERT(ctx->cc_sec->ps_policy);
1593         LASSERT(req->rq_reqmsg == NULL);
1594         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1595
1596         policy = ctx->cc_sec->ps_policy;
1597         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1598         if (!rc) {
1599                 LASSERT(req->rq_reqmsg);
1600                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1601
1602                 /* zeroing preallocated buffer */
1603                 if (req->rq_pool)
1604                         memset(req->rq_reqmsg, 0, msgsize);
1605         }
1606
1607         return rc;
1608 }
1609
1610 /**
1611  * Used by the ptlrpc client to free the request buffer of \a req. After this,
1612  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1613  */
1614 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1615 {
1616         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1617         struct ptlrpc_sec_policy *policy;
1618
1619         LASSERT(ctx);
1620         LASSERT(ctx->cc_sec);
1621         LASSERT(ctx->cc_sec->ps_policy);
1622         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1623
1624         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1625                 return;
1626
1627         policy = ctx->cc_sec->ps_policy;
1628         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1629         req->rq_reqmsg = NULL;
1630 }
1631
1632 /*
1633  * NOTE caller must guarantee the buffer size is enough for the enlargement
1634  */
1635 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1636                                   int segment, int newsize)
1637 {
1638         void *src, *dst;
1639         int oldsize, oldmsg_size, movesize;
1640
1641         LASSERT(segment < msg->lm_bufcount);
1642         LASSERT(msg->lm_buflens[segment] <= newsize);
1643
1644         if (msg->lm_buflens[segment] == newsize)
1645                 return;
1646
1647         /* no data needs to move if we are enlarging the last segment */
1648         if (segment == msg->lm_bufcount - 1) {
1649                 msg->lm_buflens[segment] = newsize;
1650                 return;
1651         }
1652
1653         oldsize = msg->lm_buflens[segment];
1654
1655         src = lustre_msg_buf(msg, segment + 1, 0);
1656         msg->lm_buflens[segment] = newsize;
1657         dst = lustre_msg_buf(msg, segment + 1, 0);
1658         msg->lm_buflens[segment] = oldsize;
1659
1660         /* move from segment + 1 to end segment */
1661         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1662         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1663         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1664         LASSERT(movesize >= 0);
1665
1666         if (movesize)
1667                 memmove(dst, src, movesize);
1668
1669         /* note we don't clear the area where the old data lived; it is not secret */
1670
1671         /* finally set new segment size */
1672         msg->lm_buflens[segment] = newsize;
1673 }
1674 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1675
1676 /**
1677  * Used by the ptlrpc client to enlarge the segment selected by \a field of
1678  * the request message pointed to by req->rq_reqmsg to size \a newsize. All
1679  * previously filled-in data is preserved after the enlargement. This must be
1680  * called after the original request buffer has been allocated.
1681  *
1682  * \note After this is called, rq_reqmsg and rq_reqlen might have changed,
1683  * so the caller should refresh its local pointers if needed.
1684  */
1685 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1686                                const struct req_msg_field *field,
1687                                int newsize)
1688 {
1689         struct req_capsule *pill = &req->rq_pill;
1690         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1691         struct ptlrpc_sec_cops *cops;
1692         struct lustre_msg *msg = req->rq_reqmsg;
1693         int segment = __req_capsule_offset(pill, field, RCL_CLIENT);
1694
1695         LASSERT(ctx);
1696         LASSERT(msg);
1697         LASSERT(msg->lm_bufcount > segment);
1698         LASSERT(msg->lm_buflens[segment] <= newsize);
1699
1700         if (msg->lm_buflens[segment] == newsize)
1701                 return 0;
1702
1703         cops = ctx->cc_sec->ps_policy->sp_cops;
1704         LASSERT(cops->enlarge_reqbuf);
1705         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1706 }
1707 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1708
1709 /**
1710  * Used by ptlrpc client to allocate reply buffer of \a req.
1711  *
1712  * \note After this, req->rq_repmsg is still not accessible.
1713  */
1714 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1715 {
1716         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1717         struct ptlrpc_sec_policy *policy;
1718
1719         ENTRY;
1720
1721         LASSERT(ctx);
1722         LASSERT(ctx->cc_sec);
1723         LASSERT(ctx->cc_sec->ps_policy);
1724
1725         if (req->rq_repbuf)
1726                 RETURN(0);
1727
1728         policy = ctx->cc_sec->ps_policy;
1729         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1730 }
1731
1732 /**
1733  * Used by ptlrpc client to free reply buffer of \a req. After this
1734  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1735  */
1736 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1737 {
1738         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1739         struct ptlrpc_sec_policy *policy;
1740
1741         ENTRY;
1742
1743         LASSERT(ctx);
1744         LASSERT(ctx->cc_sec);
1745         LASSERT(ctx->cc_sec->ps_policy);
1746         LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1747
1748         if (req->rq_repbuf == NULL)
1749                 return;
1750         LASSERT(req->rq_repbuf_len);
1751
1752         policy = ctx->cc_sec->ps_policy;
1753         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1754         req->rq_repmsg = NULL;
1755         EXIT;
1756 }
1757 EXPORT_SYMBOL(sptlrpc_cli_free_repbuf);
1758
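/*
 * Install a reverse context on \a imp from the client context \a ctx,
 * if the policy provides an install_rctx method.
 */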
1759 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1760                                 struct ptlrpc_cli_ctx *ctx)
1761 {
1762         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1763
1764         if (!policy->sp_cops->install_rctx)
1765                 return 0;
1766         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1767 }
1768
1769 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1770                                 struct ptlrpc_svc_ctx *ctx)
1771 {
1772         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1773
1774         if (!policy->sp_sops->install_rctx)
1775                 return 0;
1776         return policy->sp_sops->install_rctx(imp, ctx);
1777 }
1778
1779 /* Get SELinux policy info from userspace */
1780 static int sepol_helper(struct obd_import *imp)
1781 {
1782         char mtime_str[21] = { 0 }, mode_str[2] = { 0 };
1783         char *argv[] = {
1784                 [0] = "/usr/sbin/l_getsepol",
1785                 [1] = "-o",
1786                 [2] = NULL,         /* obd type */
1787                 [3] = "-n",
1788                 [4] = NULL,         /* obd name */
1789                 [5] = "-t",
1790                 [6] = mtime_str,    /* policy mtime */
1791                 [7] = "-m",
1792                 [8] = mode_str,     /* enforcing mode */
1793                 [9] = NULL
1794         };
1795         char *envp[] = {
1796                 [0] = "HOME=/",
1797                 [1] = "PATH=/sbin:/usr/sbin",
1798                 [2] = NULL
1799         };
1800         signed short ret;
1801         int rc = 0;
1802
1803         if (imp == NULL || imp->imp_obd == NULL ||
1804             imp->imp_obd->obd_type == NULL) {
1805                 rc = -EINVAL;
1806         } else {
1807                 argv[2] = (char *)imp->imp_obd->obd_type->typ_name;
1808                 argv[4] = imp->imp_obd->obd_name;
1809                 spin_lock(&imp->imp_sec->ps_lock);
1810                 if (ktime_to_ns(imp->imp_sec->ps_sepol_mtime) == 0 &&
1811                     imp->imp_sec->ps_sepol[0] == '\0') {
1812                         /* ps_sepol has not been initialized */
1813                         argv[5] = NULL;
1814                         argv[7] = NULL;
1815                 } else {
1816                         time64_t mtime_ms;
1817
1818                         mtime_ms = ktime_to_ms(imp->imp_sec->ps_sepol_mtime);
1819                         snprintf(mtime_str, sizeof(mtime_str), "%lld",
1820                                  mtime_ms / MSEC_PER_SEC);
1821                         mode_str[0] = imp->imp_sec->ps_sepol[0];
1822                 }
1823                 spin_unlock(&imp->imp_sec->ps_lock);
1824                 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
1825                 rc = ret >> 8;
1826         }
1827
1828         return rc;
1829 }
1830
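/*
 * Decide whether the cached SELinux policy status should be refreshed now,
 * based on the send_sepol interval and the next check time kept in \a imp_sec.
 */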
1831 static inline int sptlrpc_sepol_needs_check(struct ptlrpc_sec *imp_sec)
1832 {
1833         ktime_t checknext;
1834
1835         if (send_sepol == 0 || !selinux_is_enabled())
1836                 return 0;
1837
1838         if (send_sepol == -1)
1839                 /* send_sepol == -1 means fetch sepol status every time */
1840                 return 1;
1841
1842         spin_lock(&imp_sec->ps_lock);
1843         checknext = imp_sec->ps_sepol_checknext;
1844         spin_unlock(&imp_sec->ps_lock);
1845
1846         /* next check is too far in the future, reset it */
1847         if (ktime_after(checknext,
1848                         ktime_add(ktime_get(), ktime_set(send_sepol, 0))))
1849                 goto setnext;
1850
1851         if (ktime_before(ktime_get(), checknext))
1852                 /* too early to fetch sepol status */
1853                 return 0;
1854
1855 setnext:
1856         /* define new sepol_checknext time */
1857         spin_lock(&imp_sec->ps_lock);
1858         imp_sec->ps_sepol_checknext = ktime_add(ktime_get(),
1859                                                 ktime_set(send_sepol, 0));
1860         spin_unlock(&imp_sec->ps_lock);
1861
1862         return 1;
1863 }
1864
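/**
 * Copy the SELinux policy info cached in the import's sec into
 * req->rq_sepol, refreshing it first through the l_getsepol userspace
 * helper when needed.
 */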
1865 int sptlrpc_get_sepol(struct ptlrpc_request *req)
1866 {
1867         struct ptlrpc_sec *imp_sec = req->rq_import->imp_sec;
1868         int rc = 0;
1869
1870         ENTRY;
1871
1872         (req->rq_sepol)[0] = '\0';
1873
1874 #ifndef HAVE_SELINUX
1875         if (unlikely(send_sepol != 0))
1876                 CDEBUG(D_SEC,
1877                        "Client cannot report SELinux status, it was not built against libselinux.\n");
1878         RETURN(0);
1879 #endif
1880
1881         if (send_sepol == 0 || !selinux_is_enabled())
1882                 RETURN(0);
1883
1884         if (imp_sec == NULL)
1885                 RETURN(-EINVAL);
1886
1887         /* Retrieve SELinux status info */
1888         if (sptlrpc_sepol_needs_check(imp_sec))
1889                 rc = sepol_helper(req->rq_import);
1890         if (likely(rc == 0)) {
1891                 spin_lock(&imp_sec->ps_lock);
1892                 memcpy(req->rq_sepol, imp_sec->ps_sepol,
1893                        sizeof(req->rq_sepol));
1894                 spin_unlock(&imp_sec->ps_lock);
1895         }
1896
1897         RETURN(rc);
1898 }
1899 EXPORT_SYMBOL(sptlrpc_get_sepol);
1900
1901 /*
1902  * server side security
1903  */
1904
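/*
 * Check whether the flavor of \a req is acceptable for the expected flavor
 * \a exp; ctx init/fini RPCs only need to match the policy and mechanism.
 */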
1905 static int flavor_allowed(struct sptlrpc_flavor *exp,
1906                           struct ptlrpc_request *req)
1907 {
1908         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1909
1910         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1911                 return 1;
1912
1913         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1914             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1915             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1916             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1917                 return 1;
1918
1919         return 0;
1920 }
1921
1922 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1923
1924 /**
1925  * Given an export \a exp, check whether the flavor of the incoming \a req
1926  * is allowed by that export. The main logic deals with handling changing
1927  * configurations. Returns 0 on success.
1928  */
1929 int sptlrpc_target_export_check(struct obd_export *exp,
1930                                 struct ptlrpc_request *req)
1931 {
1932         struct sptlrpc_flavor   flavor;
1933
1934         if (exp == NULL)
1935                 return 0;
1936
1937         /*
1938          * a client side export has no imp_reverse, skip it
1939          * FIXME: maybe we should check the flavor in this case as well?
1940          */
1941         if (exp->exp_imp_reverse == NULL)
1942                 return 0;
1943
1944         /* don't care about ctx fini rpc */
1945         if (req->rq_ctx_fini)
1946                 return 0;
1947
1948         spin_lock(&exp->exp_lock);
1949
1950         /*
1951          * if the flavor has just changed (exp->exp_flvr_changed != 0), we wait
1952          * for the first req with the new flavor, then treat it as the current
1953          * flavor and adapt the reverse sec accordingly.
1954          * note the first rpc with the new flavor might not come with a root ctx,
1955          * in which case the sec_adapt is delayed by leaving exp_flvr_adapt == 1.
1956          */
1957         if (unlikely(exp->exp_flvr_changed) &&
1958             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1959                 /*
1960                  * make the new flavor as "current", and old ones as
1961                  * about-to-expire
1962                  */
1963                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1964                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1965                 flavor = exp->exp_flvr_old[1];
1966                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1967                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1968                 exp->exp_flvr_old[0] = exp->exp_flvr;
1969                 exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
1970                                           EXP_FLVR_UPDATE_EXPIRE;
1971                 exp->exp_flvr = flavor;
1972
1973                 /* flavor change finished */
1974                 exp->exp_flvr_changed = 0;
1975                 LASSERT(exp->exp_flvr_adapt == 1);
1976
1977                 /* if it's gss, we are only interested in root ctx init */
1978                 if (req->rq_auth_gss &&
1979                     !(req->rq_ctx_init &&
1980                     (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1981                     req->rq_auth_usr_ost))) {
1982                         spin_unlock(&exp->exp_lock);
1983                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1984                                req->rq_auth_gss, req->rq_ctx_init,
1985                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1986                                req->rq_auth_usr_ost);
1987                         return 0;
1988                 }
1989
1990                 exp->exp_flvr_adapt = 0;
1991                 spin_unlock(&exp->exp_lock);
1992
1993                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1994                                                 req->rq_svc_ctx, &flavor);
1995         }
1996
1997         /*
1998          * if it equals the current flavor, we accept it, but still need to
1999          * deal with the reverse sec/ctx
2000          */
2001         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
2002                 /*
2003                  * most cases should return here; we are only interested in
2004                  * gss root ctx init
2005                  */
2006                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
2007                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2008                      !req->rq_auth_usr_ost)) {
2009                         spin_unlock(&exp->exp_lock);
2010                         return 0;
2011                 }
2012
2013                 /*
2014                  * if the flavor has just changed, we should not proceed; just
2015                  * leave it, the current flavor will be discovered and replaced
2016                  * shortly, and let _this_ rpc pass through
2017                  */
2018                 if (exp->exp_flvr_changed) {
2019                         LASSERT(exp->exp_flvr_adapt);
2020                         spin_unlock(&exp->exp_lock);
2021                         return 0;
2022                 }
2023
2024                 if (exp->exp_flvr_adapt) {
2025                         exp->exp_flvr_adapt = 0;
2026                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
2027                                exp, exp->exp_flvr.sf_rpc,
2028                                exp->exp_flvr_old[0].sf_rpc,
2029                                exp->exp_flvr_old[1].sf_rpc);
2030                         flavor = exp->exp_flvr;
2031                         spin_unlock(&exp->exp_lock);
2032
2033                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
2034                                                         req->rq_svc_ctx,
2035                                                         &flavor);
2036                 } else {
2037                         CDEBUG(D_SEC,
2038                                "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
2039                                exp, exp->exp_flvr.sf_rpc,
2040                                exp->exp_flvr_old[0].sf_rpc,
2041                                exp->exp_flvr_old[1].sf_rpc);
2042                         spin_unlock(&exp->exp_lock);
2043
2044                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
2045                                                            req->rq_svc_ctx);
2046                 }
2047         }
2048
2049         if (exp->exp_flvr_expire[0]) {
2050                 if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
2051                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
2052                                 CDEBUG(D_SEC,
2053                                        "exp %p (%x|%x|%x): match the middle one (%lld)\n",
2054                                        exp, exp->exp_flvr.sf_rpc,
2055                                        exp->exp_flvr_old[0].sf_rpc,
2056                                        exp->exp_flvr_old[1].sf_rpc,
2057                                        (s64)(exp->exp_flvr_expire[0] -
2058                                              ktime_get_real_seconds()));
2059                                 spin_unlock(&exp->exp_lock);
2060                                 return 0;
2061                         }
2062                 } else {
2063                         CDEBUG(D_SEC, "mark middle expired\n");
2064                         exp->exp_flvr_expire[0] = 0;
2065                 }
2066                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x does not match the middle one\n", exp,
2067                        exp->exp_flvr.sf_rpc,
2068                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2069                        req->rq_flvr.sf_rpc);
2070         }
2071
2072         /*
2073          * now it doesn't match the current flavor; the only chance to accept
2074          * it is if it matches an old flavor which has not expired.
2075          */
2076         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
2077                 if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
2078                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
2079                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
2080                                        exp,
2081                                        exp->exp_flvr.sf_rpc,
2082                                        exp->exp_flvr_old[0].sf_rpc,
2083                                        exp->exp_flvr_old[1].sf_rpc,
2084                                        (s64)(exp->exp_flvr_expire[1] -
2085                                        ktime_get_real_seconds()));
2086                                 spin_unlock(&exp->exp_lock);
2087                                 return 0;
2088                         }
2089                 } else {
2090                         CDEBUG(D_SEC, "mark oldest expired\n");
2091                         exp->exp_flvr_expire[1] = 0;
2092                 }
2093                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x does not match the oldest one\n",
2094                        exp, exp->exp_flvr.sf_rpc,
2095                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2096                        req->rq_flvr.sf_rpc);
2097         } else {
2098                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
2099                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
2100                        exp->exp_flvr_old[1].sf_rpc);
2101         }
2102
2103         spin_unlock(&exp->exp_lock);
2104
2105         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
2106               exp, exp->exp_obd->obd_name,
2107               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
2108               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
2109               req->rq_flvr.sf_rpc,
2110               exp->exp_flvr.sf_rpc,
2111               exp->exp_flvr_old[0].sf_rpc,
2112               exp->exp_flvr_expire[0] ?
2113               (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
2114               exp->exp_flvr_old[1].sf_rpc,
2115               exp->exp_flvr_expire[1] ?
2116               (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
2117         return -EACCES;
2118 }
2119 EXPORT_SYMBOL(sptlrpc_target_export_check);
2120
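/**
 * Re-evaluate the sptlrpc rules in \a rset for every export of \a obd and
 * mark the exports whose flavor changed, so that the switch happens on
 * their next incoming request.
 */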
2121 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
2122                                       struct sptlrpc_rule_set *rset)
2123 {
2124         struct obd_export *exp;
2125         struct sptlrpc_flavor new_flvr;
2126
2127         LASSERT(obd);
2128
2129         spin_lock(&obd->obd_dev_lock);
2130
2131         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2132                 if (exp->exp_connection == NULL)
2133                         continue;
2134
2135                 /*
2136                  * note if this export's flavor had just been updated
2137                  * (exp_flvr_changed == 1), this will override the
2138                  * previous one.
2139                  */
2140                 spin_lock(&exp->exp_lock);
2141                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
2142                                              exp->exp_connection->c_peer.nid,
2143                                              &new_flvr);
2144                 if (exp->exp_flvr_changed ||
2145                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
2146                         exp->exp_flvr_old[1] = new_flvr;
2147                         exp->exp_flvr_expire[1] = 0;
2148                         exp->exp_flvr_changed = 1;
2149                         exp->exp_flvr_adapt = 1;
2150
2151                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
2152                                exp, sptlrpc_part2name(exp->exp_sp_peer),
2153                                exp->exp_flvr.sf_rpc,
2154                                exp->exp_flvr_old[1].sf_rpc);
2155                 }
2156                 spin_unlock(&exp->exp_lock);
2157         }
2158
2159         spin_unlock(&obd->obd_dev_lock);
2160 }
2161 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
2162
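/*
 * Verify that the peer type claimed in \a req matches what GSS
 * authentication actually proved; otherwise drop the request.
 */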
2163 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2164 {
2165         /* peer's claim is unreliable unless gss is being used */
2166         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2167                 return svc_rc;
2168
2169         switch (req->rq_sp_from) {
2170         case LUSTRE_SP_CLI:
2171                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2172                         /* The below message is checked in sanity-sec test_33 */
2173                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
2174                         svc_rc = SECSVC_DROP;
2175                 }
2176                 break;
2177         case LUSTRE_SP_MDT:
2178                 if (!req->rq_auth_usr_mdt) {
2179                         /* The below message is checked in sanity-sec test_33 */
2180                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2181                         svc_rc = SECSVC_DROP;
2182                 }
2183                 break;
2184         case LUSTRE_SP_OST:
2185                 if (!req->rq_auth_usr_ost) {
2186                         /* The below message is checked in sanity-sec test_33 */
2187                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2188                         svc_rc = SECSVC_DROP;
2189                 }
2190                 break;
2191         case LUSTRE_SP_MGS:
2192         case LUSTRE_SP_MGC:
2193                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2194                     !req->rq_auth_usr_ost) {
2195                         /* The below message is checked in sanity-sec test_33 */
2196                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2197                         svc_rc = SECSVC_DROP;
2198                 }
2199                 break;
2200         case LUSTRE_SP_ANY:
2201         default:
2202                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2203                 svc_rc = SECSVC_DROP;
2204         }
2205
2206         return svc_rc;
2207 }
2208
2209 /**
2210  * Used by the ptlrpc server to perform transformation upon the request
2211  * message of incoming \a req. This must be the first thing done with an
2212  * incoming request in the ptlrpc layer.
2213  *
2214  * \retval SECSVC_OK success: req->rq_reqmsg points to the request message in
2215  * clear text, its size is req->rq_reqlen, and req->rq_svc_ctx is set.
2216  * \retval SECSVC_COMPLETE success: the request has been fully processed and
2217  * the reply message has been prepared.
2218  * \retval SECSVC_DROP failure: this request should be dropped.
2219  */
2220 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2221 {
2222         struct ptlrpc_sec_policy *policy;
2223         struct lustre_msg *msg = req->rq_reqbuf;
2224         int rc;
2225
2226         ENTRY;
2227
2228         LASSERT(msg);
2229         LASSERT(req->rq_reqmsg == NULL);
2230         LASSERT(req->rq_repmsg == NULL);
2231         LASSERT(req->rq_svc_ctx == NULL);
2232
2233         req->rq_req_swab_mask = 0;
2234
2235         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2236         switch (rc) {
2237         case 1:
2238                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
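                /* fall through */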
2239         case 0:
2240                 break;
2241         default:
2242                 CERROR("error unpacking request from %s x%llu\n",
2243                        libcfs_id2str(req->rq_peer), req->rq_xid);
2244                 RETURN(SECSVC_DROP);
2245         }
2246
2247         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2248         req->rq_sp_from = LUSTRE_SP_ANY;
2249         req->rq_auth_uid = -1; /* set to INVALID_UID */
2250         req->rq_auth_mapped_uid = -1;
2251
2252         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2253         if (!policy) {
2254                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2255                 RETURN(SECSVC_DROP);
2256         }
2257
2258         LASSERT(policy->sp_sops->accept);
2259         rc = policy->sp_sops->accept(req);
2260         sptlrpc_policy_put(policy);
2261         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2262         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2263
2264         /*
2265          * if it's not the null flavor (i.e. the clear msg is embedded inside
2266          * the wrapper), reset the swab mask for the coming inner msg unpacking.
2267          */
2268         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2269                 req->rq_req_swab_mask = 0;
2270
2271         /* sanity check for the request source */
2272         rc = sptlrpc_svc_check_from(req, rc);
2273         RETURN(rc);
2274 }
2275
2276 /**
2277  * Used by the ptlrpc server to allocate the reply buffer for \a req. On
2278  * success, req->rq_reply_state is set and req->rq_reply_state->rs_msg points
2279  * to a buffer of \a msglen size.
2280  */
2281 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2282 {
2283         struct ptlrpc_sec_policy *policy;
2284         struct ptlrpc_reply_state *rs;
2285         int rc;
2286
2287         ENTRY;
2288
2289         LASSERT(req->rq_svc_ctx);
2290         LASSERT(req->rq_svc_ctx->sc_policy);
2291
2292         policy = req->rq_svc_ctx->sc_policy;
2293         LASSERT(policy->sp_sops->alloc_rs);
2294
2295         rc = policy->sp_sops->alloc_rs(req, msglen);
2296         if (unlikely(rc == -ENOMEM)) {
2297                 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2298
2299                 if (svcpt->scp_service->srv_max_reply_size <
2300                    msglen + sizeof(struct ptlrpc_reply_state)) {
2301                         /* Just return failure if the size is too big */
2302                         CERROR("size of message is too big (%zd), %d allowed\n",
2303                                 msglen + sizeof(struct ptlrpc_reply_state),
2304                                 svcpt->scp_service->srv_max_reply_size);
2305                         RETURN(-ENOMEM);
2306                 }
2307
2308                 /* failed alloc, try emergency pool */
2309                 rs = lustre_get_emerg_rs(svcpt);
2310                 if (rs == NULL)
2311                         RETURN(-ENOMEM);
2312
2313                 req->rq_reply_state = rs;
2314                 rc = policy->sp_sops->alloc_rs(req, msglen);
2315                 if (rc) {
2316                         lustre_put_emerg_rs(rs);
2317                         req->rq_reply_state = NULL;
2318                 }
2319         }
2320
2321         LASSERT(rc != 0 ||
2322                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2323
2324         RETURN(rc);
2325 }
2326
2327 /**
2328  * Used by the ptlrpc server to perform transformation upon the reply message.
2329  *
2330  * \post req->rq_reply_off is set to the appropriate server-controlled offset.
2331  * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2332  */
2333 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2334 {
2335         struct ptlrpc_sec_policy *policy;
2336         int rc;
2337
2338         ENTRY;
2339
2340         LASSERT(req->rq_svc_ctx);
2341         LASSERT(req->rq_svc_ctx->sc_policy);
2342
2343         policy = req->rq_svc_ctx->sc_policy;
2344         LASSERT(policy->sp_sops->authorize);
2345
2346         rc = policy->sp_sops->authorize(req);
2347         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2348
2349         RETURN(rc);
2350 }
2351
2352 /**
2353  * Used by the ptlrpc server to free the reply_state.
2354  */
2355 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2356 {
2357         struct ptlrpc_sec_policy *policy;
2358         unsigned int prealloc;
2359
2360         ENTRY;
2361
2362         LASSERT(rs->rs_svc_ctx);
2363         LASSERT(rs->rs_svc_ctx->sc_policy);
2364
2365         policy = rs->rs_svc_ctx->sc_policy;
2366         LASSERT(policy->sp_sops->free_rs);
2367
2368         prealloc = rs->rs_prealloc;
2369         policy->sp_sops->free_rs(rs);
2370
2371         if (prealloc)
2372                 lustre_put_emerg_rs(rs);
2373         EXIT;
2374 }
2375
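/* Take an additional reference on the service context attached to \a req. */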
2376 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2377 {
2378         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2379
2380         if (ctx != NULL)
2381                 atomic_inc(&ctx->sc_refcount);
2382 }
2383
2384 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2385 {
2386         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2387
2388         if (ctx == NULL)
2389                 return;
2390
2391         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2392         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2393                 if (ctx->sc_policy->sp_sops->free_ctx)
2394                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2395         }
2396         req->rq_svc_ctx = NULL;
2397 }
2398
2399 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2400 {
2401         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2402
2403         if (ctx == NULL)
2404                 return;
2405
2406         LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2407         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2408                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2409 }
2410 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2411
2412 /*
2413  * bulk security
2414  */
2415
2416 /**
2417  * Perform transformation upon bulk data pointed to by \a desc. This is called
2418  * before transforming the request message.
2419  */
2420 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2421                           struct ptlrpc_bulk_desc *desc)
2422 {
2423         struct ptlrpc_cli_ctx *ctx;
2424
2425         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2426
2427         if (!req->rq_pack_bulk)
2428                 return 0;
2429
2430         ctx = req->rq_cli_ctx;
2431         if (ctx->cc_ops->wrap_bulk)
2432                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2433         return 0;
2434 }
2435 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2436
2437 /**
2438  * This is called after unwrapping the reply message.
2439  * Returns the number of bytes of actual plain text received, or an error code.
2440  */
2441 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2442                                  struct ptlrpc_bulk_desc *desc,
2443                                  int nob)
2444 {
2445         struct ptlrpc_cli_ctx *ctx;
2446         int rc;
2447
2448         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2449
2450         if (!req->rq_pack_bulk)
2451                 return desc->bd_nob_transferred;
2452
2453         ctx = req->rq_cli_ctx;
2454         if (ctx->cc_ops->unwrap_bulk) {
2455                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2456                 if (rc < 0)
2457                         return rc;
2458         }
2459         return desc->bd_nob_transferred;
2460 }
2461 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2462
2463 /**
2464  * This is called after unwrapping the reply message.
2465  * Returns 0 on success or an error code.
2466  */
2467 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2468                                   struct ptlrpc_bulk_desc *desc)
2469 {
2470         struct ptlrpc_cli_ctx *ctx;
2471         int rc;
2472
2473         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2474
2475         if (!req->rq_pack_bulk)
2476                 return 0;
2477
2478         ctx = req->rq_cli_ctx;
2479         if (ctx->cc_ops->unwrap_bulk) {
2480                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2481                 if (rc < 0)
2482                         return rc;
2483         }
2484
2485         /*
2486          * if everything is going right, nob should equal nob_transferred.
2487          * in case of privacy mode, nob_transferred needs to be adjusted.
2488          */
2489         if (desc->bd_nob != desc->bd_nob_transferred) {
2490                 CERROR("nob %d doesn't match transferred nob %d\n",
2491                        desc->bd_nob, desc->bd_nob_transferred);
2492                 return -EPROTO;
2493         }
2494
2495         return 0;
2496 }
2497 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2498
2499 #ifdef HAVE_SERVER_SUPPORT
2500 /**
2501  * Perform transformation upon outgoing bulk read.
2502  */
2503 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2504                           struct ptlrpc_bulk_desc *desc)
2505 {
2506         struct ptlrpc_svc_ctx *ctx;
2507
2508         LASSERT(req->rq_bulk_read);
2509
2510         if (!req->rq_pack_bulk)
2511                 return 0;
2512
2513         ctx = req->rq_svc_ctx;
2514         if (ctx->sc_policy->sp_sops->wrap_bulk)
2515                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2516
2517         return 0;
2518 }
2519 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2520
2521 /**
2522  * Perform transformation upon incoming bulk write.
2523  */
2524 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2525                             struct ptlrpc_bulk_desc *desc)
2526 {
2527         struct ptlrpc_svc_ctx *ctx;
2528         int rc;
2529
2530         LASSERT(req->rq_bulk_write);
2531
2532         /*
2533          * if it's in privacy mode, transferred should >= expected; otherwise
2534          * transferred should == expected.
2535          */
2536         if (desc->bd_nob_transferred < desc->bd_nob ||
2537             (desc->bd_nob_transferred > desc->bd_nob &&
2538              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2539              SPTLRPC_BULK_SVC_PRIV)) {
2540                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2541                           desc->bd_nob_transferred, desc->bd_nob);
2542                 return -ETIMEDOUT;
2543         }
2544
2545         if (!req->rq_pack_bulk)
2546                 return 0;
2547
2548         ctx = req->rq_svc_ctx;
2549         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2550                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2551                 if (rc)
2552                         CERROR("error unwrapping bulk: %d\n", rc);
2553         }
2554
2555         /* return 0 to allow the reply to be sent */
2556         return 0;
2557 }
2558 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2559
2560 /**
2561  * Prepare buffers for incoming bulk write.
2562  */
2563 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2564                           struct ptlrpc_bulk_desc *desc)
2565 {
2566         struct ptlrpc_svc_ctx *ctx;
2567
2568         LASSERT(req->rq_bulk_write);
2569
2570         if (!req->rq_pack_bulk)
2571                 return 0;
2572
2573         ctx = req->rq_svc_ctx;
2574         if (ctx->sc_policy->sp_sops->prep_bulk)
2575                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2576
2577         return 0;
2578 }
2579 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2580
2581 #endif /* HAVE_SERVER_SUPPORT */
2582
2583 /*
2584  * user descriptor helpers
2585  */
2586
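/*
 * Size of the user descriptor needed for the current process, with the
 * supplementary group count capped at LUSTRE_MAX_GROUPS.
 */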
2587 int sptlrpc_current_user_desc_size(void)
2588 {
2589         int ngroups;
2590
2591         ngroups = current_cred()->group_info->ngroups;
2592
2593         if (ngroups > LUSTRE_MAX_GROUPS)
2594                 ngroups = LUSTRE_MAX_GROUPS;
2595         return sptlrpc_user_desc_size(ngroups);
2596 }
2597 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2598
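/*
 * Fill the user descriptor at \a offset of \a msg with the credentials
 * and supplementary groups of the current process.
 */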
2599 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2600 {
2601         struct ptlrpc_user_desc *pud;
2602         int ngroups;
2603
2604         pud = lustre_msg_buf(msg, offset, 0);
2605
2606         pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2607         pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2608         pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2609         pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2610         pud->pud_cap = cfs_curproc_cap_pack();
2611         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2612
2613         task_lock(current);
2614         ngroups = current_cred()->group_info->ngroups;
2615         if (pud->pud_ngroups > ngroups)
2616                 pud->pud_ngroups = ngroups;
2617 #ifdef HAVE_GROUP_INFO_GID
2618         memcpy(pud->pud_groups, current_cred()->group_info->gid,
2619                pud->pud_ngroups * sizeof(__u32));
2620 #else /* !HAVE_GROUP_INFO_GID */
2621         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2622                pud->pud_ngroups * sizeof(__u32));
2623 #endif /* HAVE_GROUP_INFO_GID */
2624         task_unlock(current);
2625
2626         return 0;
2627 }
2628 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2629
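/*
 * Byte-swap (if needed) and sanity check the user descriptor found at
 * \a offset of \a msg.
 */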
2630 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2631 {
2632         struct ptlrpc_user_desc *pud;
2633         int i;
2634
2635         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2636         if (!pud)
2637                 return -EINVAL;
2638
2639         if (swabbed) {
2640                 __swab32s(&pud->pud_uid);
2641                 __swab32s(&pud->pud_gid);
2642                 __swab32s(&pud->pud_fsuid);
2643                 __swab32s(&pud->pud_fsgid);
2644                 __swab32s(&pud->pud_cap);
2645                 __swab32s(&pud->pud_ngroups);
2646         }
2647
2648         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2649                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2650                 return -EINVAL;
2651         }
2652
2653         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2654             msg->lm_buflens[offset]) {
2655                 CERROR("%u groups are claimed but the buffer size is only %u\n",
2656                        pud->pud_ngroups, msg->lm_buflens[offset]);
2657                 return -EINVAL;
2658         }
2659
2660         if (swabbed) {
2661                 for (i = 0; i < pud->pud_ngroups; i++)
2662                         __swab32s(&pud->pud_groups[i]);
2663         }
2664
2665         return 0;
2666 }
2667 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2668
2669 /*
2670  * misc helpers
2671  */
2672
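/*
 * Return a printable name of the target this sec connects to, "c" for a
 * reverse sec, or "*" if unknown.
 */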
2673 const char *sec2target_str(struct ptlrpc_sec *sec)
2674 {
2675         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2676                 return "*";
2677         if (sec_is_reverse(sec))
2678                 return "c";
2679         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2680 }
2681 EXPORT_SYMBOL(sec2target_str);
2682
2683 /*
2684  * return true if the bulk data is protected
2685  */
2686 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2687 {
2688         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2689         case SPTLRPC_BULK_SVC_INTG:
2690         case SPTLRPC_BULK_SVC_PRIV:
2691                 return 1;
2692         default:
2693                 return 0;
2694         }
2695 }
2696 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2697
2698 /*
2699  * crypto API helper/alloc blkcipher
2700  */
2701
2702 /*
2703  * initialize/finalize
2704  */
2705
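/*
 * Bring up the sptlrpc layer: the garbage collector, the sptlrpc
 * configuration layer, the encryption page pool, the built-in null and
 * plain policies and the lprocfs entries; each step is unwound in reverse
 * order on failure.
 */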
2706 int sptlrpc_init(void)
2707 {
2708         int rc;
2709
2710         rwlock_init(&policy_lock);
2711
2712         rc = sptlrpc_gc_init();
2713         if (rc)
2714                 goto out;
2715
2716         rc = sptlrpc_conf_init();
2717         if (rc)
2718                 goto out_gc;
2719
2720         rc = sptlrpc_enc_pool_init();
2721         if (rc)
2722                 goto out_conf;
2723
2724         rc = sptlrpc_null_init();
2725         if (rc)
2726                 goto out_pool;
2727
2728         rc = sptlrpc_plain_init();
2729         if (rc)
2730                 goto out_null;
2731
2732         rc = sptlrpc_lproc_init();
2733         if (rc)
2734                 goto out_plain;
2735
2736         return 0;
2737
2738 out_plain:
2739         sptlrpc_plain_fini();
2740 out_null:
2741         sptlrpc_null_fini();
2742 out_pool:
2743         sptlrpc_enc_pool_fini();
2744 out_conf:
2745         sptlrpc_conf_fini();
2746 out_gc:
2747         sptlrpc_gc_fini();
2748 out:
2749         return rc;
2750 }
2751
2752 void sptlrpc_fini(void)
2753 {
2754         sptlrpc_lproc_fini();
2755         sptlrpc_plain_fini();
2756         sptlrpc_null_fini();
2757         sptlrpc_enc_pool_fini();
2758         sptlrpc_conf_fini();
2759         sptlrpc_gc_fini();
2760 }