/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <linux/user_namespace.h>
#include <linux/uidgid.h>
#include <linux/crypto.h>
#include <linux/key.h>

#include <libcfs/libcfs.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>
#include <libcfs/libcfs_crypto.h>

#include "ptlrpc_internal.h"
static int send_sepol;
module_param(send_sepol, int, 0644);
MODULE_PARM_DESC(send_sepol, "Client sends SELinux policy status");
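
/*
 * Illustrative usage sketch (not part of this file): with send_sepol=60 the
 * client refreshes the SELinux status at most once per minute; send_sepol=0
 * disables reporting, and send_sepol=-1 re-fetches the status on every check
 * (see sptlrpc_sepol_needs_check() below). Set at module load time with:
 *
 *	modprobe ptlrpc send_sepol=60
 */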

/**************************************************
 * policy registers
 **************************************************/

static rwlock_t policy_lock;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
	NULL,
};

int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
{
	__u16 number = policy->sp_policy;

	LASSERT(policy->sp_name);
	LASSERT(policy->sp_cops);
	LASSERT(policy->sp_sops);

	if (number >= SPTLRPC_POLICY_MAX)
		return -EINVAL;

	write_lock(&policy_lock);
	if (unlikely(policies[number])) {
		write_unlock(&policy_lock);
		return -EALREADY;
	}
	policies[number] = policy;
	write_unlock(&policy_lock);

	CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_register_policy);

int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
{
	__u16 number = policy->sp_policy;

	LASSERT(number < SPTLRPC_POLICY_MAX);

	write_lock(&policy_lock);
	if (unlikely(policies[number] == NULL)) {
		write_unlock(&policy_lock);
		CERROR("%s: already unregistered\n", policy->sp_name);
		return -EINVAL;
	}

	LASSERT(policies[number] == policy);
	policies[number] = NULL;
	write_unlock(&policy_lock);

	CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_unregister_policy);
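
/*
 * A minimal sketch of how a policy module is expected to use the pair of
 * calls above; my_policy, my_cli_ops and my_svc_ops are illustrative names,
 * not symbols from this file:
 *
 *	static struct ptlrpc_sec_policy my_policy = {
 *		.sp_owner  = THIS_MODULE,
 *		.sp_name   = "mypolicy",
 *		.sp_policy = SPTLRPC_POLICY_GSS,
 *		.sp_cops   = &my_cli_ops,
 *		.sp_sops   = &my_svc_ops,
 *	};
 *
 *	module init:  rc = sptlrpc_register_policy(&my_policy);
 *	module exit:  sptlrpc_unregister_policy(&my_policy);
 */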

static
struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
{
	static DEFINE_MUTEX(load_mutex);
	struct ptlrpc_sec_policy *policy;
	__u16 number = SPTLRPC_FLVR_POLICY(flavor);
	int rc;

	if (number >= SPTLRPC_POLICY_MAX)
		return NULL;

	while (1) {
		read_lock(&policy_lock);
		policy = policies[number];
		if (policy && !try_module_get(policy->sp_owner))
			policy = NULL;
		read_unlock(&policy_lock);

		if (policy != NULL || number != SPTLRPC_POLICY_GSS)
			break;

		/* try to load gss module, happens only if policy at index
		 * SPTLRPC_POLICY_GSS is not already referenced in
		 * global array policies[]
		 */
		mutex_lock(&load_mutex);
		/* The fact that request_module() returns 0 does not guarantee
		 * the module has done its job. So we must check that the
		 * requested policy is now available. This is done by checking
		 * again for policies[number] in the loop.
		 */
		rc = request_module("ptlrpc_gss");
		if (rc == 0)
			CDEBUG(D_SEC, "module ptlrpc_gss loaded on demand\n");
		else
			CERROR("Unable to load module ptlrpc_gss: rc %d\n", rc);
		mutex_unlock(&load_mutex);
	}

	return policy;
}

__u32 sptlrpc_name2flavor_base(const char *name)
{
	if (!strcmp(name, "null"))
		return SPTLRPC_FLVR_NULL;
	if (!strcmp(name, "plain"))
		return SPTLRPC_FLVR_PLAIN;
	if (!strcmp(name, "gssnull"))
		return SPTLRPC_FLVR_GSSNULL;
	if (!strcmp(name, "krb5n"))
		return SPTLRPC_FLVR_KRB5N;
	if (!strcmp(name, "krb5a"))
		return SPTLRPC_FLVR_KRB5A;
	if (!strcmp(name, "krb5i"))
		return SPTLRPC_FLVR_KRB5I;
	if (!strcmp(name, "krb5p"))
		return SPTLRPC_FLVR_KRB5P;
	if (!strcmp(name, "skn"))
		return SPTLRPC_FLVR_SKN;
	if (!strcmp(name, "ska"))
		return SPTLRPC_FLVR_SKA;
	if (!strcmp(name, "ski"))
		return SPTLRPC_FLVR_SKI;
	if (!strcmp(name, "skpi"))
		return SPTLRPC_FLVR_SKPI;

	return SPTLRPC_FLVR_INVALID;
}
EXPORT_SYMBOL(sptlrpc_name2flavor_base);

const char *sptlrpc_flavor2name_base(__u32 flvr)
{
	__u32 base = SPTLRPC_FLVR_BASE(flvr);

	if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
		return "null";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
		return "plain";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
		return "gssnull";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
		return "krb5n";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
		return "krb5a";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
		return "krb5i";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
		return "krb5p";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
		return "skn";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
		return "ska";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
		return "ski";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
		return "skpi";

	CERROR("invalid wire flavor 0x%x\n", flvr);
	return "invalid";
}
EXPORT_SYMBOL(sptlrpc_flavor2name_base);
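
/*
 * The two helpers above are inverses over the base flavor bits; a
 * hypothetical round trip looks like:
 *
 *	__u32 flvr = sptlrpc_name2flavor_base("krb5i");
 *	... flvr == SPTLRPC_FLVR_KRB5I ...
 *	... strcmp(sptlrpc_flavor2name_base(flvr), "krb5i") == 0 ...
 */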

char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
			       char *buf, int bufsize)
{
	if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
		snprintf(buf, bufsize, "hash:%s",
			 sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
	else
		snprintf(buf, bufsize, "%s",
			 sptlrpc_flavor2name_base(sf->sf_rpc));

	buf[bufsize - 1] = '\0';

	return buf;
}
EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);

char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
{
	int ln;

	ln = snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));

	/*
	 * currently we don't support customized bulk specification for
	 * flavors other than plain
	 */
	if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
		char bspec[16];

		bspec[0] = '-';
		sptlrpc_flavor2name_bulk(sf, bspec + 1, sizeof(bspec) - 1);
		strncat(buf, bspec, bufsize - ln);
	}

	buf[bufsize - 1] = '\0';

	return buf;
}
EXPORT_SYMBOL(sptlrpc_flavor2name);

char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
{
	buf[0] = '\0';

	if (flags & PTLRPC_SEC_FL_REVERSE)
		strlcat(buf, "reverse,", bufsize);
	if (flags & PTLRPC_SEC_FL_ROOTONLY)
		strlcat(buf, "rootonly,", bufsize);
	if (flags & PTLRPC_SEC_FL_UDESC)
		strlcat(buf, "udesc,", bufsize);
	if (flags & PTLRPC_SEC_FL_BULK)
		strlcat(buf, "bulk,", bufsize);
	if (buf[0] == '\0')
		strlcat(buf, "-,", bufsize);

	return buf;
}
EXPORT_SYMBOL(sptlrpc_secflags2str);
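
/*
 * For example (illustrative): flags == (PTLRPC_SEC_FL_ROOTONLY |
 * PTLRPC_SEC_FL_BULK) yields the string "rootonly,bulk,", while flags == 0
 * yields "-,".
 */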

/**************************************************
 * client context APIs
 **************************************************/

/* existingroot to tell we only want to fetch an already existing root ctx */
static
struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec, bool existingroot)
{
	struct vfs_cred vcred;
	int create = 1, remove_dead = 1;

	LASSERT(sec);
	LASSERT(sec->ps_policy->sp_cops->lookup_ctx);

	if (existingroot) {
		vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
		vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
		create = 0;
		remove_dead = 0;

		if (!(sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_ROOTONLY) &&
		    vcred.vc_uid)
			return ERR_PTR(-EINVAL);
	} else if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
					    PTLRPC_SEC_FL_ROOTONLY)) {
		vcred.vc_uid = 0;
		vcred.vc_gid = 0;
		if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
			create = 0;
			remove_dead = 0;
		}
	} else {
		vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
		vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
	}

	return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
						   remove_dead);
}

struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
	atomic_inc(&ctx->cc_refcount);
	return ctx;
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_get);

void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;

	LASSERT(sec);
	LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);

	if (!atomic_dec_and_test(&ctx->cc_refcount))
		return;

	sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
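
/*
 * Reference-counting sketch for a client context (assumed typical usage,
 * not a verbatim caller from this file):
 *
 *	ctx = get_my_ctx(sec, false);	(returns with the initial reference)
 *	...
 *	sptlrpc_cli_ctx_get(ctx);	(extra ref for another user)
 *	...
 *	sptlrpc_cli_ctx_put(ctx, 1);	(last put releases the context; with
 *					 sync != 0 the policy may send the
 *					 context-destroying RPC synchronously)
 */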

/**
 * Expire the client context immediately.
 *
 * \pre Caller must hold at least 1 reference on the \a ctx.
 */
void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
	LASSERT(ctx->cc_ops->die);
	ctx->cc_ops->die(ctx, 0);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);

/**
 * To wake up the threads who are waiting for this client context. Called
 * after some status change happened on \a ctx.
 */
void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
{
	struct ptlrpc_request *req, *next;

	spin_lock(&ctx->cc_lock);
	list_for_each_entry_safe(req, next, &ctx->cc_req_list,
				 rq_ctx_chain) {
		list_del_init(&req->rq_ctx_chain);
		ptlrpc_client_wake_req(req);
	}
	spin_unlock(&ctx->cc_lock);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);

int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
{
	LASSERT(ctx->cc_ops);

	if (ctx->cc_ops->display == NULL)
		return 0;

	return ctx->cc_ops->display(ctx, buf, bufsize);
}

static int import_sec_check_expire(struct obd_import *imp)
{
	int adapt = 0;

	write_lock(&imp->imp_sec_lock);
	if (imp->imp_sec_expire &&
	    imp->imp_sec_expire < ktime_get_real_seconds()) {
		adapt = 1;
		imp->imp_sec_expire = 0;
	}
	write_unlock(&imp->imp_sec_lock);

	if (!adapt)
		return 0;

	CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
	return sptlrpc_import_sec_adapt(imp, NULL, NULL);
}

/**
 * Get and validate the client side ptlrpc security facilities from
 * \a imp. There is a race condition on client reconnect when the import is
 * being destroyed while there are outstanding client bound requests. In
 * this case do not output any error messages if import security is not
 * found.
 *
 * \param[in] imp obd import associated with client
 * \param[out] sec client side ptlrpc security
 *
 * \retval 0 if security retrieved successfully
 * \retval -ve errno if there was a problem
 */
static int import_sec_validate_get(struct obd_import *imp,
				   struct ptlrpc_sec **sec)
{
	int rc;

	if (unlikely(imp->imp_sec_expire)) {
		rc = import_sec_check_expire(imp);
		if (rc)
			return rc;
	}

	*sec = sptlrpc_import_sec_ref(imp);
	if (*sec == NULL) {
		/* Only output an error when the import is still active */
		if (!test_bit(WORK_STRUCT_PENDING_BIT,
			      work_data_bits(&imp->imp_zombie_work)))
			CERROR("import %p (%s) with no sec\n",
			       imp, ptlrpc_import_state_name(imp->imp_state));
		return -EACCES;
	}

	if (unlikely((*sec)->ps_dying)) {
		CERROR("attempt to use dying sec %p\n", sec);
		sptlrpc_sec_put(*sec);
		return -EACCES;
	}

	return 0;
}

/**
 * Given a \a req, find or allocate an appropriate context for it.
 * \pre req->rq_cli_ctx == NULL.
 *
 * \retval 0 succeed, and req->rq_cli_ctx is set.
 * \retval -ev error number, and req->rq_cli_ctx == NULL.
 */
int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
{
	struct obd_import *imp = req->rq_import;
	struct ptlrpc_sec *sec;
	int rc;

	ENTRY;

	LASSERT(!req->rq_cli_ctx);
	LASSERT(imp);

	rc = import_sec_validate_get(imp, &sec);
	if (rc)
		RETURN(rc);

	req->rq_cli_ctx = get_my_ctx(sec, false);

	sptlrpc_sec_put(sec);

	if (!req->rq_cli_ctx) {
		rc = -ECONNREFUSED;
	} else if (IS_ERR(req->rq_cli_ctx)) {
		rc = PTR_ERR(req->rq_cli_ctx);
		req->rq_cli_ctx = NULL;
	}

	if (rc)
		CERROR("%s: fail to get context for req %p: rc = %d\n",
		       imp->imp_obd->obd_name, req, rc);

	RETURN(rc);
}

/**
 * Drop the context for \a req.
 * \pre req->rq_cli_ctx != NULL.
 * \post req->rq_cli_ctx == NULL.
 *
 * If \a sync == 0, this function should return quickly without sleep;
 * otherwise it might trigger and wait for the whole process of sending
 * a context-destroying rpc to server.
 */
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
	ENTRY;

	LASSERT(req);
	LASSERT(req->rq_cli_ctx);

	/*
	 * request might be asked to release earlier while still
	 * in the context waiting list.
	 */
	if (!list_empty(&req->rq_ctx_chain)) {
		spin_lock(&req->rq_cli_ctx->cc_lock);
		list_del_init(&req->rq_ctx_chain);
		spin_unlock(&req->rq_cli_ctx->cc_lock);
	}

	sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
	req->rq_cli_ctx = NULL;
	EXIT;
}

static
int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
			   struct ptlrpc_cli_ctx *oldctx,
			   struct ptlrpc_cli_ctx *newctx)
{
	struct sptlrpc_flavor old_flvr;
	char *reqmsg = NULL; /* to workaround old gcc */
	int reqmsg_size;
	int rc = 0;

	CDEBUG(D_SEC,
	       "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
	       req, oldctx, oldctx->cc_vcred.vc_uid,
	       sec2target_str(oldctx->cc_sec), newctx, newctx->cc_vcred.vc_uid,
	       sec2target_str(newctx->cc_sec), oldctx->cc_sec,
	       oldctx->cc_sec->ps_policy->sp_name, newctx->cc_sec,
	       newctx->cc_sec->ps_policy->sp_name);

	/* save flavor */
	old_flvr = req->rq_flvr;

	/* save request message */
	reqmsg_size = req->rq_reqlen;
	if (reqmsg_size != 0) {
		LASSERT(req->rq_reqmsg);
		OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
		if (reqmsg == NULL)
			return -ENOMEM;
		memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
	}

	/* release old req/rep buf */
	req->rq_cli_ctx = oldctx;
	sptlrpc_cli_free_reqbuf(req);
	sptlrpc_cli_free_repbuf(req);
	req->rq_cli_ctx = newctx;

	/* recalculate the flavor */
	sptlrpc_req_set_flavor(req, 0);

	/*
	 * alloc new request buffer
	 * we don't need to alloc reply buffer here, leave it to the
	 * rest procedure of ptlrpc
	 */
	if (reqmsg_size != 0) {
		rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
		if (!rc) {
			LASSERT(req->rq_reqmsg);
			memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
		} else {
			CWARN("failed to alloc reqbuf: %d\n", rc);
			req->rq_flvr = old_flvr;
		}

		OBD_FREE_LARGE(reqmsg, reqmsg_size);
	}
	return rc;
}

/**
 * If current context of \a req is dead somehow, e.g. we just switched flavor
 * thus marked original contexts dead, we'll find a new context for it. If
 * no switch is needed, \a req will end up with the same context.
 *
 * \note a request must have a context, to keep other parts of code happy.
 * In any case of failure during the switching, we must restore the old one.
 */
int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req,
				 struct ptlrpc_sec *sec)
{
	struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
	struct ptlrpc_cli_ctx *newctx;
	int rc;

	ENTRY;

	LASSERT(oldctx);

	sptlrpc_cli_ctx_get(oldctx);
	sptlrpc_req_put_ctx(req, 0);

	/* If sec is provided, we must use the existing context for root that
	 * it references. If not root, or no existing context, or same context,
	 * just fail replacing the dead context.
	 */
	if (sec) {
		newctx = get_my_ctx(sec, true);
		if (!newctx)
			GOTO(restore, rc = -EINVAL);
		if (IS_ERR(newctx))
			GOTO(restore, rc = PTR_ERR(newctx));
		if (newctx == oldctx) {
			sptlrpc_cli_ctx_put(newctx, 0);
			GOTO(restore, rc = -ENODATA);
		}

		/* Because we are replacing an erroneous ctx, new sec ctx is
		 * expected to have higher imp generation or same imp generation
		 * but higher imp connection count.
		 */
		if (newctx->cc_impgen < oldctx->cc_impgen ||
		    (newctx->cc_impgen == oldctx->cc_impgen &&
		     newctx->cc_impconncnt <= oldctx->cc_impconncnt))
			CERROR("ctx (%p, fl %lx) will switch, but does not look more recent than old ctx: imp gen %d vs %d, imp conn cnt %d vs %d\n",
			       newctx, newctx->cc_flags,
			       newctx->cc_impgen, oldctx->cc_impgen,
			       newctx->cc_impconncnt, oldctx->cc_impconncnt);

		req->rq_cli_ctx = newctx;
	} else {
		rc = sptlrpc_req_get_ctx(req);
		if (unlikely(rc)) {
			LASSERT(!req->rq_cli_ctx);

			/* restore old ctx */
			GOTO(restore, rc);
		}

		newctx = req->rq_cli_ctx;
	}
	LASSERT(newctx);

	if (unlikely(newctx == oldctx &&
		     test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
		/*
		 * still get the old dead ctx, usually means system too busy
		 */
		CDEBUG(D_SEC,
		       "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
		       newctx, newctx->cc_flags);

		schedule_timeout_interruptible(cfs_time_seconds(1));
	} else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
			    == 0)) {
		/*
		 * new ctx not up to date yet
		 */
		CDEBUG(D_SEC,
		       "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
		       newctx, newctx->cc_flags);
	} else {
		/*
		 * it's possible newctx == oldctx if we're switching
		 * subflavor with the same sec.
		 */
		rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
		if (rc) {
			/* restore old ctx */
			sptlrpc_req_put_ctx(req, 0);
			req->rq_cli_ctx = oldctx;
			RETURN(rc);
		}

		LASSERT(req->rq_cli_ctx == newctx);
	}

	sptlrpc_cli_ctx_put(oldctx, 1);
	RETURN(0);

restore:
	req->rq_cli_ctx = oldctx;
	RETURN(rc);
}
EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);

static
int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
{
	if (cli_ctx_is_refreshed(ctx))
		return 1;
	return 0;
}

static
void ctx_refresh_interrupt(struct ptlrpc_request *req)
{
	spin_lock(&req->rq_lock);
	req->rq_intr = 1;
	spin_unlock(&req->rq_lock);
}

static
void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
{
	spin_lock(&ctx->cc_lock);
	if (!list_empty(&req->rq_ctx_chain))
		list_del_init(&req->rq_ctx_chain);
	spin_unlock(&ctx->cc_lock);
}

/**
 * To refresh the context of \a req, if it's not up-to-date.
 * \param timeout
 * - == 0: do not wait
 * - == MAX_SCHEDULE_TIMEOUT: wait indefinitely
 * - > 0: not supported
 *
 * The status of the context could be subject to be changed by other threads
 * at any time. We allow this race, but once we return with 0, the caller will
 * suppose it's uptodated and keep using it until the owning rpc is done.
 *
 * \retval 0 only if the context is uptodated.
 * \retval -ev error number.
 */
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec *sec;
	int rc;

	ENTRY;

	LASSERT(ctx);

	if (req->rq_ctx_init || req->rq_ctx_fini)
		RETURN(0);

	if (timeout != 0 && timeout != MAX_SCHEDULE_TIMEOUT) {
		CERROR("req %p: invalid timeout %lu\n", req, timeout);
		RETURN(-EINVAL);
	}

	/*
	 * during the process a request's context might change type even
	 * (e.g. from gss ctx to null ctx), so each loop we need to re-check
	 * everything
	 */
again:
	rc = import_sec_validate_get(req->rq_import, &sec);
	if (rc)
		RETURN(rc);

	if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
		CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
		       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
		req_off_ctx_list(req, ctx);
		sptlrpc_req_replace_dead_ctx(req, NULL);
		ctx = req->rq_cli_ctx;
	}

	if (cli_ctx_is_eternal(ctx))
		GOTO(out_sec_put, rc = 0);

	if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
		if (ctx->cc_ops->refresh)
			ctx->cc_ops->refresh(ctx);
	}
	LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);

	LASSERT(ctx->cc_ops->validate);
	if (ctx->cc_ops->validate(ctx) == 0) {
		req_off_ctx_list(req, ctx);
		GOTO(out_sec_put, rc = 0);
	}

	if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
		if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) &&
		    sptlrpc_req_replace_dead_ctx(req, sec) == 0) {
			ctx = req->rq_cli_ctx;
			sptlrpc_sec_put(sec);
			goto again;
		}

		spin_lock(&req->rq_lock);
		req->rq_err = 1;
		spin_unlock(&req->rq_lock);
		req_off_ctx_list(req, ctx);
		GOTO(out_sec_put, rc = -EPERM);
	}

	sptlrpc_sec_put(sec);

	/*
	 * There's a subtle issue for resending RPCs, suppose following
	 * situation:
	 * 1. the request was sent to server.
	 * 2. recovery was kicked start, after finished the request was
	 *    dropped.
	 * 3. resend the request.
	 * 4. old reply from server received, we accept and verify the reply.
	 *    this has to be success, otherwise the error will be aware
	 *    by application.
	 * 5. new reply from server received, dropped by LNet.
	 *
	 * Note the xid of old & new request is the same. We can't simply
	 * change xid for the resent request because the server replies on
	 * it for reply reconstruction.
	 *
	 * Commonly the original context should be uptodate because we
	 * have an expiry nice time; server will keep its context because
	 * we at least hold a ref of old context which prevent context
	 * from destroying RPC being sent. So server still can accept the
	 * request and finish the RPC. But if that's not the case:
	 * 1. If server side context has been trimmed, a NO_CONTEXT will
	 *    be returned, gss_cli_ctx_verify/unseal will switch to new
	 *    context by force.
	 * 2. Current context never be refreshed, then we are fine: we
	 *    never really send request with old context before.
	 */
	if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
	    unlikely(req->rq_reqmsg) &&
	    lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
		req_off_ctx_list(req, ctx);
		RETURN(0);
	}

	if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
		req_off_ctx_list(req, ctx);
		/*
		 * don't switch ctx if import was deactivated
		 */
		if (req->rq_import->imp_deactive) {
			spin_lock(&req->rq_lock);
			req->rq_err = 1;
			spin_unlock(&req->rq_lock);
			RETURN(-EINTR);
		}

		rc = sptlrpc_req_replace_dead_ctx(req, NULL);
		if (rc) {
			LASSERT(ctx == req->rq_cli_ctx);
			CERROR("req %p: failed to replace dead ctx %p: %d\n",
			       req, ctx, rc);
			spin_lock(&req->rq_lock);
			req->rq_err = 1;
			spin_unlock(&req->rq_lock);
			RETURN(rc);
		}

		ctx = req->rq_cli_ctx;
		goto again;
	}

	/*
	 * Now we're sure this context is during upcall, add myself into
	 * waiting list
	 */
	spin_lock(&ctx->cc_lock);
	if (list_empty(&req->rq_ctx_chain))
		list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
	spin_unlock(&ctx->cc_lock);

	if (timeout == 0)
		RETURN(-EAGAIN);

	/* Clear any flags that may be present from previous sends */
	LASSERT(req->rq_receiving_reply == 0);
	spin_lock(&req->rq_lock);
	req->rq_err = 0;
	req->rq_timedout = 0;
	req->rq_resend = 0;
	req->rq_restart = 0;
	spin_unlock(&req->rq_lock);

	/* by now we know that timeout value is MAX_SCHEDULE_TIMEOUT,
	 * so wait indefinitely with non-fatal signals blocked
	 */
	if (l_wait_event_abortable(req->rq_reply_waitq,
				   ctx_check_refresh(ctx)) == -ERESTARTSYS) {
		rc = -EINTR;
		ctx_refresh_interrupt(req);
	}

	/*
	 * following cases could lead us here:
	 * - successfully refreshed;
	 * - interrupted;
	 * - timedout, and we don't want recover from the failure;
	 * - timedout, and waked up upon recovery finished;
	 * - someone else mark this ctx dead by force;
	 * - someone invalidate the req and call ptlrpc_client_wake_req(),
	 *   e.g. ptlrpc_abort_inflight();
	 */
	if (!cli_ctx_is_refreshed(ctx)) {
		/* timed out or interrupted */
		req_off_ctx_list(req, ctx);

		RETURN(rc ? rc : -EINTR);
	}

	RETURN(0);

out_sec_put:
	sptlrpc_sec_put(sec);
	RETURN(rc);
}
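
/*
 * Typical call patterns for sptlrpc_req_refresh_ctx(), sketched from the
 * timeout rules documented above (return values are assumptions consistent
 * with this reconstruction, not a contract):
 *
 *	rc = sptlrpc_req_refresh_ctx(req, 0);
 *		poll once; fails fast (e.g. -EAGAIN) if the context is
 *		still being refreshed by the upcall
 *	rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);
 *		block (abortable) until the context is refreshed, dies,
 *		or the wait is interrupted
 */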

/* Bring ptlrpc_sec context up-to-date */
int sptlrpc_export_update_ctx(struct obd_export *exp)
{
	struct obd_import *imp = exp ? exp->exp_imp_reverse : NULL;
	struct ptlrpc_sec *sec = NULL;
	struct ptlrpc_cli_ctx *ctx = NULL;
	int rc = 0;

	if (imp)
		sec = sptlrpc_import_sec_ref(imp);
	if (sec) {
		ctx = get_my_ctx(sec, false);

		sptlrpc_sec_put(sec);
	}

	if (ctx && !IS_ERR(ctx)) {
		if (ctx->cc_ops->refresh)
			rc = ctx->cc_ops->refresh(ctx);
		sptlrpc_cli_ctx_put(ctx, 1);
	}
	return rc;
}

/**
 * Initialize flavor settings for \a req, according to \a opcode.
 *
 * \note this could be called in two situations:
 * - new request from ptlrpc_pre_req(), with proper @opcode
 * - old request which changed ctx in the middle, with @opcode == 0
 */
void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
{
	struct ptlrpc_sec *sec;

	LASSERT(req->rq_import);
	LASSERT(req->rq_cli_ctx);
	LASSERT(req->rq_cli_ctx->cc_sec);
	LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);

	/* special security flags according to opcode */
	switch (opcode) {
	case OST_READ:
	case MDS_READPAGE:
	case MGS_CONFIG_READ:
	case OBD_IDX_READ:
		req->rq_bulk_read = 1;
		break;
	case OST_WRITE:
	case MDS_WRITEPAGE:
		req->rq_bulk_write = 1;
		break;
	case SEC_CTX_INIT:
		req->rq_ctx_init = 1;
		break;
	case SEC_CTX_FINI:
		req->rq_ctx_fini = 1;
		break;
	case 0:
		/* init/fini rpc won't be resend, so can't be here */
		LASSERT(req->rq_ctx_init == 0);
		LASSERT(req->rq_ctx_fini == 0);

		/* cleanup flags, which should be recalculated */
		req->rq_pack_udesc = 0;
		req->rq_pack_bulk = 0;
		break;
	}

	sec = req->rq_cli_ctx->cc_sec;

	spin_lock(&sec->ps_lock);
	req->rq_flvr = sec->ps_flvr;
	spin_unlock(&sec->ps_lock);

	/*
	 * force SVC_NULL for context initiation rpc, SVC_INTG for context
	 * destruction rpc
	 */
	if (unlikely(req->rq_ctx_init))
		flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
	else if (unlikely(req->rq_ctx_fini))
		flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);

	/* user descriptor flag, null security can't do it anyway */
	if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
	    (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
		req->rq_pack_udesc = 1;

	/* bulk security flag */
	if ((req->rq_bulk_read || req->rq_bulk_write) &&
	    sptlrpc_flavor_has_bulk(&req->rq_flvr))
		req->rq_pack_bulk = 1;
}

void sptlrpc_request_out_callback(struct ptlrpc_request *req)
{
	if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
		return;

	LASSERT(req->rq_clrbuf);
	if (req->rq_pool || !req->rq_reqbuf)
		return;

	OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
	req->rq_reqbuf = NULL;
	req->rq_reqbuf_len = 0;
}

/**
 * Given an import \a imp, check whether current user has a valid context
 * or not. We may create a new context and try to refresh it, and try
 * repeatedly in case of non-fatal errors. Return 0 means success.
 */
int sptlrpc_import_check_ctx(struct obd_import *imp)
{
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;
	struct ptlrpc_request *req = NULL;
	int rc;

	ENTRY;

	might_sleep();

	sec = sptlrpc_import_sec_ref(imp);
	ctx = get_my_ctx(sec, false);
	sptlrpc_sec_put(sec);

	if (IS_ERR(ctx))
		RETURN(PTR_ERR(ctx));
	else if (!ctx)
		RETURN(-ENOMEM);

	if (cli_ctx_is_eternal(ctx) ||
	    ctx->cc_ops->validate(ctx) == 0) {
		sptlrpc_cli_ctx_put(ctx, 1);
		RETURN(0);
	}

	if (cli_ctx_is_error(ctx)) {
		sptlrpc_cli_ctx_put(ctx, 1);
		RETURN(-EACCES);
	}

	req = ptlrpc_request_cache_alloc(GFP_NOFS);
	if (!req)
		RETURN(-ENOMEM);

	ptlrpc_cli_req_init(req);
	atomic_set(&req->rq_refcount, 10000);

	req->rq_import = imp;
	req->rq_flvr = sec->ps_flvr;
	req->rq_cli_ctx = ctx;

	rc = sptlrpc_req_refresh_ctx(req, MAX_SCHEDULE_TIMEOUT);
	LASSERT(list_empty(&req->rq_ctx_chain));
	sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
	ptlrpc_request_cache_free(req);

	RETURN(rc);
}

/**
 * Used by ptlrpc client, to perform the pre-defined security transformation
 * upon the request message of \a req. After this function called,
 * req->rq_reqmsg is still accessible as clear text.
 */
int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	int rc = 0;

	ENTRY;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(req->rq_reqbuf || req->rq_clrbuf);

	/*
	 * we wrap bulk request here because now we can be sure
	 * the context is uptodate.
	 */
	if (req->rq_bulk) {
		rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
		if (rc)
			RETURN(rc);
	}

	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		LASSERT(ctx->cc_ops->sign);
		rc = ctx->cc_ops->sign(ctx, req);
		break;
	case SPTLRPC_SVC_PRIV:
		LASSERT(ctx->cc_ops->seal);
		rc = ctx->cc_ops->seal(ctx, req);
		break;
	default:
		LBUG();
	}

	if (rc == 0) {
		LASSERT(req->rq_reqdata_len);
		LASSERT(req->rq_reqdata_len % 8 == 0);
		LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
	}

	RETURN(rc);
}

static int do_cli_unwrap_reply(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	int rc;

	ENTRY;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(req->rq_repbuf);
	LASSERT(req->rq_repdata);
	LASSERT(req->rq_repmsg == NULL);

	req->rq_rep_swab_mask = 0;

	rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
	switch (rc) {
	case 1:
		req_capsule_set_rep_swabbed(&req->rq_pill,
					    MSG_PTLRPC_HEADER_OFF);
	case 0:
		break;
	default:
		CERROR("failed unpack reply: x%llu\n", req->rq_xid);
		RETURN(-EPROTO);
	}

	if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
		CERROR("replied data length %d too small\n",
		       req->rq_repdata_len);
		RETURN(-EPROTO);
	}

	if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
	    SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
		CERROR("reply policy %u doesn't match request policy %u\n",
		       SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
		       SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
		RETURN(-EPROTO);
	}

	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		LASSERT(ctx->cc_ops->verify);
		rc = ctx->cc_ops->verify(ctx, req);
		break;
	case SPTLRPC_SVC_PRIV:
		LASSERT(ctx->cc_ops->unseal);
		rc = ctx->cc_ops->unseal(ctx, req);
		break;
	default:
		LBUG();
	}
	LASSERT(rc || req->rq_repmsg || req->rq_resend);

	if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
	    !req->rq_ctx_init)
		req->rq_rep_swab_mask = 0;

	RETURN(rc);
}

/**
 * Used by ptlrpc client, to perform security transformation upon the reply
 * message of \a req. After return successfully, req->rq_repmsg points to
 * the reply message in clear text.
 *
 * \pre the reply buffer should have been un-posted from LNet, so nothing is
 * going to change.
 */
int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
{
	LASSERT(req->rq_repbuf);
	LASSERT(req->rq_repdata == NULL);
	LASSERT(req->rq_repmsg == NULL);
	LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);

	if (req->rq_reply_off == 0 &&
	    (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
		CERROR("real reply with offset 0\n");
		return -EPROTO;
	}

	if (req->rq_reply_off % 8 != 0) {
		CERROR("reply at odd offset %u\n", req->rq_reply_off);
		return -EPROTO;
	}

	req->rq_repdata = (struct lustre_msg *)
			  (req->rq_repbuf + req->rq_reply_off);
	req->rq_repdata_len = req->rq_nob_received;

	return do_cli_unwrap_reply(req);
}

/**
 * Used by ptlrpc client, to perform security transformation upon the early
 * reply message of \a req. We expect the rq_reply_off is 0, and
 * rq_nob_received is the early reply size.
 *
 * Because the receive buffer might be still posted, the reply data might be
 * changed at any time, no matter we're holding rq_lock or not. For this reason
 * we allocate a separate ptlrpc_request and reply buffer for early reply
 * processing.
 *
 * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
 * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
 * \a *req_ret to release it.
 * \retval -ev error number, and \a req_ret will not be set.
 */
int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
				   struct ptlrpc_request **req_ret)
{
	struct ptlrpc_request *early_req;
	char *early_buf;
	int early_bufsz, early_size;
	int rc;

	ENTRY;

	early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
	if (early_req == NULL)
		RETURN(-ENOMEM);

	ptlrpc_cli_req_init(early_req);

	early_size = req->rq_nob_received;
	early_bufsz = size_roundup_power2(early_size);
	OBD_ALLOC_LARGE(early_buf, early_bufsz);
	if (early_buf == NULL)
		GOTO(err_req, rc = -ENOMEM);

	/* sanity checkings and copy data out, do it inside spinlock */
	spin_lock(&req->rq_lock);

	if (req->rq_replied) {
		spin_unlock(&req->rq_lock);
		GOTO(err_buf, rc = -EALREADY);
	}

	LASSERT(req->rq_repbuf);
	LASSERT(req->rq_repdata == NULL);
	LASSERT(req->rq_repmsg == NULL);

	if (req->rq_reply_off != 0) {
		CERROR("early reply with offset %u\n", req->rq_reply_off);
		spin_unlock(&req->rq_lock);
		GOTO(err_buf, rc = -EPROTO);
	}

	if (req->rq_nob_received != early_size) {
		/* even another early arrived the size should be the same */
		CERROR("data size has changed from %u to %u\n",
		       early_size, req->rq_nob_received);
		spin_unlock(&req->rq_lock);
		GOTO(err_buf, rc = -EINVAL);
	}

	if (req->rq_nob_received < sizeof(struct lustre_msg)) {
		CERROR("early reply length %d too small\n",
		       req->rq_nob_received);
		spin_unlock(&req->rq_lock);
		GOTO(err_buf, rc = -EALREADY);
	}

	memcpy(early_buf, req->rq_repbuf, early_size);
	spin_unlock(&req->rq_lock);

	early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
	early_req->rq_flvr = req->rq_flvr;
	early_req->rq_repbuf = early_buf;
	early_req->rq_repbuf_len = early_bufsz;
	early_req->rq_repdata = (struct lustre_msg *) early_buf;
	early_req->rq_repdata_len = early_size;
	early_req->rq_early = 1;
	early_req->rq_reqmsg = req->rq_reqmsg;

	rc = do_cli_unwrap_reply(early_req);
	if (rc) {
		DEBUG_REQ(D_ADAPTTO, early_req,
			  "unwrap early reply: rc = %d", rc);
		GOTO(err_ctx, rc);
	}

	LASSERT(early_req->rq_repmsg);
	*req_ret = early_req;
	RETURN(0);

err_ctx:
	sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
err_buf:
	OBD_FREE_LARGE(early_buf, early_bufsz);
err_req:
	ptlrpc_request_cache_free(early_req);
	RETURN(rc);
}

/**
 * Used by ptlrpc client, to release a processed early reply \a early_req.
 *
 * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
 */
void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
{
	LASSERT(early_req->rq_repbuf);
	LASSERT(early_req->rq_repdata);
	LASSERT(early_req->rq_repmsg);

	sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
	OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
	ptlrpc_request_cache_free(early_req);
}
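
/*
 * Sketch of the early-reply handling sequence implied by the two functions
 * above (the caller name is an assumption, e.g. the AT early-reply path in
 * the ptlrpc client code):
 *
 *	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *	if (rc == 0) {
 *		... read fields from early_req->rq_repmsg ...
 *		sptlrpc_cli_finish_early_reply(early_req);
 *	}
 *
 * Copying into a private buffer is what makes it safe to parse the early
 * reply while the real reply buffer is still posted to LNet.
 */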

/**************************************************
 * sec ID
 **************************************************/

/*
 * "fixed" sec (e.g. null) use sec_id < 0
 */
static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);

int sptlrpc_get_next_secid(void)
{
	return atomic_inc_return(&sptlrpc_sec_id);
}
EXPORT_SYMBOL(sptlrpc_get_next_secid);

/**************************************************
 * client side high-level security APIs
 **************************************************/

static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
				   int grace, int force)
{
	struct ptlrpc_sec_policy *policy = sec->ps_policy;

	LASSERT(policy->sp_cops);
	LASSERT(policy->sp_cops->flush_ctx_cache);

	return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
}

static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
{
	struct ptlrpc_sec_policy *policy = sec->ps_policy;
	struct sptlrpc_sepol *sepol;

	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(policy->sp_cops->destroy_sec);

	CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);

	spin_lock(&sec->ps_lock);
	sec->ps_sepol_checknext = ktime_set(0, 0);
	sepol = rcu_dereference_protected(sec->ps_sepol, 1);
	rcu_assign_pointer(sec->ps_sepol, NULL);
	spin_unlock(&sec->ps_lock);

	sptlrpc_sepol_put(sepol);

	policy->sp_cops->destroy_sec(sec);
	sptlrpc_policy_put(policy);
}

void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
{
	sec_cop_destroy_sec(sec);
}
EXPORT_SYMBOL(sptlrpc_sec_destroy);

static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
	LASSERT(atomic_read(&(sec)->ps_refcount) > 0);

	if (sec->ps_policy->sp_cops->kill_sec) {
		sec->ps_policy->sp_cops->kill_sec(sec);

		sec_cop_flush_ctx_cache(sec, -1, 1, 1);
	}
}

struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
{
	if (sec)
		atomic_inc(&sec->ps_refcount);

	return sec;
}
EXPORT_SYMBOL(sptlrpc_sec_get);

void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
	if (sec) {
		LASSERT(atomic_read(&(sec)->ps_refcount) > 0);

		if (atomic_dec_and_test(&sec->ps_refcount)) {
			sptlrpc_gc_del_sec(sec);
			sec_cop_destroy_sec(sec);
		}
	}
}
EXPORT_SYMBOL(sptlrpc_sec_put);

/*
 * policy module is responsible for taking reference of import
 */
static
struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
				       struct ptlrpc_svc_ctx *svc_ctx,
				       struct sptlrpc_flavor *sf,
				       enum lustre_sec_part sp)
{
	struct ptlrpc_sec_policy *policy;
	struct ptlrpc_sec *sec;
	char str[32];

	ENTRY;

	if (svc_ctx) {
		LASSERT(imp->imp_dlm_fake == 1);

		CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
		       imp->imp_obd->obd_type->typ_name,
		       imp->imp_obd->obd_name,
		       sptlrpc_flavor2name(sf, str, sizeof(str)));

		policy = sptlrpc_policy_get(svc_ctx->sc_policy);
		sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
	} else {
		LASSERT(imp->imp_dlm_fake == 0);

		CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
		       imp->imp_obd->obd_type->typ_name,
		       imp->imp_obd->obd_name,
		       sptlrpc_flavor2name(sf, str, sizeof(str)));

		policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
		if (!policy) {
			CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
			RETURN(NULL);
		}
	}

	sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
	if (sec) {
		atomic_inc(&sec->ps_refcount);

		sec->ps_part = sp;

		if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
			sptlrpc_gc_add_sec(sec);
	} else {
		sptlrpc_policy_put(policy);
	}

	RETURN(sec);
}

static int print_srpc_serverctx_seq(struct obd_export *exp, void *cb_data)
{
	struct seq_file *m = cb_data;
	struct obd_import *imp = exp->exp_imp_reverse;
	struct ptlrpc_sec *sec = NULL;

	if (imp)
		sec = sptlrpc_import_sec_ref(imp);
	if (sec == NULL)
		goto out;

	if (sec->ps_policy->sp_cops->display)
		sec->ps_policy->sp_cops->display(sec, m);

	sptlrpc_sec_put(sec);
out:
	return 0;
}

int lprocfs_srpc_serverctx_seq_show(struct seq_file *m, void *data)
{
	struct obd_device *obd = m->private;
	struct obd_export *exp, *n;

	spin_lock(&obd->obd_dev_lock);
	list_for_each_entry_safe(exp, n, &obd->obd_exports, exp_obd_chain) {
		print_srpc_serverctx_seq(exp, m);
	}
	spin_unlock(&obd->obd_dev_lock);

	return 0;
}
EXPORT_SYMBOL(lprocfs_srpc_serverctx_seq_show);

struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
{
	struct ptlrpc_sec *sec;

	read_lock(&imp->imp_sec_lock);
	sec = sptlrpc_sec_get(imp->imp_sec);
	read_unlock(&imp->imp_sec_lock);

	return sec;
}
EXPORT_SYMBOL(sptlrpc_import_sec_ref);

static void sptlrpc_import_sec_install(struct obd_import *imp,
				       struct ptlrpc_sec *sec)
{
	struct ptlrpc_sec *old_sec;

	LASSERT(atomic_read(&(sec)->ps_refcount) > 0);

	write_lock(&imp->imp_sec_lock);
	old_sec = imp->imp_sec;
	imp->imp_sec = sec;
	write_unlock(&imp->imp_sec_lock);

	if (old_sec) {
		sptlrpc_sec_kill(old_sec);

		/* balance the ref taken by this import */
		sptlrpc_sec_put(old_sec);
	}
}

static inline
int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
{
	return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
}

static inline
void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
{
	*dst = *src;
}

/**
 * To get an appropriate ptlrpc_sec for the \a imp, according to the current
 * configuration. Upon called, imp->imp_sec may or may not be NULL.
 *
 * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
 * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
 */
int sptlrpc_import_sec_adapt(struct obd_import *imp,
			     struct ptlrpc_svc_ctx *svc_ctx,
			     struct sptlrpc_flavor *flvr)
{
	struct ptlrpc_connection *conn;
	struct sptlrpc_flavor sf;
	struct ptlrpc_sec *sec, *newsec;
	enum lustre_sec_part sp;
	char str[24];
	int rc = 0;

	ENTRY;

	might_sleep();

	if (imp == NULL)
		RETURN(0);

	conn = imp->imp_connection;

	if (svc_ctx == NULL) {
		struct client_obd *cliobd = &imp->imp_obd->u.cli;
		/*
		 * normal import, determine flavor from rule set, except
		 * for mgc the flavor is predetermined.
		 */
		if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
			sf = cliobd->cl_flvr_mgc;
		else
			sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
						   cliobd->cl_sp_to,
						   &cliobd->cl_target_uuid,
						   &conn->c_self, &sf);

		sp = imp->imp_obd->u.cli.cl_sp_me;
	} else {
		/* reverse import, determine flavor from incoming request */
		sf = *flvr;

		if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
			sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
				      PTLRPC_SEC_FL_ROOTONLY;

		sp = sptlrpc_target_sec_part(imp->imp_obd);
	}

	sec = sptlrpc_import_sec_ref(imp);
	if (sec) {
		char str2[24];

		if (flavor_equal(&sf, &sec->ps_flvr))
			GOTO(out, rc);

		CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
		       imp->imp_obd->obd_name,
		       obd_uuid2str(&conn->c_remote_uuid),
		       sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
		       sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
	} else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
		   SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
		CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
		       imp->imp_obd->obd_name,
		       obd_uuid2str(&conn->c_remote_uuid),
		       LNET_NID_NET(&conn->c_self),
		       sptlrpc_flavor2name(&sf, str, sizeof(str)));
	}

	newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
	if (newsec) {
		sptlrpc_import_sec_install(imp, newsec);
	} else {
		CERROR("import %s->%s: failed to create new sec\n",
		       imp->imp_obd->obd_name,
		       obd_uuid2str(&conn->c_remote_uuid));
		rc = -EPERM;
	}

out:
	sptlrpc_sec_put(sec);
	RETURN(rc);
}
EXPORT_SYMBOL(sptlrpc_import_sec_adapt);

void sptlrpc_import_sec_put(struct obd_import *imp)
{
	if (imp->imp_sec) {
		sptlrpc_sec_kill(imp->imp_sec);

		sptlrpc_sec_put(imp->imp_sec);
		imp->imp_sec = NULL;
	}
}

static void import_flush_ctx_common(struct obd_import *imp,
				    uid_t uid, int grace, int force)
{
	struct ptlrpc_sec *sec;

	if (imp == NULL)
		return;

	sec = sptlrpc_import_sec_ref(imp);
	if (sec == NULL)
		return;

	sec_cop_flush_ctx_cache(sec, uid, grace, force);
	sptlrpc_sec_put(sec);
}

void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
{
	/*
	 * it's important to use grace mode, see explain in
	 * sptlrpc_req_refresh_ctx()
	 */
	import_flush_ctx_common(imp, 0, 1, 1);
}

void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
{
	import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
				1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);

void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
{
	import_flush_ctx_common(imp, -1, 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);

/**
 * Used by ptlrpc client to allocate request buffer of \a req. Upon return
 * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
 */
int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_policy *policy;
	int rc;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(ctx->cc_sec->ps_policy);
	LASSERT(req->rq_reqmsg == NULL);
	LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);

	policy = ctx->cc_sec->ps_policy;
	rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
	if (!rc) {
		LASSERT(req->rq_reqmsg);
		LASSERT(req->rq_reqbuf || req->rq_clrbuf);

		/* zeroing preallocated buffer */
		if (req->rq_pool)
			memset(req->rq_reqmsg, 0, msgsize);
	}

	return rc;
}

/**
 * Used by ptlrpc client to free request buffer of \a req. After this
 * req->rq_reqmsg is set to NULL and should not be accessed anymore.
 */
void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_policy *policy;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(ctx->cc_sec->ps_policy);
	LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);

	if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
		return;

	policy = ctx->cc_sec->ps_policy;
	policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
	req->rq_reqmsg = NULL;
}

/**
 * NOTE caller must guarantee the buffer size is enough for the enlargement
 */
void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
				  int segment, int newsize)
{
	char *src, *dst;
	int oldsize, oldmsg_size, movesize;

	LASSERT(segment < msg->lm_bufcount);
	LASSERT(msg->lm_buflens[segment] <= newsize);

	if (msg->lm_buflens[segment] == newsize)
		return;

	/* nothing to do if we are enlarging the last segment */
	if (segment == msg->lm_bufcount - 1) {
		msg->lm_buflens[segment] = newsize;
		return;
	}

	oldsize = msg->lm_buflens[segment];

	src = lustre_msg_buf(msg, segment + 1, 0);
	msg->lm_buflens[segment] = newsize;
	dst = lustre_msg_buf(msg, segment + 1, 0);
	msg->lm_buflens[segment] = oldsize;

	/* move from segment + 1 to end segment */
	LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
	oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
	movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
	LASSERT(movesize >= 0);

	if (movesize)
		memmove(dst, src, movesize);

	/* note we don't clear the area where old data live, not secret */

	/* finally set new segment size */
	msg->lm_buflens[segment] = newsize;
}
EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
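
/*
 * Worked example with hypothetical sizes: for a message with lm_buflens =
 * { 128, 64, 256 }, enlarging segment 1 to 128 computes src from the old
 * layout and dst from the new one, then memmove()s the trailing 256-byte
 * segment up by the 64 bytes of growth. Segment offsets stay 8-byte aligned
 * because lustre_msg_size_v2() rounds each buffer length.
 */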

/**
 * Used by ptlrpc client to enlarge the \a segment of request message pointed
 * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be
 * preserved after the enlargement. This must be called after the original
 * request buffer has been allocated.
 *
 * \note after this is called, rq_reqmsg and rq_reqlen might have been changed,
 * so caller should refresh its local pointers if needed.
 */
int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
			       const struct req_msg_field *field,
			       int newsize)
{
	struct req_capsule *pill = &req->rq_pill;
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_cops *cops;
	struct lustre_msg *msg = req->rq_reqmsg;
	int segment = __req_capsule_offset(pill, field, RCL_CLIENT);

	LASSERT(ctx);
	LASSERT(msg);
	LASSERT(msg->lm_bufcount > segment);
	LASSERT(msg->lm_buflens[segment] <= newsize);

	if (msg->lm_buflens[segment] == newsize)
		return 0;

	cops = ctx->cc_sec->ps_policy->sp_cops;
	LASSERT(cops->enlarge_reqbuf);
	return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
}
EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
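
/*
 * Illustrative use (the field chosen here is an assumption, any RCL_CLIENT
 * field of the request capsule works the same way):
 *
 *	rc = sptlrpc_cli_enlarge_reqbuf(req, &RMF_DLM_REQ, newsize);
 *	if (rc == 0)
 *		msg = req->rq_reqmsg;	(refresh local pointer, it may move)
 */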

/**
 * Used by ptlrpc client to allocate reply buffer of \a req.
 *
 * \note After this, req->rq_repmsg is still not accessible.
 */
int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_policy *policy;

	ENTRY;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(ctx->cc_sec->ps_policy);

	if (req->rq_repbuf)
		RETURN(0);

	policy = ctx->cc_sec->ps_policy;
	RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
}

/**
 * Used by ptlrpc client to free reply buffer of \a req. After this
 * req->rq_repmsg is set to NULL and should not be accessed anymore.
 */
void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_policy *policy;

	ENTRY;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(ctx->cc_sec->ps_policy);
	LASSERT(atomic_read(&(ctx)->cc_refcount) > 0);

	if (req->rq_repbuf == NULL)
		return;
	LASSERT(req->rq_repbuf_len);

	policy = ctx->cc_sec->ps_policy;
	policy->sp_cops->free_repbuf(ctx->cc_sec, req);
	req->rq_repmsg = NULL;
	EXIT;
}
EXPORT_SYMBOL(sptlrpc_cli_free_repbuf);

int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
				struct ptlrpc_cli_ctx *ctx)
{
	struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;

	if (!policy->sp_cops->install_rctx)
		return 0;
	return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
}

int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
				struct ptlrpc_svc_ctx *ctx)
{
	struct ptlrpc_sec_policy *policy = ctx->sc_policy;

	if (!policy->sp_sops->install_rctx)
		return 0;
	return policy->sp_sops->install_rctx(imp, ctx);
}

/* Get SELinux policy info from userspace */
static int sepol_helper(struct obd_import *imp)
{
	char mtime_str[21] = { 0 }, mode_str[2] = { 0 };
	char *argv[] = {
		[0] = "/usr/sbin/l_getsepol",
		[1] = "-o",
		[2] = NULL,	    /* obd type */
		[3] = "-n",
		[4] = NULL,	    /* obd name */
		[5] = "-t",
		[6] = mtime_str,    /* policy mtime */
		[7] = "-m",
		[8] = mode_str,	    /* enforcing mode */
		[9] = NULL
	};
	struct sptlrpc_sepol *sepol;
	char *envp[] = {
		[0] = "HOME=/",
		[1] = "PATH=/sbin:/usr/sbin",
		[2] = NULL
	};
	s64 mtime_ms;
	int ret;

	if (imp == NULL || imp->imp_obd == NULL ||
	    imp->imp_obd->obd_type == NULL)
		return -EINVAL;

	argv[2] = (char *)imp->imp_obd->obd_type->typ_name;
	argv[4] = imp->imp_obd->obd_name;

	rcu_read_lock();
	sepol = rcu_dereference(imp->imp_sec->ps_sepol);
	if (!sepol) {
		/* ps_sepol has not been initialized */
		argv[5] = NULL;
		argv[7] = NULL;
	} else {
		mtime_ms = ktime_to_ms(sepol->ssp_mtime);
		snprintf(mtime_str, sizeof(mtime_str), "%lld",
			 mtime_ms / MSEC_PER_SEC);
		if (sepol->ssp_sepol_size > 1)
			mode_str[0] = sepol->ssp_sepol[0];
	}
	rcu_read_unlock();

	ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);

	return ret;
}

static inline int sptlrpc_sepol_needs_check(struct ptlrpc_sec *imp_sec)
{
	ktime_t checknext;

	if (send_sepol == 0)
		return 0;

	if (send_sepol == -1)
		/* send_sepol == -1 means fetch sepol status every time */
		return 1;

	spin_lock(&imp_sec->ps_lock);
	checknext = imp_sec->ps_sepol_checknext;
	spin_unlock(&imp_sec->ps_lock);

	/* next check is too far in time, please update */
	if (ktime_after(checknext,
			ktime_add(ktime_get(), ktime_set(send_sepol, 0))))
		goto setnext;

	if (ktime_before(ktime_get(), checknext))
		/* too early to fetch sepol status */
		return 0;

setnext:
	/* define new sepol_checknext time */
	spin_lock(&imp_sec->ps_lock);
	imp_sec->ps_sepol_checknext = ktime_add(ktime_get(),
						ktime_set(send_sepol, 0));
	spin_unlock(&imp_sec->ps_lock);

	return 1;
}

static void sptlrpc_sepol_release(struct kref *ref)
{
	struct sptlrpc_sepol *p = container_of(ref, struct sptlrpc_sepol,
					       ssp_ref);
	kfree_rcu(p, ssp_rcu);
}

void sptlrpc_sepol_put(struct sptlrpc_sepol *pol)
{
	if (pol)
		kref_put(&pol->ssp_ref, sptlrpc_sepol_release);
}
EXPORT_SYMBOL(sptlrpc_sepol_put);

struct sptlrpc_sepol *sptlrpc_sepol_get_cached(struct ptlrpc_sec *imp_sec)
{
	struct sptlrpc_sepol *p;

	rcu_read_lock();
	p = rcu_dereference(imp_sec->ps_sepol);
	if (p && !kref_get_unless_zero(&p->ssp_ref)) {
		p = NULL;
	}
	rcu_read_unlock();

	return p;
}
EXPORT_SYMBOL(sptlrpc_sepol_get_cached);

struct sptlrpc_sepol *sptlrpc_sepol_get(struct ptlrpc_request *req)
{
	struct ptlrpc_sec *imp_sec = req->rq_import->imp_sec;
	struct sptlrpc_sepol *out;
	int rc = 0;

	ENTRY;

#ifndef HAVE_SELINUX
	if (unlikely(send_sepol != 0))
		CDEBUG(D_SEC,
		       "Client cannot report SELinux status, it was not built against libselinux.\n");
	RETURN(NULL);
#endif

	if (send_sepol == 0)
		RETURN(NULL);

	if (imp_sec == NULL)
		RETURN(ERR_PTR(-EINVAL));

	/* Retrieve SELinux status info */
	if (sptlrpc_sepol_needs_check(imp_sec))
		rc = sepol_helper(req->rq_import);

	if (unlikely(rc == -ENODEV)) {
		CDEBUG(D_SEC,
		       "Client cannot report SELinux status, SELinux is disabled.\n");
		RETURN(NULL);
	}
	if (unlikely(rc))
		RETURN(ERR_PTR(rc));

	out = sptlrpc_sepol_get_cached(imp_sec);
	if (!out)
		RETURN(ERR_PTR(-ENODATA));

	RETURN(out);
}
EXPORT_SYMBOL(sptlrpc_sepol_get);
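
/*
 * Sketch of the intended calling convention for the sepol accessors above
 * (the caller shown is an assumption; request packing code elsewhere in
 * ptlrpc uses this pattern):
 *
 *	sepol = sptlrpc_sepol_get(req);
 *	if (IS_ERR(sepol))
 *		return PTR_ERR(sepol);
 *	... if non-NULL, pack sepol->ssp_sepol into the request ...
 *	sptlrpc_sepol_put(sepol);
 *
 * NULL means "nothing to send" (send_sepol disabled or SELinux unavailable),
 * while an ERR_PTR() indicates a real failure.
 */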

/**************************************************
 * server side security
 **************************************************/

static int flavor_allowed(struct sptlrpc_flavor *exp,
			  struct ptlrpc_request *req)
{
	struct sptlrpc_flavor *flvr = &req->rq_flvr;

	if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
		return 1;

	if ((req->rq_ctx_init || req->rq_ctx_fini) &&
	    SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
	    SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
	    SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
		return 1;

	return 0;
}

#define EXP_FLVR_UPDATE_EXPIRE	(OBD_TIMEOUT_DEFAULT + 10)

/**
 * Given an export \a exp, check whether the flavor of incoming \a req
 * is allowed by the export \a exp. Main logic is about taking care of
 * changing configurations. Return 0 means success.
 */
int sptlrpc_target_export_check(struct obd_export *exp,
				struct ptlrpc_request *req)
{
	struct sptlrpc_flavor flavor;

	if (exp == NULL)
		return 0;

	/*
	 * client side export has no imp_reverse, skip
	 * FIXME maybe we should check flavor this as well???
	 */
	if (exp->exp_imp_reverse == NULL)
		return 0;

	/* don't care about ctx fini rpc */
	if (req->rq_ctx_fini)
		return 0;

	spin_lock(&exp->exp_lock);

	/*
	 * if flavor just changed (exp->exp_flvr_changed != 0), we wait for
	 * the first req with the new flavor, then treat it as current flavor,
	 * adapt reverse sec according to it.
	 * note the first rpc with new flavor might not be with root ctx, in
	 * which case delay the sec_adapt by leaving exp_flvr_adapt == 1.
	 */
	if (unlikely(exp->exp_flvr_changed) &&
	    flavor_allowed(&exp->exp_flvr_old[1], req)) {
		/*
		 * make the new flavor as "current", and old ones as
		 * about-to-expire
		 */
		CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
		       exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
		flavor = exp->exp_flvr_old[1];
		exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
		exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
		exp->exp_flvr_old[0] = exp->exp_flvr;
		exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
					  EXP_FLVR_UPDATE_EXPIRE;
		exp->exp_flvr = flavor;

		/* flavor change finished */
		exp->exp_flvr_changed = 0;
		LASSERT(exp->exp_flvr_adapt == 1);

		/* if it's gss, we only interested in root ctx init */
		if (req->rq_auth_gss &&
		    !(req->rq_ctx_init &&
		      (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
		       req->rq_auth_usr_ost))) {
			spin_unlock(&exp->exp_lock);
			CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
			       req->rq_auth_gss, req->rq_ctx_init,
			       req->rq_auth_usr_root, req->rq_auth_usr_mdt,
			       req->rq_auth_usr_ost);
			return 0;
		}

		exp->exp_flvr_adapt = 0;
		spin_unlock(&exp->exp_lock);

		return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
						req->rq_svc_ctx, &flavor);
	}

	/*
	 * if it equals to the current flavor, we accept it, but need to
	 * dealing with reverse sec/ctx
	 */
	if (likely(flavor_allowed(&exp->exp_flvr, req))) {
		/*
		 * most cases should return here, we only interested in
		 * gss root ctx init
		 */
		if (!req->rq_auth_gss || !req->rq_ctx_init ||
		    (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
		     !req->rq_auth_usr_ost)) {
			spin_unlock(&exp->exp_lock);
			return 0;
		}

		/*
		 * if flavor just changed, we should not proceed, just leave
		 * it and current flavor will be discovered and replaced
		 * shortly, and let _this_ rpc pass through
		 */
		if (exp->exp_flvr_changed) {
			LASSERT(exp->exp_flvr_adapt);
			spin_unlock(&exp->exp_lock);
			return 0;
		}

		if (exp->exp_flvr_adapt) {
			exp->exp_flvr_adapt = 0;
			CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
			       exp, exp->exp_flvr.sf_rpc,
			       exp->exp_flvr_old[0].sf_rpc,
			       exp->exp_flvr_old[1].sf_rpc);
			flavor = exp->exp_flvr;
			spin_unlock(&exp->exp_lock);

			return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
							req->rq_svc_ctx,
							&flavor);
		}

		CDEBUG(D_SEC,
		       "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
		       exp, exp->exp_flvr.sf_rpc,
		       exp->exp_flvr_old[0].sf_rpc,
		       exp->exp_flvr_old[1].sf_rpc);
		spin_unlock(&exp->exp_lock);

		return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
						   req->rq_svc_ctx);
	}

	if (exp->exp_flvr_expire[0]) {
		if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
			if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
				CDEBUG(D_SEC,
				       "exp %p (%x|%x|%x): match the middle one (%lld)\n",
				       exp, exp->exp_flvr.sf_rpc,
				       exp->exp_flvr_old[0].sf_rpc,
				       exp->exp_flvr_old[1].sf_rpc,
				       (s64)(exp->exp_flvr_expire[0] -
					     ktime_get_real_seconds()));
				spin_unlock(&exp->exp_lock);
				return 0;
			}
		} else {
			CDEBUG(D_SEC, "mark middle expired\n");
			exp->exp_flvr_expire[0] = 0;
		}
		CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
		       exp->exp_flvr.sf_rpc,
		       exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
		       req->rq_flvr.sf_rpc);
	}

	/*
	 * now it doesn't match the current flavor, the only chance we can
	 * accept it is match the old flavors which is not expired.
	 */
	if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
		if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
			if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
				CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
				       exp,
				       exp->exp_flvr.sf_rpc,
				       exp->exp_flvr_old[0].sf_rpc,
				       exp->exp_flvr_old[1].sf_rpc,
				       (s64)(exp->exp_flvr_expire[1] -
					     ktime_get_real_seconds()));
				spin_unlock(&exp->exp_lock);
				return 0;
			}
		} else {
			CDEBUG(D_SEC, "mark oldest expired\n");
			exp->exp_flvr_expire[1] = 0;
		}
		CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
		       exp, exp->exp_flvr.sf_rpc,
		       exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
		       req->rq_flvr.sf_rpc);
	} else {
		CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
		       exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
		       exp->exp_flvr_old[1].sf_rpc);
	}

	spin_unlock(&exp->exp_lock);

	CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
	      exp, exp->exp_obd->obd_name,
	      req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
	      req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
	      req->rq_flvr.sf_rpc,
	      exp->exp_flvr.sf_rpc,
	      exp->exp_flvr_old[0].sf_rpc,
	      exp->exp_flvr_expire[0] ?
	      (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
	      exp->exp_flvr_old[1].sf_rpc,
	      exp->exp_flvr_expire[1] ?
	      (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
	return -EACCES;
}
EXPORT_SYMBOL(sptlrpc_target_export_check);
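
/*
 * Timeline sketch of the flavor bookkeeping above (times illustrative;
 * EXP_FLVR_UPDATE_EXPIRE is OBD_TIMEOUT_DEFAULT + 10 seconds):
 *
 *	t0: admin changes the rule set; exp_flvr_old[1] holds the new
 *	    flavor and exp_flvr_changed = 1
 *	    (see sptlrpc_target_update_exp_flavor() below)
 *	t1: first req carrying the new flavor arrives; it becomes exp_flvr,
 *	    the previous flavors shift into exp_flvr_old[0]/[1] and start
 *	    aging out
 *	t1 + EXP_FLVR_UPDATE_EXPIRE: the old flavors expire; requests still
 *	    using them are rejected with -EACCES and the CWARN above
 */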

void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
				      struct sptlrpc_rule_set *rset)
{
	struct obd_export *exp;
	struct sptlrpc_flavor new_flvr;

	LASSERT(obd);

	spin_lock(&obd->obd_dev_lock);

	list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
		if (exp->exp_connection == NULL)
			continue;

		/*
		 * note if this export had just been updated flavor
		 * (exp_flvr_changed == 1), this will override the
		 * previous one.
		 */
		spin_lock(&exp->exp_lock);
		sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
					     &exp->exp_connection->c_peer.nid,
					     &new_flvr);
		if (exp->exp_flvr_changed ||
		    !flavor_equal(&new_flvr, &exp->exp_flvr)) {
			exp->exp_flvr_old[1] = new_flvr;
			exp->exp_flvr_expire[1] = 0;
			exp->exp_flvr_changed = 1;
			exp->exp_flvr_adapt = 1;

			CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
			       exp, sptlrpc_part2name(exp->exp_sp_peer),
			       exp->exp_flvr.sf_rpc,
			       exp->exp_flvr_old[1].sf_rpc);
		}
		spin_unlock(&exp->exp_lock);
	}

	spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);

static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
{
	/* peer's claim is unreliable unless gss is being used */
	if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
		return svc_rc;

	switch (req->rq_sp_from) {
	case LUSTRE_SP_CLI:
		if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
			/* The below message is checked in sanity-sec test_33 */
			DEBUG_REQ(D_ERROR, req, "faked source CLI");
			svc_rc = SECSVC_DROP;
		}
		break;
	case LUSTRE_SP_MDT:
		if (!req->rq_auth_usr_mdt) {
			/* The below message is checked in sanity-sec test_33 */
			DEBUG_REQ(D_ERROR, req, "faked source MDT");
			svc_rc = SECSVC_DROP;
		}
		break;
	case LUSTRE_SP_OST:
		if (!req->rq_auth_usr_ost) {
			/* The below message is checked in sanity-sec test_33 */
			DEBUG_REQ(D_ERROR, req, "faked source OST");
			svc_rc = SECSVC_DROP;
		}
		break;
	case LUSTRE_SP_MGC:
	case LUSTRE_SP_MGS:
		if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
		    !req->rq_auth_usr_ost) {
			/* The below message is checked in sanity-sec test_33 */
			DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
			svc_rc = SECSVC_DROP;
		}
		break;
	case LUSTRE_SP_ANY:
	default:
		DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
		svc_rc = SECSVC_DROP;
	}

	return svc_rc;
}
/**
 * Used by ptlrpc server, to perform transformation upon the request message
 * of incoming \a req. This must be the first thing done with an incoming
 * request in the ptlrpc layer.
 *
 * \retval SECSVC_OK success; req->rq_reqmsg points to the request message
 * in clear text, of size req->rq_reqlen; req->rq_svc_ctx is also set.
 * \retval SECSVC_COMPLETE success; the request has been fully processed and
 * the reply message has been prepared.
 * \retval SECSVC_DROP failure; this request should be dropped.
 */
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
{
	struct ptlrpc_sec_policy *policy;
	struct lustre_msg *msg = req->rq_reqbuf;
	int rc;

	ENTRY;

	LASSERT(msg);
	LASSERT(req->rq_reqmsg == NULL);
	LASSERT(req->rq_repmsg == NULL);
	LASSERT(req->rq_svc_ctx == NULL);

	req->rq_req_swab_mask = 0;

	rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
	switch (rc) {
	case 1:
		req_capsule_set_req_swabbed(&req->rq_pill,
					    MSG_PTLRPC_HEADER_OFF);
		/* fallthrough */
	case 0:
		break;
	default:
		CERROR("error unpacking request from %s x%llu\n",
		       libcfs_idstr(&req->rq_peer), req->rq_xid);
		RETURN(SECSVC_DROP);
	}

	req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
	req->rq_sp_from = LUSTRE_SP_ANY;
	req->rq_auth_uid = -1;		/* set to INVALID_UID */
	req->rq_auth_mapped_uid = -1;

	policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
	if (!policy) {
		CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
		RETURN(SECSVC_DROP);
	}

	LASSERT(policy->sp_sops->accept);
	rc = policy->sp_sops->accept(req);
	sptlrpc_policy_put(policy);
	LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
	LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);

	/*
	 * If it's not the null flavor (which means an embedded packed msg),
	 * reset the swab mask for the coming inner msg unpacking.
	 */
	if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
		req->rq_req_swab_mask = 0;

	/* sanity check for the request source */
	rc = sptlrpc_svc_check_from(req, rc);
	RETURN(rc);
}

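/*
 * Illustrative sketch (comment only) of the server-side call order for an
 * incoming request; the real driver is the ptlrpc service code, and all
 * error handling is elided here:
 *
 *	rc = sptlrpc_svc_unwrap_request(req);	request now in clear text
 *	... dispatch req to the request handler ...
 *	rc = sptlrpc_svc_alloc_rs(req, msglen);	reserve the reply buffer
 *	... fill req->rq_repmsg ...
 *	rc = sptlrpc_svc_wrap_reply(req);	sign or seal the reply
 */
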
/**
 * Used by ptlrpc server, to allocate a reply buffer for \a req. On success,
 * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to a
 * buffer of \a msglen size.
 */
int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
	struct ptlrpc_sec_policy *policy;
	struct ptlrpc_reply_state *rs;
	int rc;

	ENTRY;

	LASSERT(req->rq_svc_ctx);
	LASSERT(req->rq_svc_ctx->sc_policy);

	policy = req->rq_svc_ctx->sc_policy;
	LASSERT(policy->sp_sops->alloc_rs);

	rc = policy->sp_sops->alloc_rs(req, msglen);
	if (unlikely(rc == -ENOMEM)) {
		struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;

		if (svcpt->scp_service->srv_max_reply_size <
		    msglen + sizeof(struct ptlrpc_reply_state)) {
			/* Just return failure if the size is too big */
			CERROR("size of message is too big (%zd), %d allowed\n",
			       msglen + sizeof(struct ptlrpc_reply_state),
			       svcpt->scp_service->srv_max_reply_size);
			RETURN(-ENOMEM);
		}

		/* failed alloc, try emergency pool */
		rs = lustre_get_emerg_rs(svcpt);
		if (rs == NULL)
			RETURN(-ENOMEM);

		req->rq_reply_state = rs;
		rc = policy->sp_sops->alloc_rs(req, msglen);
		if (rc) {
			lustre_put_emerg_rs(rs);
			req->rq_reply_state = NULL;
		}
	}

	LASSERT(rc != 0 ||
		(req->rq_reply_state && req->rq_reply_state->rs_msg));

	RETURN(rc);
}

/**
 * Used by ptlrpc server, to perform transformation upon the reply message.
 *
 * \post req->rq_reply_off is set to the appropriate server-controlled
 * reply offset.
 * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
 */
int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
{
	struct ptlrpc_sec_policy *policy;
	int rc;

	ENTRY;

	LASSERT(req->rq_svc_ctx);
	LASSERT(req->rq_svc_ctx->sc_policy);

	policy = req->rq_svc_ctx->sc_policy;
	LASSERT(policy->sp_sops->authorize);

	rc = policy->sp_sops->authorize(req);
	LASSERT(rc || req->rq_reply_state->rs_repdata_len);

	RETURN(rc);
}

/**
 * Used by ptlrpc server, to free the reply_state.
 */
void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
{
	struct ptlrpc_sec_policy *policy;
	unsigned int prealloc;

	ENTRY;

	LASSERT(rs->rs_svc_ctx);
	LASSERT(rs->rs_svc_ctx->sc_policy);

	policy = rs->rs_svc_ctx->sc_policy;
	LASSERT(policy->sp_sops->free_rs);

	prealloc = rs->rs_prealloc;
	policy->sp_sops->free_rs(rs);

	if (prealloc)
		lustre_put_emerg_rs(rs);
	EXIT;
}

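/*
 * Note on the emergency pool: if sptlrpc_svc_alloc_rs() fell back to
 * lustre_get_emerg_rs(), rs_prealloc is set and the reply state is handed
 * back to the per-service-part pool here rather than freed. A minimal
 * usage sketch (comment only), once the reply has been sent and the last
 * reference dropped:
 *
 *	sptlrpc_svc_free_rs(rs);	rs must not be touched afterwards
 */
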
void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
{
	struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

	if (ctx != NULL)
		atomic_inc(&ctx->sc_refcount);
}

void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
{
	struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

	if (ctx == NULL)
		return;

	LASSERT(atomic_read(&ctx->sc_refcount) > 0);
	if (atomic_dec_and_test(&ctx->sc_refcount)) {
		if (ctx->sc_policy->sp_sops->free_ctx)
			ctx->sc_policy->sp_sops->free_ctx(ctx);
	}
	req->rq_svc_ctx = NULL;
}

void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
{
	struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

	if (ctx == NULL)
		return;

	LASSERT(atomic_read(&ctx->sc_refcount) > 0);
	if (ctx->sc_policy->sp_sops->invalidate_ctx)
		ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
}
EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);

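/*
 * Reference-counting sketch (comment only): a caller that hands the
 * service context to another user takes an extra reference first; the
 * last sptlrpc_svc_ctx_decref() may invoke the policy's free_ctx():
 *
 *	sptlrpc_svc_ctx_addref(req);
 *	... req->rq_svc_ctx used from elsewhere ...
 *	sptlrpc_svc_ctx_decref(req);
 *
 * sptlrpc_svc_ctx_invalidate() does not drop a reference; it only asks the
 * policy to mark the context unusable for future requests.
 */
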
/**
 * Perform transformation upon bulk data pointed to by \a desc. This is
 * called before transforming the request message.
 */
int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_cli_ctx *ctx;

	LASSERT(req->rq_bulk_read || req->rq_bulk_write);

	if (!req->rq_pack_bulk)
		return 0;

	ctx = req->rq_cli_ctx;
	if (ctx->cc_ops->wrap_bulk)
		return ctx->cc_ops->wrap_bulk(ctx, req, desc);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);

/**
 * This is called after unwrapping the reply message.
 * Returns the number of bytes of actual plain text received, or an error
 * code.
 */
int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
				 struct ptlrpc_bulk_desc *desc,
				 int nob)
{
	struct ptlrpc_cli_ctx *ctx;
	int rc;

	LASSERT(req->rq_bulk_read && !req->rq_bulk_write);

	if (!req->rq_pack_bulk)
		return desc->bd_nob_transferred;

	ctx = req->rq_cli_ctx;
	if (ctx->cc_ops->unwrap_bulk) {
		rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
		if (rc < 0)
			return rc;
	}
	return desc->bd_nob_transferred;
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);

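/*
 * Client-side bulk read sketch (comment only): assuming the request was
 * packed with rq_pack_bulk set, the calls pair up around the transfer;
 * the actual send/wait and error handling are elided:
 *
 *	rc = sptlrpc_cli_wrap_bulk(req, desc);
 *	... send request, wait for reply and bulk transfer ...
 *	rc = sptlrpc_cli_unwrap_bulk_read(req, desc, nob);
 *	if (rc < 0)
 *		... integrity check or decryption failed ...
 */
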
/**
 * This is called after unwrapping the reply message.
 * Returns 0 for success or an error code.
 */
int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
				  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_cli_ctx *ctx;
	int rc;

	LASSERT(!req->rq_bulk_read && req->rq_bulk_write);

	if (!req->rq_pack_bulk)
		return 0;

	ctx = req->rq_cli_ctx;
	if (ctx->cc_ops->unwrap_bulk) {
		rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
		if (rc < 0)
			return rc;
	}

	/*
	 * If everything went right, nob should equal nob_transferred.
	 * In privacy mode, nob_transferred needs to be adjusted.
	 */
	if (desc->bd_nob != desc->bd_nob_transferred) {
		CERROR("nob %d doesn't match transferred nob %d\n",
		       desc->bd_nob, desc->bd_nob_transferred);
		return -EPROTO;
	}

	return 0;
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);

#ifdef HAVE_SERVER_SUPPORT
/**
 * Perform transformation upon outgoing bulk read.
 */
int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_svc_ctx *ctx;

	LASSERT(req->rq_bulk_read);

	if (!req->rq_pack_bulk)
		return 0;

	ctx = req->rq_svc_ctx;
	if (ctx->sc_policy->sp_sops->wrap_bulk)
		return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);

	return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);

/**
 * Perform transformation upon incoming bulk write.
 */
int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
			    struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_svc_ctx *ctx;
	int rc;

	LASSERT(req->rq_bulk_write);

	/*
	 * In privacy mode, transferred should be >= expected; otherwise
	 * transferred should be == expected.
	 */
	if (desc->bd_nob_transferred < desc->bd_nob ||
	    (desc->bd_nob_transferred > desc->bd_nob &&
	     SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
	     SPTLRPC_BULK_SVC_PRIV)) {
		DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
			  desc->bd_nob_transferred, desc->bd_nob);
		return -ETIMEDOUT;
	}

	if (!req->rq_pack_bulk)
		return 0;

	ctx = req->rq_svc_ctx;
	if (ctx->sc_policy->sp_sops->unwrap_bulk) {
		rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
		if (rc)
			CERROR("error unwrapping bulk: %d\n", rc);
	}

	/* return 0 to allow the reply to be sent */
	return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);

/**
 * Prepare buffers for incoming bulk write.
 */
int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_svc_ctx *ctx;

	LASSERT(req->rq_bulk_write);

	if (!req->rq_pack_bulk)
		return 0;

	ctx = req->rq_svc_ctx;
	if (ctx->sc_policy->sp_sops->prep_bulk)
		return ctx->sc_policy->sp_sops->prep_bulk(req, desc);

	return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);

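/*
 * Server-side bulk write sketch (comment only): the OST I/O path is
 * expected to use the hooks above in this order; the transfer step and
 * error handling are elided:
 *
 *	sptlrpc_svc_prep_bulk(req, desc);	prepare decryption buffers
 *	... perform the bulk GET from the client ...
 *	rc = sptlrpc_svc_unwrap_bulk(req, desc);  verify and/or decrypt
 */
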
#endif /* HAVE_SERVER_SUPPORT */

/****************************************
 * user descriptor helpers              *
 ****************************************/

int sptlrpc_current_user_desc_size(void)
{
	int ngroups;

	ngroups = current_cred()->group_info->ngroups;

	if (ngroups > LUSTRE_MAX_GROUPS)
		ngroups = LUSTRE_MAX_GROUPS;
	return sptlrpc_user_desc_size(ngroups);
}
EXPORT_SYMBOL(sptlrpc_current_user_desc_size);

int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
{
	struct ptlrpc_user_desc *pud;
	int ngroups;

	pud = lustre_msg_buf(msg, offset, 0);

	pud->pud_uid = from_kuid(&init_user_ns, current_uid());
	pud->pud_gid = from_kgid(&init_user_ns, current_gid());
	pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
	pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
	pud->pud_cap = ll_capability_u32(current_cap());
	pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;

	task_lock(current);
	ngroups = current_cred()->group_info->ngroups;
	if (pud->pud_ngroups > ngroups)
		pud->pud_ngroups = ngroups;
#ifdef HAVE_GROUP_INFO_GID
	memcpy(pud->pud_groups, current_cred()->group_info->gid,
	       pud->pud_ngroups * sizeof(__u32));
#else /* !HAVE_GROUP_INFO_GID */
	memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
	       pud->pud_ngroups * sizeof(__u32));
#endif /* HAVE_GROUP_INFO_GID */
	task_unlock(current);

	return 0;
}
EXPORT_SYMBOL(sptlrpc_pack_user_desc);

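/*
 * Packing sketch (comment only): a client sizes the descriptor first, then
 * packs it in place once the message buffer is allocated. The offset name
 * below is illustrative, not a constant defined in this file:
 *
 *	bufsize = sptlrpc_current_user_desc_size();
 *	... allocate msg with a buffer of bufsize at user_desc_offset ...
 *	rc = sptlrpc_pack_user_desc(msg, user_desc_offset);
 *
 * The receiving side validates and byte-swaps the descriptor with
 * sptlrpc_unpack_user_desc().
 */
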
int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
{
	struct ptlrpc_user_desc *pud;
	int i;

	pud = lustre_msg_buf(msg, offset, sizeof(*pud));
	if (!pud)
		return -EINVAL;

	if (swabbed) {
		__swab32s(&pud->pud_uid);
		__swab32s(&pud->pud_gid);
		__swab32s(&pud->pud_fsuid);
		__swab32s(&pud->pud_fsgid);
		__swab32s(&pud->pud_cap);
		__swab32s(&pud->pud_ngroups);
	}

	if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
		CERROR("%u groups is too large\n", pud->pud_ngroups);
		return -EINVAL;
	}

	if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
	    msg->lm_buflens[offset]) {
		CERROR("%u groups are claimed but bufsize only %u\n",
		       pud->pud_ngroups, msg->lm_buflens[offset]);
		return -EINVAL;
	}

	if (swabbed) {
		for (i = 0; i < pud->pud_ngroups; i++)
			__swab32s(&pud->pud_groups[i]);
	}

	return 0;
}
EXPORT_SYMBOL(sptlrpc_unpack_user_desc);

const char *sec2target_str(struct ptlrpc_sec *sec)
{
	if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
		return "*";
	if (sec_is_reverse(sec))
		return "c";
	return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
}
EXPORT_SYMBOL(sec2target_str);

/*
 * return true if the bulk data is protected
 */
int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
{
	switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
	case SPTLRPC_BULK_SVC_INTG:
	case SPTLRPC_BULK_SVC_PRIV:
		return 1;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);

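/*
 * Example (comment only): a krb5i wire flavor carries
 * SPTLRPC_BULK_SVC_INTG in its bulk-service bits, so
 * sptlrpc_flavor_has_bulk() returns 1 and bulk pages get checksummed,
 * while krb5n carries SPTLRPC_BULK_SVC_NULL and leaves bulk data
 * unprotected.
 */
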
static int cfs_hash_alg_id[] = {
	[BULK_HASH_ALG_NULL]	= CFS_HASH_ALG_NULL,
	[BULK_HASH_ALG_ADLER32]	= CFS_HASH_ALG_ADLER32,
	[BULK_HASH_ALG_CRC32]	= CFS_HASH_ALG_CRC32,
	[BULK_HASH_ALG_MD5]	= CFS_HASH_ALG_MD5,
	[BULK_HASH_ALG_SHA1]	= CFS_HASH_ALG_SHA1,
	[BULK_HASH_ALG_SHA256]	= CFS_HASH_ALG_SHA256,
	[BULK_HASH_ALG_SHA384]	= CFS_HASH_ALG_SHA384,
	[BULK_HASH_ALG_SHA512]	= CFS_HASH_ALG_SHA512,
};

const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
	return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
}

__u8 sptlrpc_get_hash_alg(const char *algname)
{
	return cfs_crypto_hash_alg(algname);
}

int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	int size = msg->lm_buflens[offset];

	bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
	if (!bsd) {
		CERROR("Invalid bulk sec desc: size %d\n", size);
		return -EINVAL;
	}

	if (swabbed)
		__swab32s(&bsd->bsd_nob);

	if (unlikely(bsd->bsd_version != 0)) {
		CERROR("Unexpected version %u\n", bsd->bsd_version);
		return -EPROTO;
	}

	if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
		CERROR("Invalid type %u\n", bsd->bsd_type);
		return -EPROTO;
	}

	/* FIXME: add more sanity checks here */

	if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
		     bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
		     bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
		CERROR("Invalid svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);

/*
 * Compute the checksum of an RPC buffer payload. If the given \a buflen
 * is not large enough, truncate the result to fit. This makes it possible
 * to use a hash function with a large hash space while only consuming part
 * of the resulting digest.
 */
int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
			      void *buf, int buflen)
{
	struct ahash_request *req;
	int hashsize;
	unsigned int bufsize;
	int i, err;

	LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
	LASSERT(buflen >= 4);

	req = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
	if (IS_ERR(req)) {
		CERROR("Unable to initialize checksum hash %s\n",
		       cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
		return PTR_ERR(req);
	}

	hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);

	for (i = 0; i < desc->bd_iov_count; i++) {
		cfs_crypto_hash_update_page(req,
					    desc->bd_vec[i].bv_page,
					    desc->bd_vec[i].bv_offset &
					    ~PAGE_MASK,
					    desc->bd_vec[i].bv_len);
	}

	if (hashsize > buflen) {
		unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];

		bufsize = sizeof(hashbuf);
		LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
			 bufsize, hashsize);
		err = cfs_crypto_hash_final(req, hashbuf, &bufsize);
		memcpy(buf, hashbuf, buflen);
	} else {
		bufsize = buflen;
		err = cfs_crypto_hash_final(req, buf, &bufsize);
	}

	return err;
}

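/*
 * Truncation example (comment only): with alg == BULK_HASH_ALG_SHA512 the
 * digest is 64 bytes; a caller passing buflen == 16 still gets a valid
 * (truncated) checksum, namely the first 16 bytes of the SHA-512 digest:
 *
 *	__u8 cksum[16];
 *	rc = sptlrpc_get_bulk_checksum(desc, BULK_HASH_ALG_SHA512,
 *				       cksum, sizeof(cksum));
 */
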
/****************************************
 * crypto API helper/alloc blkcipher    *
 ****************************************/

/****************************************
 * initialize/finalize                  *
 ****************************************/

int sptlrpc_init(void)
{
	int rc;

	rwlock_init(&policy_lock);

	rc = sptlrpc_gc_init();
	if (rc)
		goto out;

	rc = sptlrpc_conf_init();
	if (rc)
		goto out_gc;

	rc = sptlrpc_null_init();
	if (rc)
		goto out_conf;

	rc = sptlrpc_plain_init();
	if (rc)
		goto out_null;

	rc = sptlrpc_lproc_init();
	if (rc)
		goto out_plain;

	return 0;

out_plain:
	sptlrpc_plain_fini();
out_null:
	sptlrpc_null_fini();
out_conf:
	sptlrpc_conf_fini();
out_gc:
	sptlrpc_gc_fini();
out:
	return rc;
}

void sptlrpc_fini(void)
{
	sptlrpc_lproc_fini();
	sptlrpc_plain_fini();
	sptlrpc_null_fini();
	sptlrpc_conf_fini();
	sptlrpc_gc_fini();
}