/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <linux/user_namespace.h>
#ifdef HAVE_UIDGID_HEADER
# include <linux/uidgid.h>
#endif
#include <linux/crypto.h>
#include <linux/key.h>

#include <libcfs/libcfs.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"
static int send_sepol;
module_param(send_sepol, int, 0644);
MODULE_PARM_DESC(send_sepol, "Client sends SELinux policy status");
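
/*
 * Illustrative note (not in the original source): send_sepol is a module
 * parameter, so it can be set at module load time, e.g. via a modprobe
 * configuration sketch (the "ptlrpc" module name is an assumption here):
 *
 *	options ptlrpc send_sepol=60
 *
 * Per sptlrpc_sepol_needs_check() below, 0 disables sending the SELinux
 * status, -1 re-fetches it on every check, and a positive value is the
 * refresh interval in seconds.
 */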
static rwlock_t policy_lock;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
	NULL,
};
int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
{
	__u16 number = policy->sp_policy;

	LASSERT(policy->sp_name);
	LASSERT(policy->sp_cops);
	LASSERT(policy->sp_sops);

	if (number >= SPTLRPC_POLICY_MAX)
		return -EINVAL;

	write_lock(&policy_lock);
	if (unlikely(policies[number])) {
		write_unlock(&policy_lock);
		return -EALREADY;
	}
	policies[number] = policy;
	write_unlock(&policy_lock);

	CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_register_policy);
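
/*
 * Registration sketch (illustrative only; "null_policy" and its ops tables
 * stand in for a real policy module and are assumptions):
 *
 *	static struct ptlrpc_sec_policy null_policy = {
 *		.sp_owner  = THIS_MODULE,
 *		.sp_name   = "sec.null",
 *		.sp_policy = SPTLRPC_POLICY_NULL,
 *		.sp_cops   = &null_sec_cops,
 *		.sp_sops   = &null_sec_sops,
 *	};
 *
 *	rc = sptlrpc_register_policy(&null_policy);
 *
 * and on module exit the matching call would be
 * sptlrpc_unregister_policy(&null_policy).
 */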
int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
{
	__u16 number = policy->sp_policy;

	LASSERT(number < SPTLRPC_POLICY_MAX);

	write_lock(&policy_lock);
	if (unlikely(policies[number] == NULL)) {
		write_unlock(&policy_lock);
		CERROR("%s: already unregistered\n", policy->sp_name);
		return -EINVAL;
	}

	LASSERT(policies[number] == policy);
	policies[number] = NULL;
	write_unlock(&policy_lock);

	CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_unregister_policy);
static
struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
{
	static DEFINE_MUTEX(load_mutex);
	static atomic_t loaded = ATOMIC_INIT(0);
	struct ptlrpc_sec_policy *policy;
	__u16 number = SPTLRPC_FLVR_POLICY(flavor);
	__u16 flag = 0;

	if (number >= SPTLRPC_POLICY_MAX)
		return NULL;

	while (1) {
		read_lock(&policy_lock);
		policy = policies[number];
		if (policy && !try_module_get(policy->sp_owner))
			policy = NULL;
		if (policy == NULL)
			flag = atomic_read(&loaded);
		read_unlock(&policy_lock);

		if (policy != NULL || flag != 0 ||
		    number != SPTLRPC_POLICY_GSS)
			break;

		/* try to load the gss module, once */
		mutex_lock(&load_mutex);
		if (atomic_read(&loaded) == 0) {
			if (request_module("ptlrpc_gss") == 0)
				CDEBUG(D_SEC,
				       "module ptlrpc_gss loaded on demand\n");
			else
				CERROR("Unable to load module ptlrpc_gss\n");

			atomic_set(&loaded, 1);
		}
		mutex_unlock(&load_mutex);
	}

	return policy;
}
158 __u32 sptlrpc_name2flavor_base(const char *name)
160 if (!strcmp(name, "null"))
161 return SPTLRPC_FLVR_NULL;
162 if (!strcmp(name, "plain"))
163 return SPTLRPC_FLVR_PLAIN;
164 if (!strcmp(name, "gssnull"))
165 return SPTLRPC_FLVR_GSSNULL;
166 if (!strcmp(name, "krb5n"))
167 return SPTLRPC_FLVR_KRB5N;
168 if (!strcmp(name, "krb5a"))
169 return SPTLRPC_FLVR_KRB5A;
170 if (!strcmp(name, "krb5i"))
171 return SPTLRPC_FLVR_KRB5I;
172 if (!strcmp(name, "krb5p"))
173 return SPTLRPC_FLVR_KRB5P;
174 if (!strcmp(name, "skn"))
175 return SPTLRPC_FLVR_SKN;
176 if (!strcmp(name, "ska"))
177 return SPTLRPC_FLVR_SKA;
178 if (!strcmp(name, "ski"))
179 return SPTLRPC_FLVR_SKI;
180 if (!strcmp(name, "skpi"))
181 return SPTLRPC_FLVR_SKPI;
183 return SPTLRPC_FLVR_INVALID;
185 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
const char *sptlrpc_flavor2name_base(__u32 flvr)
{
	__u32 base = SPTLRPC_FLVR_BASE(flvr);
	if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
		return "null";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
		return "plain";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
		return "gssnull";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
		return "krb5n";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
		return "krb5a";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
		return "krb5i";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
		return "krb5p";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKN))
		return "skn";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKA))
		return "ska";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
		return "ski";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
		return "skpi";
	CERROR("invalid wire flavor 0x%x\n", flvr);
	return "invalid";
}
217 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
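
/*
 * Illustrative note (not in the original source): a wire flavor (__u32)
 * packs policy, mechanism and service parts, extracted with the
 * SPTLRPC_FLVR_POLICY/MECH/SVC/BASE macros used throughout this file.
 * E.g. (sketch), a krb5p-style privacy flavor satisfies:
 *
 *	SPTLRPC_FLVR_POLICY(flvr) == SPTLRPC_POLICY_GSS &&
 *	SPTLRPC_FLVR_SVC(flvr) == SPTLRPC_SVC_PRIV
 */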
219 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
220 char *buf, int bufsize)
222 if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
223 snprintf(buf, bufsize, "hash:%s",
224 sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
226 snprintf(buf, bufsize, "%s",
227 sptlrpc_flavor2name_base(sf->sf_rpc));
229 buf[bufsize - 1] = '\0';
232 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
234 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
236 snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
239 * currently we don't support customized bulk specification for
240 * flavors other than plain
242 if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
246 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
247 strncat(buf, bspec, bufsize);
250 buf[bufsize - 1] = '\0';
253 EXPORT_SYMBOL(sptlrpc_flavor2name);
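
/*
 * Usage sketch (illustrative), matching how flavors are printed in CDEBUG
 * messages elsewhere in this file:
 *
 *	char str[32];
 *
 *	CDEBUG(D_SEC, "flavor: %s\n",
 *	       sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)));
 */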
255 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
259 if (flags & PTLRPC_SEC_FL_REVERSE)
260 strlcat(buf, "reverse,", bufsize);
261 if (flags & PTLRPC_SEC_FL_ROOTONLY)
262 strlcat(buf, "rootonly,", bufsize);
263 if (flags & PTLRPC_SEC_FL_UDESC)
264 strlcat(buf, "udesc,", bufsize);
265 if (flags & PTLRPC_SEC_FL_BULK)
266 strlcat(buf, "bulk,", bufsize);
268 strlcat(buf, "-,", bufsize);
272 EXPORT_SYMBOL(sptlrpc_secflags2str);
/**************************************************
 * client context APIs                            *
 **************************************************/
279 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
281 struct vfs_cred vcred;
282 int create = 1, remove_dead = 1;
285 LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
287 if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
288 PTLRPC_SEC_FL_ROOTONLY)) {
291 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
296 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
297 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
300 return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
304 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
306 atomic_inc(&ctx->cc_refcount);
309 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
311 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
313 struct ptlrpc_sec *sec = ctx->cc_sec;
316 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
318 if (!atomic_dec_and_test(&ctx->cc_refcount))
321 sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
323 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
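
/*
 * Reference pairing sketch (illustrative): every sptlrpc_cli_ctx_get() must
 * be balanced by a sptlrpc_cli_ctx_put(); the last put releases the context
 * through the owning policy's release_ctx():
 *
 *	ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
 *	... use ctx ...
 *	sptlrpc_cli_ctx_put(ctx, 1);
 */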
/**
 * Expire the client context immediately.
 *
 * \pre Caller must hold at least 1 reference on the \a ctx.
 */
330 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
332 LASSERT(ctx->cc_ops->die);
333 ctx->cc_ops->die(ctx, 0);
335 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
/**
 * Wake up the threads that are waiting on this client context. Called
 * after some status change has happened on \a ctx.
 */
341 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
343 struct ptlrpc_request *req, *next;
345 spin_lock(&ctx->cc_lock);
346 list_for_each_entry_safe(req, next, &ctx->cc_req_list,
348 list_del_init(&req->rq_ctx_chain);
349 ptlrpc_client_wake_req(req);
351 spin_unlock(&ctx->cc_lock);
353 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
355 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
357 LASSERT(ctx->cc_ops);
359 if (ctx->cc_ops->display == NULL)
362 return ctx->cc_ops->display(ctx, buf, bufsize);
365 static int import_sec_check_expire(struct obd_import *imp)
369 spin_lock(&imp->imp_lock);
370 if (imp->imp_sec_expire &&
371 imp->imp_sec_expire < ktime_get_real_seconds()) {
373 imp->imp_sec_expire = 0;
375 spin_unlock(&imp->imp_lock);
380 CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
381 return sptlrpc_import_sec_adapt(imp, NULL, NULL);
/**
 * Get and validate the client side ptlrpc security facilities from
 * \a imp. There is a race condition on client reconnect when the import is
 * being destroyed while there are outstanding client bound requests. In
 * this case do not output any error messages if import security is not
 * found.
 *
 * \param[in] imp obd import associated with client
 * \param[out] sec client side ptlrpc security
 *
 * \retval 0 if security retrieved successfully
 * \retval -ve errno if there was a problem
 */
397 static int import_sec_validate_get(struct obd_import *imp,
398 struct ptlrpc_sec **sec)
402 if (unlikely(imp->imp_sec_expire)) {
403 rc = import_sec_check_expire(imp);
408 *sec = sptlrpc_import_sec_ref(imp);
410 CERROR("import %p (%s) with no sec\n",
411 imp, ptlrpc_import_state_name(imp->imp_state));
415 if (unlikely((*sec)->ps_dying)) {
416 CERROR("attempt to use dying sec %p\n", sec);
417 sptlrpc_sec_put(*sec);
/**
 * Given a \a req, find or allocate an appropriate context for it.
 * \pre req->rq_cli_ctx == NULL.
 *
 * \retval 0 succeed, and req->rq_cli_ctx is set.
 * \retval -ve error number, and req->rq_cli_ctx == NULL.
 */
431 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
433 struct obd_import *imp = req->rq_import;
434 struct ptlrpc_sec *sec;
439 LASSERT(!req->rq_cli_ctx);
442 rc = import_sec_validate_get(imp, &sec);
446 req->rq_cli_ctx = get_my_ctx(sec);
448 sptlrpc_sec_put(sec);
450 if (!req->rq_cli_ctx) {
		CERROR("req %p: failed to get context\n", req);
452 RETURN(-ECONNREFUSED);
/**
 * Drop the context for \a req.
 * \pre req->rq_cli_ctx != NULL.
 * \post req->rq_cli_ctx == NULL.
 *
 * If \a sync == 0, this function should return quickly without sleeping;
 * otherwise it might trigger and wait for the whole process of sending
 * a context-destroying rpc to the server.
 */
467 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
472 LASSERT(req->rq_cli_ctx);
	/*
	 * the request might be asked to release its context early, while
	 * still on the context waiting list.
	 */
478 if (!list_empty(&req->rq_ctx_chain)) {
479 spin_lock(&req->rq_cli_ctx->cc_lock);
480 list_del_init(&req->rq_ctx_chain);
481 spin_unlock(&req->rq_cli_ctx->cc_lock);
484 sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
485 req->rq_cli_ctx = NULL;
490 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
491 struct ptlrpc_cli_ctx *oldctx,
492 struct ptlrpc_cli_ctx *newctx)
494 struct sptlrpc_flavor old_flvr;
	char *reqmsg = NULL; /* to work around old gcc */
499 LASSERT(req->rq_reqmsg);
500 LASSERT(req->rq_reqlen);
501 LASSERT(req->rq_replen);
504 "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
505 req, oldctx, oldctx->cc_vcred.vc_uid,
506 sec2target_str(oldctx->cc_sec), newctx, newctx->cc_vcred.vc_uid,
507 sec2target_str(newctx->cc_sec), oldctx->cc_sec,
508 oldctx->cc_sec->ps_policy->sp_name, newctx->cc_sec,
509 newctx->cc_sec->ps_policy->sp_name);
512 old_flvr = req->rq_flvr;
514 /* save request message */
515 reqmsg_size = req->rq_reqlen;
516 if (reqmsg_size != 0) {
517 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
520 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
523 /* release old req/rep buf */
524 req->rq_cli_ctx = oldctx;
525 sptlrpc_cli_free_reqbuf(req);
526 sptlrpc_cli_free_repbuf(req);
527 req->rq_cli_ctx = newctx;
529 /* recalculate the flavor */
530 sptlrpc_req_set_flavor(req, 0);
533 * alloc new request buffer
534 * we don't need to alloc reply buffer here, leave it to the
535 * rest procedure of ptlrpc
537 if (reqmsg_size != 0) {
538 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
540 LASSERT(req->rq_reqmsg);
541 memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
543 CWARN("failed to alloc reqbuf: %d\n", rc);
544 req->rq_flvr = old_flvr;
547 OBD_FREE_LARGE(reqmsg, reqmsg_size);
/**
 * If the current context of \a req is dead somehow, e.g. we just switched
 * flavor and thus marked the original contexts dead, find a new context for
 * it. If no switch is needed, \a req will end up with the same context.
 *
 * \note a request must have a context, to keep other parts of the code happy.
 * In any case of failure during the switching, we must restore the old one.
 */
560 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
562 struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
563 struct ptlrpc_cli_ctx *newctx;
570 sptlrpc_cli_ctx_get(oldctx);
571 sptlrpc_req_put_ctx(req, 0);
573 rc = sptlrpc_req_get_ctx(req);
575 LASSERT(!req->rq_cli_ctx);
577 /* restore old ctx */
578 req->rq_cli_ctx = oldctx;
582 newctx = req->rq_cli_ctx;
585 if (unlikely(newctx == oldctx &&
586 test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
		/*
		 * still got the old dead ctx, which usually means the
		 * system is too busy
		 */
591 "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
592 newctx, newctx->cc_flags);
594 set_current_state(TASK_INTERRUPTIBLE);
595 schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
596 } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
599 * new ctx not up to date yet
602 "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
603 newctx, newctx->cc_flags);
		/*
		 * it's possible newctx == oldctx if we're switching
		 * subflavors within the same sec.
		 */
609 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
611 /* restore old ctx */
612 sptlrpc_req_put_ctx(req, 0);
613 req->rq_cli_ctx = oldctx;
617 LASSERT(req->rq_cli_ctx == newctx);
620 sptlrpc_cli_ctx_put(oldctx, 1);
623 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
626 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
628 if (cli_ctx_is_refreshed(ctx))
634 int ctx_refresh_timeout(void *data)
636 struct ptlrpc_request *req = data;
639 /* conn_cnt is needed in expire_one_request */
640 lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
642 rc = ptlrpc_expire_one_request(req, 1);
	/*
	 * If we started recovery, we should mark this ctx dead; otherwise,
	 * if lgssd died, nobody would retire this ctx and subsequent connect
	 * attempts would keep finding the same ctx, causing a deadlock.
	 * There's an assumption that the expiry time of the request is
	 * later than the context refresh expiry time.
	 */
651 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
656 void ctx_refresh_interrupt(void *data)
658 struct ptlrpc_request *req = data;
660 spin_lock(&req->rq_lock);
662 spin_unlock(&req->rq_lock);
666 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
668 spin_lock(&ctx->cc_lock);
669 if (!list_empty(&req->rq_ctx_chain))
670 list_del_init(&req->rq_ctx_chain);
671 spin_unlock(&ctx->cc_lock);
/**
 * Refresh the context of \a req, if it's not up to date.
 *
 * \param timeout
 * - < 0: don't wait
 * - = 0: wait until success or a fatal error occurs
 * - > 0: timeout value (in seconds)
 *
 * The status of the context may be changed by other threads at any time.
 * We allow this race, but once we return 0, the caller will assume the
 * context is up to date and keep using it until the owning rpc is done.
 *
 * \retval 0 only if the context is up to date.
 * \retval -ve error number.
 */
688 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
690 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
691 struct ptlrpc_sec *sec;
692 struct l_wait_info lwi;
699 if (req->rq_ctx_init || req->rq_ctx_fini)
	/*
	 * During the process a request's context might even change type
	 * (e.g. from a gss ctx to a null ctx), so on each loop we need to
	 * re-check.
	 */
708 rc = import_sec_validate_get(req->rq_import, &sec);
712 if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
713 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
714 req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
715 req_off_ctx_list(req, ctx);
716 sptlrpc_req_replace_dead_ctx(req);
717 ctx = req->rq_cli_ctx;
719 sptlrpc_sec_put(sec);
721 if (cli_ctx_is_eternal(ctx))
724 if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
725 LASSERT(ctx->cc_ops->refresh);
726 ctx->cc_ops->refresh(ctx);
728 LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
730 LASSERT(ctx->cc_ops->validate);
731 if (ctx->cc_ops->validate(ctx) == 0) {
732 req_off_ctx_list(req, ctx);
736 if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
737 spin_lock(&req->rq_lock);
739 spin_unlock(&req->rq_lock);
740 req_off_ctx_list(req, ctx);
	/*
	 * There's a subtle issue for resending RPCs, suppose the following
	 * situation:
	 *  1. the request was sent to the server.
	 *  2. recovery was kicked off and finished.
	 *  3. the request was resent.
	 *  4. the old reply from the server was received; we accept and
	 *     verify the reply. This has to succeed, otherwise the error
	 *     will be noticed by the application.
	 *  5. the new reply from the server was received, and dropped by
	 *     LNet.
	 *
	 * Note the xid of the old & new request is the same. We can't simply
	 * change the xid for the resent request, because the server relies
	 * on it for reply reconstruction.
	 *
	 * Commonly the original context should be uptodate because we have
	 * a nice long expiry time; the server will keep its context because
	 * we at least hold a ref on the old context, which prevents the
	 * context-destroying RPC from being sent. So the server can still
	 * accept the request and finish the RPC. But if that's not the case:
	 *  1. If the server side context has been trimmed, a NO_CONTEXT will
	 *     be returned, and gss_cli_ctx_verify/unseal will switch to the
	 *     new context by force.
	 *  2. If the current context was never refreshed, then we are fine:
	 *     we never really sent a request with the old context before.
	 */
771 if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
772 unlikely(req->rq_reqmsg) &&
773 lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
774 req_off_ctx_list(req, ctx);
778 if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
779 req_off_ctx_list(req, ctx);
781 * don't switch ctx if import was deactivated
783 if (req->rq_import->imp_deactive) {
784 spin_lock(&req->rq_lock);
786 spin_unlock(&req->rq_lock);
790 rc = sptlrpc_req_replace_dead_ctx(req);
792 LASSERT(ctx == req->rq_cli_ctx);
793 CERROR("req %p: failed to replace dead ctx %p: %d\n",
795 spin_lock(&req->rq_lock);
797 spin_unlock(&req->rq_lock);
801 ctx = req->rq_cli_ctx;
	/*
	 * Now we're sure this context is in the middle of an upcall; add
	 * ourselves to its waiting list.
	 */
809 spin_lock(&ctx->cc_lock);
810 if (list_empty(&req->rq_ctx_chain))
811 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
812 spin_unlock(&ctx->cc_lock);
815 RETURN(-EWOULDBLOCK);
817 /* Clear any flags that may be present from previous sends */
818 LASSERT(req->rq_receiving_reply == 0);
819 spin_lock(&req->rq_lock);
821 req->rq_timedout = 0;
824 spin_unlock(&req->rq_lock);
826 lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
828 ctx_refresh_interrupt, req);
829 rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
	/*
	 * The following cases could lead us here:
	 * - successfully refreshed;
	 * - interrupted;
	 * - timed out, and we don't want to recover from the failure;
	 * - timed out, and woken up upon recovery finishing;
	 * - someone else marked this ctx dead by force;
	 * - someone invalidated the req and called ptlrpc_client_wake_req(),
	 *   e.g. ptlrpc_abort_inflight();
	 */
841 if (!cli_ctx_is_refreshed(ctx)) {
		/* timed out or interrupted */
843 req_off_ctx_list(req, ctx);
/**
 * Initialize flavor settings for \a req, according to \a opcode.
 *
 * \note this could be called in two situations:
 * - new request from ptlrpc_pre_req(), with proper @opcode
 * - old request which changed ctx in the middle, with @opcode == 0
 */
859 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
861 struct ptlrpc_sec *sec;
863 LASSERT(req->rq_import);
864 LASSERT(req->rq_cli_ctx);
865 LASSERT(req->rq_cli_ctx->cc_sec);
866 LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
868 /* special security flags according to opcode */
872 case MGS_CONFIG_READ:
874 req->rq_bulk_read = 1;
878 req->rq_bulk_write = 1;
881 req->rq_ctx_init = 1;
884 req->rq_ctx_fini = 1;
		/* init/fini rpcs won't be resent, so they can't be here */
888 LASSERT(req->rq_ctx_init == 0);
889 LASSERT(req->rq_ctx_fini == 0);
891 /* cleanup flags, which should be recalculated */
892 req->rq_pack_udesc = 0;
893 req->rq_pack_bulk = 0;
897 sec = req->rq_cli_ctx->cc_sec;
899 spin_lock(&sec->ps_lock);
900 req->rq_flvr = sec->ps_flvr;
901 spin_unlock(&sec->ps_lock);
	/*
	 * force SVC_NULL for context initiation rpc, SVC_INTG for context
	 * destruction rpc
	 */
907 if (unlikely(req->rq_ctx_init))
908 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
909 else if (unlikely(req->rq_ctx_fini))
910 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
912 /* user descriptor flag, null security can't do it anyway */
913 if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
914 (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
915 req->rq_pack_udesc = 1;
917 /* bulk security flag */
918 if ((req->rq_bulk_read || req->rq_bulk_write) &&
919 sptlrpc_flavor_has_bulk(&req->rq_flvr))
920 req->rq_pack_bulk = 1;
923 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
925 if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
928 LASSERT(req->rq_clrbuf);
929 if (req->rq_pool || !req->rq_reqbuf)
932 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
933 req->rq_reqbuf = NULL;
934 req->rq_reqbuf_len = 0;
/**
 * Given an import \a imp, check whether the current user has a valid context
 * or not. We may create a new context and try to refresh it, retrying
 * repeatedly in case of non-fatal errors. Return 0 means success.
 */
942 int sptlrpc_import_check_ctx(struct obd_import *imp)
944 struct ptlrpc_sec *sec;
945 struct ptlrpc_cli_ctx *ctx;
946 struct ptlrpc_request *req = NULL;
953 sec = sptlrpc_import_sec_ref(imp);
954 ctx = get_my_ctx(sec);
955 sptlrpc_sec_put(sec);
960 if (cli_ctx_is_eternal(ctx) ||
961 ctx->cc_ops->validate(ctx) == 0) {
962 sptlrpc_cli_ctx_put(ctx, 1);
966 if (cli_ctx_is_error(ctx)) {
967 sptlrpc_cli_ctx_put(ctx, 1);
971 req = ptlrpc_request_cache_alloc(GFP_NOFS);
975 ptlrpc_cli_req_init(req);
976 atomic_set(&req->rq_refcount, 10000);
978 req->rq_import = imp;
979 req->rq_flvr = sec->ps_flvr;
980 req->rq_cli_ctx = ctx;
982 rc = sptlrpc_req_refresh_ctx(req, 0);
983 LASSERT(list_empty(&req->rq_ctx_chain));
984 sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
985 ptlrpc_request_cache_free(req);
/**
 * Used by the ptlrpc client, to perform the pre-defined security
 * transformation upon the request message of \a req. After this function
 * is called, req->rq_reqmsg is still accessible as clear text.
 */
995 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
997 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1003 LASSERT(ctx->cc_sec);
1004 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
	/*
	 * we wrap the bulk request here because now we can be sure
	 * the context is uptodate.
	 */
1011 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
1016 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1017 case SPTLRPC_SVC_NULL:
1018 case SPTLRPC_SVC_AUTH:
1019 case SPTLRPC_SVC_INTG:
1020 LASSERT(ctx->cc_ops->sign);
1021 rc = ctx->cc_ops->sign(ctx, req);
1023 case SPTLRPC_SVC_PRIV:
1024 LASSERT(ctx->cc_ops->seal);
1025 rc = ctx->cc_ops->seal(ctx, req);
1032 LASSERT(req->rq_reqdata_len);
1033 LASSERT(req->rq_reqdata_len % 8 == 0);
1034 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1040 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1042 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1048 LASSERT(ctx->cc_sec);
1049 LASSERT(req->rq_repbuf);
1050 LASSERT(req->rq_repdata);
1051 LASSERT(req->rq_repmsg == NULL);
1053 req->rq_rep_swab_mask = 0;
1055 rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1058 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
		CERROR("failed to unpack reply: x%llu\n", req->rq_xid);
1066 if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1067 CERROR("replied data length %d too small\n",
1068 req->rq_repdata_len);
1072 if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1073 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1074 CERROR("reply policy %u doesn't match request policy %u\n",
1075 SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1076 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1080 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1081 case SPTLRPC_SVC_NULL:
1082 case SPTLRPC_SVC_AUTH:
1083 case SPTLRPC_SVC_INTG:
1084 LASSERT(ctx->cc_ops->verify);
1085 rc = ctx->cc_ops->verify(ctx, req);
1087 case SPTLRPC_SVC_PRIV:
1088 LASSERT(ctx->cc_ops->unseal);
1089 rc = ctx->cc_ops->unseal(ctx, req);
1094 LASSERT(rc || req->rq_repmsg || req->rq_resend);
1096 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1098 req->rq_rep_swab_mask = 0;
/**
 * Used by the ptlrpc client, to perform a security transformation upon the
 * reply message of \a req. After successful return, req->rq_repmsg points to
 * the reply message in clear text.
 *
 * \pre the reply buffer should have been un-posted from LNet, so nothing is
 * going to change the data while we work on it.
 */
1110 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1112 LASSERT(req->rq_repbuf);
1113 LASSERT(req->rq_repdata == NULL);
1114 LASSERT(req->rq_repmsg == NULL);
1115 LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1117 if (req->rq_reply_off == 0 &&
1118 (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1119 CERROR("real reply with offset 0\n");
1123 if (req->rq_reply_off % 8 != 0) {
1124 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1128 req->rq_repdata = (struct lustre_msg *)
1129 (req->rq_repbuf + req->rq_reply_off);
1130 req->rq_repdata_len = req->rq_nob_received;
1132 return do_cli_unwrap_reply(req);
/**
 * Used by the ptlrpc client, to perform a security transformation upon the
 * early reply message of \a req. We expect rq_reply_off to be 0, and
 * rq_nob_received to be the early reply size.
 *
 * Because the receive buffer might still be posted, the reply data might be
 * changed at any time, no matter whether we're holding rq_lock or not. For
 * this reason we allocate a separate ptlrpc_request and reply buffer for
 * the early reply processing.
 *
 * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
 * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
 * \a *req_ret to release it.
 * \retval -ve error number, and \a req_ret will not be set.
 */
1150 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1151 struct ptlrpc_request **req_ret)
1153 struct ptlrpc_request *early_req;
1155 int early_bufsz, early_size;
1160 early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1161 if (early_req == NULL)
1164 ptlrpc_cli_req_init(early_req);
1166 early_size = req->rq_nob_received;
1167 early_bufsz = size_roundup_power2(early_size);
1168 OBD_ALLOC_LARGE(early_buf, early_bufsz);
1169 if (early_buf == NULL)
1170 GOTO(err_req, rc = -ENOMEM);
	/* sanity checks and data copy-out, done inside the spinlock */
1173 spin_lock(&req->rq_lock);
1175 if (req->rq_replied) {
1176 spin_unlock(&req->rq_lock);
1177 GOTO(err_buf, rc = -EALREADY);
1180 LASSERT(req->rq_repbuf);
1181 LASSERT(req->rq_repdata == NULL);
1182 LASSERT(req->rq_repmsg == NULL);
1184 if (req->rq_reply_off != 0) {
1185 CERROR("early reply with offset %u\n", req->rq_reply_off);
1186 spin_unlock(&req->rq_lock);
1187 GOTO(err_buf, rc = -EPROTO);
1190 if (req->rq_nob_received != early_size) {
		/* even if another early reply arrived, the size should be the same */
1192 CERROR("data size has changed from %u to %u\n",
1193 early_size, req->rq_nob_received);
1194 spin_unlock(&req->rq_lock);
1195 GOTO(err_buf, rc = -EINVAL);
1198 if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1199 CERROR("early reply length %d too small\n",
1200 req->rq_nob_received);
1201 spin_unlock(&req->rq_lock);
1202 GOTO(err_buf, rc = -EALREADY);
1205 memcpy(early_buf, req->rq_repbuf, early_size);
1206 spin_unlock(&req->rq_lock);
1208 early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1209 early_req->rq_flvr = req->rq_flvr;
1210 early_req->rq_repbuf = early_buf;
1211 early_req->rq_repbuf_len = early_bufsz;
1212 early_req->rq_repdata = (struct lustre_msg *) early_buf;
1213 early_req->rq_repdata_len = early_size;
1214 early_req->rq_early = 1;
1215 early_req->rq_reqmsg = req->rq_reqmsg;
1217 rc = do_cli_unwrap_reply(early_req);
1219 DEBUG_REQ(D_ADAPTTO, early_req,
1220 "error %d unwrap early reply", rc);
1224 LASSERT(early_req->rq_repmsg);
1225 *req_ret = early_req;
1229 sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1231 OBD_FREE_LARGE(early_buf, early_bufsz);
1233 ptlrpc_request_cache_free(early_req);
/**
 * Used by the ptlrpc client, to release a processed early reply \a early_req.
 *
 * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
 */
1242 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1244 LASSERT(early_req->rq_repbuf);
1245 LASSERT(early_req->rq_repdata);
1246 LASSERT(early_req->rq_repmsg);
1248 sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1249 OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1250 ptlrpc_request_cache_free(early_req);
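
/*
 * Lifecycle sketch (illustrative), per the contracts documented above:
 *
 *	struct ptlrpc_request *early_req;
 *
 *	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *	if (rc == 0) {
 *		... read early_req->rq_repmsg ...
 *		sptlrpc_cli_finish_early_reply(early_req);
 *	}
 */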
/**************************************************
 * sec ID                                         *
 **************************************************/

/*
 * "fixed" secs (e.g. null) use sec_id < 0
 */
1260 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1262 int sptlrpc_get_next_secid(void)
1264 return atomic_inc_return(&sptlrpc_sec_id);
1266 EXPORT_SYMBOL(sptlrpc_get_next_secid);
/**************************************************
 * client side high-level security APIs           *
 **************************************************/
1272 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1273 int grace, int force)
1275 struct ptlrpc_sec_policy *policy = sec->ps_policy;
1277 LASSERT(policy->sp_cops);
1278 LASSERT(policy->sp_cops->flush_ctx_cache);
1280 return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1283 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1285 struct ptlrpc_sec_policy *policy = sec->ps_policy;
1287 LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1288 LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1289 LASSERT(policy->sp_cops->destroy_sec);
	CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1293 policy->sp_cops->destroy_sec(sec);
1294 sptlrpc_policy_put(policy);
1297 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1299 sec_cop_destroy_sec(sec);
1301 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1303 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1305 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1307 if (sec->ps_policy->sp_cops->kill_sec) {
1308 sec->ps_policy->sp_cops->kill_sec(sec);
1310 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1314 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1317 atomic_inc(&sec->ps_refcount);
1321 EXPORT_SYMBOL(sptlrpc_sec_get);
1323 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1326 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1328 if (atomic_dec_and_test(&sec->ps_refcount)) {
1329 sptlrpc_gc_del_sec(sec);
1330 sec_cop_destroy_sec(sec);
1334 EXPORT_SYMBOL(sptlrpc_sec_put);
/*
 * the policy module is responsible for taking a reference of the import
 */
1340 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1341 struct ptlrpc_svc_ctx *svc_ctx,
1342 struct sptlrpc_flavor *sf,
1343 enum lustre_sec_part sp)
1345 struct ptlrpc_sec_policy *policy;
1346 struct ptlrpc_sec *sec;
1352 LASSERT(imp->imp_dlm_fake == 1);
1354 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1355 imp->imp_obd->obd_type->typ_name,
1356 imp->imp_obd->obd_name,
1357 sptlrpc_flavor2name(sf, str, sizeof(str)));
1359 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1360 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1362 LASSERT(imp->imp_dlm_fake == 0);
1364 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1365 imp->imp_obd->obd_type->typ_name,
1366 imp->imp_obd->obd_name,
1367 sptlrpc_flavor2name(sf, str, sizeof(str)));
1369 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1371 CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1376 sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1378 atomic_inc(&sec->ps_refcount);
1382 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1383 sptlrpc_gc_add_sec(sec);
1385 sptlrpc_policy_put(policy);
1391 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1393 struct ptlrpc_sec *sec;
1395 spin_lock(&imp->imp_lock);
1396 sec = sptlrpc_sec_get(imp->imp_sec);
1397 spin_unlock(&imp->imp_lock);
1401 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
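
/*
 * Reference pairing sketch (illustrative), as used by the flush helpers
 * further below:
 *
 *	sec = sptlrpc_import_sec_ref(imp);
 *	if (sec) {
 *		... use sec ...
 *		sptlrpc_sec_put(sec);
 *	}
 */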
1403 static void sptlrpc_import_sec_install(struct obd_import *imp,
1404 struct ptlrpc_sec *sec)
1406 struct ptlrpc_sec *old_sec;
1408 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1410 spin_lock(&imp->imp_lock);
1411 old_sec = imp->imp_sec;
1413 spin_unlock(&imp->imp_lock);
1416 sptlrpc_sec_kill(old_sec);
1418 /* balance the ref taken by this import */
1419 sptlrpc_sec_put(old_sec);
1424 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1426 return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1430 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
/**
 * Get an appropriate ptlrpc_sec for the \a imp, according to the current
 * configuration. When called, imp->imp_sec may or may not be NULL.
 *
 * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
 * - reverse import: \a svc_ctx and \a flvr are obtained from the incoming
 *   request.
 */
1442 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1443 struct ptlrpc_svc_ctx *svc_ctx,
1444 struct sptlrpc_flavor *flvr)
1446 struct ptlrpc_connection *conn;
1447 struct sptlrpc_flavor sf;
1448 struct ptlrpc_sec *sec, *newsec;
1449 enum lustre_sec_part sp;
1460 conn = imp->imp_connection;
1462 if (svc_ctx == NULL) {
1463 struct client_obd *cliobd = &imp->imp_obd->u.cli;
		/*
		 * normal import: determine the flavor from the rule set,
		 * except for the mgc, whose flavor is predetermined.
		 */
1468 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1469 sf = cliobd->cl_flvr_mgc;
1471 sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1473 &cliobd->cl_target_uuid,
1476 sp = imp->imp_obd->u.cli.cl_sp_me;
		/* reverse import, determine flavor from incoming request */
1481 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1482 sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1483 PTLRPC_SEC_FL_ROOTONLY;
1485 sp = sptlrpc_target_sec_part(imp->imp_obd);
1488 sec = sptlrpc_import_sec_ref(imp);
1492 if (flavor_equal(&sf, &sec->ps_flvr))
1495 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1496 imp->imp_obd->obd_name,
1497 obd_uuid2str(&conn->c_remote_uuid),
1498 sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1499 sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1500 } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1501 SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1502 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1503 imp->imp_obd->obd_name,
1504 obd_uuid2str(&conn->c_remote_uuid),
1505 LNET_NIDNET(conn->c_self),
1506 sptlrpc_flavor2name(&sf, str, sizeof(str)));
1509 mutex_lock(&imp->imp_sec_mutex);
1511 newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1513 sptlrpc_import_sec_install(imp, newsec);
1515 CERROR("import %s->%s: failed to create new sec\n",
1516 imp->imp_obd->obd_name,
1517 obd_uuid2str(&conn->c_remote_uuid));
1521 mutex_unlock(&imp->imp_sec_mutex);
1523 sptlrpc_sec_put(sec);
1527 void sptlrpc_import_sec_put(struct obd_import *imp)
1530 sptlrpc_sec_kill(imp->imp_sec);
1532 sptlrpc_sec_put(imp->imp_sec);
1533 imp->imp_sec = NULL;
1537 static void import_flush_ctx_common(struct obd_import *imp,
1538 uid_t uid, int grace, int force)
1540 struct ptlrpc_sec *sec;
1545 sec = sptlrpc_import_sec_ref(imp);
1549 sec_cop_flush_ctx_cache(sec, uid, grace, force);
1550 sptlrpc_sec_put(sec);
1553 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
	/*
	 * it's important to use grace mode here; see the explanation in
	 * sptlrpc_req_refresh_ctx()
	 */
1559 import_flush_ctx_common(imp, 0, 1, 1);
1562 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1564 import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1567 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1569 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1571 import_flush_ctx_common(imp, -1, 1, 1);
1573 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
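
/*
 * Summary of the flush helpers above (illustrative): they all funnel into
 * import_flush_ctx_common(imp, uid, grace, force), where uid == 0 flushes
 * the root contexts, uid == -1 flushes the contexts of all users, and the
 * current user's uid flushes only that user's own contexts.
 */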
/**
 * Used by the ptlrpc client to allocate the request buffer of \a req. On
 * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
 */
1579 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1581 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1582 struct ptlrpc_sec_policy *policy;
1586 LASSERT(ctx->cc_sec);
1587 LASSERT(ctx->cc_sec->ps_policy);
1588 LASSERT(req->rq_reqmsg == NULL);
1589 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1591 policy = ctx->cc_sec->ps_policy;
1592 rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1594 LASSERT(req->rq_reqmsg);
1595 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1597 /* zeroing preallocated buffer */
1599 memset(req->rq_reqmsg, 0, msgsize);
/**
 * Used by the ptlrpc client to free the request buffer of \a req. After
 * this, req->rq_reqmsg is set to NULL and should not be accessed anymore.
 */
1609 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1611 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1612 struct ptlrpc_sec_policy *policy;
1615 LASSERT(ctx->cc_sec);
1616 LASSERT(ctx->cc_sec->ps_policy);
1617 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1619 if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1622 policy = ctx->cc_sec->ps_policy;
1623 policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1624 req->rq_reqmsg = NULL;
/*
 * NOTE: the caller must guarantee the buffer size is big enough for the
 * enlargement
 */
1630 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1631 int segment, int newsize)
1634 int oldsize, oldmsg_size, movesize;
1636 LASSERT(segment < msg->lm_bufcount);
1637 LASSERT(msg->lm_buflens[segment] <= newsize);
1639 if (msg->lm_buflens[segment] == newsize)
1642 /* nothing to do if we are enlarging the last segment */
1643 if (segment == msg->lm_bufcount - 1) {
1644 msg->lm_buflens[segment] = newsize;
1648 oldsize = msg->lm_buflens[segment];
1650 src = lustre_msg_buf(msg, segment + 1, 0);
1651 msg->lm_buflens[segment] = newsize;
1652 dst = lustre_msg_buf(msg, segment + 1, 0);
1653 msg->lm_buflens[segment] = oldsize;
1655 /* move from segment + 1 to end segment */
1656 LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1657 oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1658 movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1659 LASSERT(movesize >= 0);
1662 memmove(dst, src, movesize);
	/* note we don't clear the area where the old data lived; it's not secret */
1666 /* finally set new segment size */
1667 msg->lm_buflens[segment] = newsize;
1669 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
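
/*
 * Worked example (illustrative): enlarging segment 1 of a 3-segment message
 * from 24 to 40 bytes (both sizes already 8-byte aligned) leaves segments
 * 0-1 in place, memmove()s everything from segment 2 onward forward by
 * 16 bytes, then records the new length in lm_buflens[1].
 */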
/**
 * Used by the ptlrpc client to enlarge the \a segment of the request message
 * pointed to by req->rq_reqmsg to size \a newsize. All previously filled-in
 * data will be preserved after the enlargement. This must be called after
 * the original request buffer has been allocated.
 *
 * \note after this is called, rq_reqmsg and rq_reqlen might have been
 * changed, so the caller should refresh its local pointers if needed.
 */
1680 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1681 const struct req_msg_field *field,
1684 struct req_capsule *pill = &req->rq_pill;
1685 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1686 struct ptlrpc_sec_cops *cops;
1687 struct lustre_msg *msg = req->rq_reqmsg;
1688 int segment = __req_capsule_offset(pill, field, RCL_CLIENT);
1692 LASSERT(msg->lm_bufcount > segment);
1693 LASSERT(msg->lm_buflens[segment] <= newsize);
1695 if (msg->lm_buflens[segment] == newsize)
1698 cops = ctx->cc_sec->ps_policy->sp_cops;
1699 LASSERT(cops->enlarge_reqbuf);
1700 return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1702 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
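
/*
 * Usage sketch (illustrative; RMF_MDT_MD and md_size are assumptions,
 * standing in for whatever capsule field a caller needs to grow):
 *
 *	rc = sptlrpc_cli_enlarge_reqbuf(req, &RMF_MDT_MD, md_size);
 *	if (rc == 0)
 *		msg = req->rq_reqmsg;	(the pointer may have moved)
 */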
/**
 * Used by the ptlrpc client to allocate the reply buffer of \a req.
 *
 * \note After this, req->rq_repmsg is still not accessible.
 */
1709 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1711 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1712 struct ptlrpc_sec_policy *policy;
1717 LASSERT(ctx->cc_sec);
1718 LASSERT(ctx->cc_sec->ps_policy);
1723 policy = ctx->cc_sec->ps_policy;
1724 RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
/**
 * Used by the ptlrpc client to free the reply buffer of \a req. After this,
 * req->rq_repmsg is set to NULL and should not be accessed anymore.
 */
1731 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1733 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1734 struct ptlrpc_sec_policy *policy;
1739 LASSERT(ctx->cc_sec);
1740 LASSERT(ctx->cc_sec->ps_policy);
1741 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1743 if (req->rq_repbuf == NULL)
1745 LASSERT(req->rq_repbuf_len);
1747 policy = ctx->cc_sec->ps_policy;
1748 policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1749 req->rq_repmsg = NULL;
1752 EXPORT_SYMBOL(sptlrpc_cli_free_repbuf);
1754 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1755 struct ptlrpc_cli_ctx *ctx)
1757 struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1759 if (!policy->sp_cops->install_rctx)
1761 return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1764 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1765 struct ptlrpc_svc_ctx *ctx)
1767 struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1769 if (!policy->sp_sops->install_rctx)
1771 return policy->sp_sops->install_rctx(imp, ctx);
1774 /* Get SELinux policy info from userspace */
1775 static int sepol_helper(struct obd_import *imp)
1777 char mtime_str[21] = { 0 }, mode_str[2] = { 0 };
1779 [0] = "/usr/sbin/l_getsepol",
1781 [2] = NULL, /* obd type */
1783 [4] = NULL, /* obd name */
1785 [6] = mtime_str, /* policy mtime */
1787 [8] = mode_str, /* enforcing mode */
1792 [1] = "PATH=/sbin:/usr/sbin",
1798 if (imp == NULL || imp->imp_obd == NULL ||
1799 imp->imp_obd->obd_type == NULL) {
1802 argv[2] = imp->imp_obd->obd_type->typ_name;
1803 argv[4] = imp->imp_obd->obd_name;
1804 spin_lock(&imp->imp_sec->ps_lock);
1805 if (imp->imp_sec->ps_sepol_mtime == 0 &&
1806 imp->imp_sec->ps_sepol[0] == '\0') {
1807 /* ps_sepol has not been initialized */
1811 snprintf(mtime_str, sizeof(mtime_str), "%lu",
1812 imp->imp_sec->ps_sepol_mtime);
1813 mode_str[0] = imp->imp_sec->ps_sepol[0];
1815 spin_unlock(&imp->imp_sec->ps_lock);
1816 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
1823 static inline int sptlrpc_sepol_needs_check(struct ptlrpc_sec *imp_sec)
1827 if (send_sepol == 0 || !selinux_is_enabled())
1830 if (send_sepol == -1)
1831 /* send_sepol == -1 means fetch sepol status every time */
1834 spin_lock(&imp_sec->ps_lock);
1835 checknext = imp_sec->ps_sepol_checknext;
1836 spin_unlock(&imp_sec->ps_lock);
	/* the next check is too far in the future, bring it back in range */
1839 if (ktime_after(checknext,
1840 ktime_add(ktime_get(), ktime_set(send_sepol, 0))))
1843 if (ktime_before(ktime_get(), checknext))
1844 /* too early to fetch sepol status */
1848 /* define new sepol_checknext time */
1849 spin_lock(&imp_sec->ps_lock);
1850 imp_sec->ps_sepol_checknext = ktime_add(ktime_get(),
1851 ktime_set(send_sepol, 0));
1852 spin_unlock(&imp_sec->ps_lock);
1857 int sptlrpc_get_sepol(struct ptlrpc_request *req)
1859 struct ptlrpc_sec *imp_sec = req->rq_import->imp_sec;
1864 (req->rq_sepol)[0] = '\0';
1866 #ifndef HAVE_SELINUX
1867 if (unlikely(send_sepol != 0))
1869 "Client cannot report SELinux status, it was not built against libselinux.\n");
1873 if (send_sepol == 0 || !selinux_is_enabled())
1876 if (imp_sec == NULL)
1879 /* Retrieve SELinux status info */
1880 if (sptlrpc_sepol_needs_check(imp_sec))
1881 rc = sepol_helper(req->rq_import);
1882 if (likely(rc == 0)) {
1883 spin_lock(&imp_sec->ps_lock);
1884 memcpy(req->rq_sepol, imp_sec->ps_sepol,
1885 sizeof(req->rq_sepol));
1886 spin_unlock(&imp_sec->ps_lock);
1891 EXPORT_SYMBOL(sptlrpc_get_sepol);
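
/*
 * Flow summary (illustrative): sptlrpc_get_sepol() copies the cached
 * imp_sec->ps_sepol into req->rq_sepol, first refreshing the cache via
 * sepol_helper() (the l_getsepol upcall) whenever sptlrpc_sepol_needs_check()
 * says the send_sepol interval has elapsed.
 */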
/**************************************************
 * server side security                           *
 **************************************************/
1897 static int flavor_allowed(struct sptlrpc_flavor *exp,
1898 struct ptlrpc_request *req)
1900 struct sptlrpc_flavor *flvr = &req->rq_flvr;
1902 if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1905 if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1906 SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1907 SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1908 SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1914 #define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
1917 * Given an export \a exp, check whether the flavor of incoming \a req
1918 * is allowed by the export \a exp. Main logic is about taking care of
1919 * changing configurations. Return 0 means success.
1921 int sptlrpc_target_export_check(struct obd_export *exp,
1922 struct ptlrpc_request *req)
1924 struct sptlrpc_flavor flavor;
	/*
	 * client side export has no imp_reverse, skip.
	 * FIXME: maybe we should check the flavor in this case as well???
	 */
1933 if (exp->exp_imp_reverse == NULL)
1936 /* don't care about ctx fini rpc */
1937 if (req->rq_ctx_fini)
1940 spin_lock(&exp->exp_lock);
	/*
	 * if the flavor just changed (exp->exp_flvr_changed != 0), we wait
	 * for the first req with the new flavor, then treat it as the current
	 * flavor and adapt the reverse sec according to it.
	 * note the first rpc with the new flavor might not be with a root
	 * ctx, in which case we delay the sec_adapt by leaving
	 * exp_flvr_adapt == 1.
	 */
1949 if (unlikely(exp->exp_flvr_changed) &&
1950 flavor_allowed(&exp->exp_flvr_old[1], req)) {
		/*
		 * make the new flavor "current", and the old ones "old"
		 */
1955 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1956 exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1957 flavor = exp->exp_flvr_old[1];
1958 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1959 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1960 exp->exp_flvr_old[0] = exp->exp_flvr;
1961 exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
1962 EXP_FLVR_UPDATE_EXPIRE;
1963 exp->exp_flvr = flavor;
1965 /* flavor change finished */
1966 exp->exp_flvr_changed = 0;
1967 LASSERT(exp->exp_flvr_adapt == 1);
		/* if it's gss, we're only interested in root ctx init */
1970 if (req->rq_auth_gss &&
1971 !(req->rq_ctx_init &&
1972 (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1973 req->rq_auth_usr_ost))) {
1974 spin_unlock(&exp->exp_lock);
1975 CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1976 req->rq_auth_gss, req->rq_ctx_init,
1977 req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1978 req->rq_auth_usr_ost);
1982 exp->exp_flvr_adapt = 0;
1983 spin_unlock(&exp->exp_lock);
1985 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1986 req->rq_svc_ctx, &flavor);
	/*
	 * if it equals the current flavor, we accept it, but need to
	 * deal with the reverse sec/ctx
	 */
1993 if (likely(flavor_allowed(&exp->exp_flvr, req))) {
		/*
		 * most cases should return here; we're only interested in
		 * gss root ctx init
		 */
1998 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1999 (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2000 !req->rq_auth_usr_ost)) {
2001 spin_unlock(&exp->exp_lock);
		/*
		 * if the flavor just changed, we should not proceed; just
		 * leave it, and the current flavor will be discovered and
		 * replaced shortly. Let _this_ rpc pass through.
		 */
2010 if (exp->exp_flvr_changed) {
2011 LASSERT(exp->exp_flvr_adapt);
2012 spin_unlock(&exp->exp_lock);
2016 if (exp->exp_flvr_adapt) {
2017 exp->exp_flvr_adapt = 0;
2018 CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
2019 exp, exp->exp_flvr.sf_rpc,
2020 exp->exp_flvr_old[0].sf_rpc,
2021 exp->exp_flvr_old[1].sf_rpc);
2022 flavor = exp->exp_flvr;
2023 spin_unlock(&exp->exp_lock);
2025 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
2030 "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
2031 exp, exp->exp_flvr.sf_rpc,
2032 exp->exp_flvr_old[0].sf_rpc,
2033 exp->exp_flvr_old[1].sf_rpc);
2034 spin_unlock(&exp->exp_lock);
2036 return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
2041 if (exp->exp_flvr_expire[0]) {
2042 if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
2043 if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
2045 "exp %p (%x|%x|%x): match the middle one (%lld)\n",
2046 exp, exp->exp_flvr.sf_rpc,
2047 exp->exp_flvr_old[0].sf_rpc,
2048 exp->exp_flvr_old[1].sf_rpc,
2049 (s64)(exp->exp_flvr_expire[0] -
2050 ktime_get_real_seconds()));
2051 spin_unlock(&exp->exp_lock);
2055 CDEBUG(D_SEC, "mark middle expired\n");
2056 exp->exp_flvr_expire[0] = 0;
2058 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
2059 exp->exp_flvr.sf_rpc,
2060 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2061 req->rq_flvr.sf_rpc);
	/*
	 * now it doesn't match the current flavor; the only chance we can
	 * accept it is matching one of the old flavors which have not expired.
	 */
2068 if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
2069 if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
2070 if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
2071 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
2073 exp->exp_flvr.sf_rpc,
2074 exp->exp_flvr_old[0].sf_rpc,
2075 exp->exp_flvr_old[1].sf_rpc,
2076 (s64)(exp->exp_flvr_expire[1] -
2077 ktime_get_real_seconds()));
2078 spin_unlock(&exp->exp_lock);
2082 CDEBUG(D_SEC, "mark oldest expired\n");
2083 exp->exp_flvr_expire[1] = 0;
2085 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
2086 exp, exp->exp_flvr.sf_rpc,
2087 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
2088 req->rq_flvr.sf_rpc);
2090 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
2091 exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
2092 exp->exp_flvr_old[1].sf_rpc);
2095 spin_unlock(&exp->exp_lock);
2097 CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
2098 exp, exp->exp_obd->obd_name,
2099 req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
2100 req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
2101 req->rq_flvr.sf_rpc,
2102 exp->exp_flvr.sf_rpc,
2103 exp->exp_flvr_old[0].sf_rpc,
2104 exp->exp_flvr_expire[0] ?
2105 (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
2106 exp->exp_flvr_old[1].sf_rpc,
2107 exp->exp_flvr_expire[1] ?
2108 (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
2111 EXPORT_SYMBOL(sptlrpc_target_export_check);
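
/*
 * Flavor history sketch (illustrative summary of the logic above): each
 * export keeps the current flavor in exp_flvr plus a two-slot history in
 * exp_flvr_old[0..1] with matching expiry times in exp_flvr_expire[0..1].
 * An incoming request is accepted if it matches the current flavor or a
 * non-expired historical one, giving clients EXP_FLVR_UPDATE_EXPIRE seconds
 * to catch up after a flavor change.
 */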
2113 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
2114 struct sptlrpc_rule_set *rset)
2116 struct obd_export *exp;
2117 struct sptlrpc_flavor new_flvr;
2121 spin_lock(&obd->obd_dev_lock);
2123 list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
2124 if (exp->exp_connection == NULL)
		/*
		 * note if this export just had its flavor updated
		 * (exp_flvr_changed == 1), this will override the
		 * previous change.
		 */
2132 spin_lock(&exp->exp_lock);
2133 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
2134 exp->exp_connection->c_peer.nid,
2136 if (exp->exp_flvr_changed ||
2137 !flavor_equal(&new_flvr, &exp->exp_flvr)) {
2138 exp->exp_flvr_old[1] = new_flvr;
2139 exp->exp_flvr_expire[1] = 0;
2140 exp->exp_flvr_changed = 1;
2141 exp->exp_flvr_adapt = 1;
2143 CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
2144 exp, sptlrpc_part2name(exp->exp_sp_peer),
2145 exp->exp_flvr.sf_rpc,
2146 exp->exp_flvr_old[1].sf_rpc);
2148 spin_unlock(&exp->exp_lock);
2151 spin_unlock(&obd->obd_dev_lock);
2153 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
2155 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2157 /* peer's claim is unreliable unless gss is being used */
2158 if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2161 switch (req->rq_sp_from) {
2163 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2164 DEBUG_REQ(D_ERROR, req, "faked source CLI");
2165 svc_rc = SECSVC_DROP;
2169 if (!req->rq_auth_usr_mdt) {
2170 DEBUG_REQ(D_ERROR, req, "faked source MDT");
2171 svc_rc = SECSVC_DROP;
2175 if (!req->rq_auth_usr_ost) {
2176 DEBUG_REQ(D_ERROR, req, "faked source OST");
2177 svc_rc = SECSVC_DROP;
2182 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2183 !req->rq_auth_usr_ost) {
2184 DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2185 svc_rc = SECSVC_DROP;
2190 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2191 svc_rc = SECSVC_DROP;
/**
 * Used by the ptlrpc server, to perform a transformation upon the request
 * message of the incoming \a req. This must be the first thing done to an
 * incoming request in the ptlrpc layer.
 *
 * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
 * in clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
 * \retval SECSVC_COMPLETE success, the request has been fully processed, and
 * the reply message has been prepared.
 * \retval SECSVC_DROP failed, this request should be dropped.
 */
2208 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2210 struct ptlrpc_sec_policy *policy;
2211 struct lustre_msg *msg = req->rq_reqbuf;
2217 LASSERT(req->rq_reqmsg == NULL);
2218 LASSERT(req->rq_repmsg == NULL);
2219 LASSERT(req->rq_svc_ctx == NULL);
2221 req->rq_req_swab_mask = 0;
2223 rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2226 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2230 CERROR("error unpacking request from %s x%llu\n",
2231 libcfs_id2str(req->rq_peer), req->rq_xid);
2232 RETURN(SECSVC_DROP);
2235 req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2236 req->rq_sp_from = LUSTRE_SP_ANY;
2237 req->rq_auth_uid = -1; /* set to INVALID_UID */
2238 req->rq_auth_mapped_uid = -1;
2240 policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2242 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2243 RETURN(SECSVC_DROP);
2246 LASSERT(policy->sp_sops->accept);
2247 rc = policy->sp_sops->accept(req);
2248 sptlrpc_policy_put(policy);
2249 LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2250 LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
	/*
	 * if it's not the null flavor (which means an embedded packed msg),
	 * reset the swab mask for the coming inner msg unpacking.
	 */
2256 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2257 req->rq_req_swab_mask = 0;
2259 /* sanity check for the request source */
2260 rc = sptlrpc_svc_check_from(req, rc);
/**
 * Used by the ptlrpc server, to allocate the reply buffer for \a req. On
 * success, req->rq_reply_state is set, and req->rq_reply_state->rs_msg
 * points to a buffer of \a msglen size.
 */
2269 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2271 struct ptlrpc_sec_policy *policy;
2272 struct ptlrpc_reply_state *rs;
2277 LASSERT(req->rq_svc_ctx);
2278 LASSERT(req->rq_svc_ctx->sc_policy);
2280 policy = req->rq_svc_ctx->sc_policy;
2281 LASSERT(policy->sp_sops->alloc_rs);
2283 rc = policy->sp_sops->alloc_rs(req, msglen);
2284 if (unlikely(rc == -ENOMEM)) {
2285 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2287 if (svcpt->scp_service->srv_max_reply_size <
2288 msglen + sizeof(struct ptlrpc_reply_state)) {
2289 /* Just return failure if the size is too big */
2290 CERROR("size of message is too big (%zd), %d allowed\n",
2291 msglen + sizeof(struct ptlrpc_reply_state),
2292 svcpt->scp_service->srv_max_reply_size);
2296 /* failed alloc, try emergency pool */
2297 rs = lustre_get_emerg_rs(svcpt);
2301 req->rq_reply_state = rs;
2302 rc = policy->sp_sops->alloc_rs(req, msglen);
2304 lustre_put_emerg_rs(rs);
2305 req->rq_reply_state = NULL;
2310 (req->rq_reply_state && req->rq_reply_state->rs_msg));
/**
 * Used by the ptlrpc server, to perform a transformation upon the reply
 * message.
 *
 * \post req->rq_reply_off is set to the appropriate server-controlled reply
 * offset.
 * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
 */
2321 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2323 struct ptlrpc_sec_policy *policy;
2328 LASSERT(req->rq_svc_ctx);
2329 LASSERT(req->rq_svc_ctx->sc_policy);
2331 policy = req->rq_svc_ctx->sc_policy;
2332 LASSERT(policy->sp_sops->authorize);
2334 rc = policy->sp_sops->authorize(req);
2335 LASSERT(rc || req->rq_reply_state->rs_repdata_len);
/**
 * Used by the ptlrpc server, to free the reply_state.
 */
2343 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2345 struct ptlrpc_sec_policy *policy;
2346 unsigned int prealloc;
2350 LASSERT(rs->rs_svc_ctx);
2351 LASSERT(rs->rs_svc_ctx->sc_policy);
2353 policy = rs->rs_svc_ctx->sc_policy;
2354 LASSERT(policy->sp_sops->free_rs);
2356 prealloc = rs->rs_prealloc;
2357 policy->sp_sops->free_rs(rs);
2360 lustre_put_emerg_rs(rs);

void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
{
        struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

        if (ctx != NULL)
                atomic_inc(&ctx->sc_refcount);
}

void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
{
        struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

        if (ctx == NULL)
                return;

        LASSERT_ATOMIC_POS(&ctx->sc_refcount);
        if (atomic_dec_and_test(&ctx->sc_refcount)) {
                if (ctx->sc_policy->sp_sops->free_ctx)
                        ctx->sc_policy->sp_sops->free_ctx(ctx);
        }
        req->rq_svc_ctx = NULL;
}

void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
{
        struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

        if (ctx == NULL)
                return;

        LASSERT_ATOMIC_POS(&ctx->sc_refcount);
        if (ctx->sc_policy->sp_sops->invalidate_ctx)
                ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
}
EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
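
/*
 * Lifecycle sketch (illustrative): a hypothetical user keeping the service
 * ctx alive across a deferred operation pairs the two refcount calls:
 *
 *      sptlrpc_svc_ctx_addref(req);    // take an extra reference
 *      ...                             // use req->rq_svc_ctx
 *      sptlrpc_svc_ctx_decref(req);    // drop it; last put frees the ctx
 *
 * sptlrpc_svc_ctx_invalidate() only marks the ctx dead through the policy
 * hook; it does not drop a reference.
 */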

/****************************************
 * bulk security                        *
 ****************************************/

/**
 * Perform transformation upon bulk data pointed to by \a desc. This is
 * called before transforming the request message.
 */
int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_cli_ctx *ctx;

        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        if (!req->rq_pack_bulk)
                return 0;

        ctx = req->rq_cli_ctx;
        if (ctx->cc_ops->wrap_bulk)
                return ctx->cc_ops->wrap_bulk(ctx, req, desc);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
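
/*
 * Client-side ordering sketch (illustrative; the request-wrapping entry
 * point named below is assumed from earlier in this file): bulk pages are
 * protected before the RPC message itself is wrapped:
 *
 *      rc = sptlrpc_cli_wrap_bulk(req, desc);  // checksum/encrypt pages
 *      if (rc == 0)
 *              rc = sptlrpc_cli_wrap_request(req);     // then the message
 */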

/**
 * This is called after unwrapping the reply message.
 * Return the number of bytes of actual plain text received, or an error
 * code.
 */
int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
                                 struct ptlrpc_bulk_desc *desc,
                                 int nob)
{
        struct ptlrpc_cli_ctx *ctx;
        int rc;

        LASSERT(req->rq_bulk_read && !req->rq_bulk_write);

        if (!req->rq_pack_bulk)
                return desc->bd_nob_transferred;

        ctx = req->rq_cli_ctx;
        if (ctx->cc_ops->unwrap_bulk) {
                rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
                if (rc < 0)
                        return rc;
        }
        return desc->bd_nob_transferred;
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
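
/*
 * Usage sketch (hypothetical client read-completion path): the return
 * value is consumed as the effective plain-text byte count of the read:
 *
 *      rc = sptlrpc_cli_unwrap_bulk_read(req, desc, nob);
 *      if (rc >= 0)
 *              ; // rc bytes of plain text were received
 */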

/**
 * This is called after unwrapping the reply message.
 * Return 0 for success or an error code.
 */
int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
                                  struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_cli_ctx *ctx;
        int rc;

        LASSERT(!req->rq_bulk_read && req->rq_bulk_write);

        if (!req->rq_pack_bulk)
                return 0;

        ctx = req->rq_cli_ctx;
        if (ctx->cc_ops->unwrap_bulk) {
                rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
                if (rc < 0)
                        return rc;
        }

        /*
         * if everything went right, nob should equal nob_transferred.
         * in case of privacy mode, nob_transferred needs to be adjusted.
         */
        if (desc->bd_nob != desc->bd_nob_transferred) {
                CERROR("nob %d doesn't match transferred nob %d\n",
                       desc->bd_nob, desc->bd_nob_transferred);
                return -EPROTO;
        }

        return 0;
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
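
/*
 * Usage sketch (hypothetical client write-completion path):
 *
 *      rc = sptlrpc_cli_unwrap_bulk_write(req, desc);
 *      if (rc == 0)
 *              ; // the server consumed exactly desc->bd_nob bytes
 */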

#ifdef HAVE_SERVER_SUPPORT
/**
 * Perform transformation upon outgoing bulk read.
 */
int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_svc_ctx *ctx;

        LASSERT(req->rq_bulk_read);

        if (!req->rq_pack_bulk)
                return 0;

        ctx = req->rq_svc_ctx;
        if (ctx->sc_policy->sp_sops->wrap_bulk)
                return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);

        return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);

/**
 * Perform transformation upon incoming bulk write.
 */
int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
                            struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_svc_ctx *ctx;
        int rc;

        LASSERT(req->rq_bulk_write);

        /*
         * if it's in privacy mode, transferred should be >= expected;
         * otherwise transferred should be == expected.
         */
        if (desc->bd_nob_transferred < desc->bd_nob ||
            (desc->bd_nob_transferred > desc->bd_nob &&
             SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
             SPTLRPC_BULK_SVC_PRIV)) {
                DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
                          desc->bd_nob_transferred, desc->bd_nob);
                return -ETIMEDOUT;
        }

        if (!req->rq_pack_bulk)
                return 0;

        ctx = req->rq_svc_ctx;
        if (ctx->sc_policy->sp_sops->unwrap_bulk) {
                rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
                if (rc)
                        CERROR("error unwrapping bulk: %d\n", rc);
        }

        /* return 0 to allow the reply to be sent */
        return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
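
/*
 * Note (interpretation, not stated elsewhere in this file): -ETIMEDOUT is
 * returned for a short transfer, presumably so the client side treats it
 * as a resendable failure rather than a fatal protocol error. A
 * hypothetical server I/O path checks it as:
 *
 *      rc = sptlrpc_svc_unwrap_bulk(req, desc);
 *      if (rc)         // truncated or tampered bulk; abort this I/O
 *              GOTO(out, rc);
 */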

/**
 * Prepare buffers for incoming bulk write.
 */
int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_svc_ctx *ctx;

        LASSERT(req->rq_bulk_write);

        if (!req->rq_pack_bulk)
                return 0;

        ctx = req->rq_svc_ctx;
        if (ctx->sc_policy->sp_sops->prep_bulk)
                return ctx->sc_policy->sp_sops->prep_bulk(req, desc);

        return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
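
/*
 * Server-side ordering sketch (illustrative) for the three bulk hooks in
 * this section, under the usual GET/PUT flow:
 *
 *      // bulk write (client PUT):
 *      sptlrpc_svc_prep_bulk(req, desc);       // before starting transfer
 *      ...                                     // transfer runs
 *      sptlrpc_svc_unwrap_bulk(req, desc);     // verify/decrypt afterwards
 *
 *      // bulk read (client GET):
 *      sptlrpc_svc_wrap_bulk(req, desc);       // protect pages before send
 */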

#endif /* HAVE_SERVER_SUPPORT */

/****************************************
 * user descriptor helpers              *
 ****************************************/

int sptlrpc_current_user_desc_size(void)
{
        int ngroups;

        ngroups = current_ngroups;

        if (ngroups > LUSTRE_MAX_GROUPS)
                ngroups = LUSTRE_MAX_GROUPS;
        return sptlrpc_user_desc_size(ngroups);
}
EXPORT_SYMBOL(sptlrpc_current_user_desc_size);

int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_user_desc *pud;

        pud = lustre_msg_buf(msg, offset, 0);

        pud->pud_uid = from_kuid(&init_user_ns, current_uid());
        pud->pud_gid = from_kgid(&init_user_ns, current_gid());
        pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
        pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
        pud->pud_cap = cfs_curproc_cap_pack();
        pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;

        task_lock(current);
        if (pud->pud_ngroups > current_ngroups)
                pud->pud_ngroups = current_ngroups;
#ifdef HAVE_GROUP_INFO_GID
        memcpy(pud->pud_groups, current_cred()->group_info->gid,
               pud->pud_ngroups * sizeof(__u32));
#else /* !HAVE_GROUP_INFO_GID */
        memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
               pud->pud_ngroups * sizeof(__u32));
#endif /* HAVE_GROUP_INFO_GID */
        task_unlock(current);

        return 0;
}
EXPORT_SYMBOL(sptlrpc_pack_user_desc);
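
/*
 * Sizing/packing sketch (illustrative, hypothetical flavor code): the
 * buffer for the descriptor is reserved with the size helper first, so
 * the pud_ngroups computed above fits what was allocated:
 *
 *      buflens[n] = sptlrpc_current_user_desc_size();
 *      ...                             // allocate msg with these buflens
 *      sptlrpc_pack_user_desc(msg, n);
 */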

int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
{
        struct ptlrpc_user_desc *pud;
        int i;

        pud = lustre_msg_buf(msg, offset, sizeof(*pud));
        if (!pud)
                return -EINVAL;

        if (swabbed) {
                __swab32s(&pud->pud_uid);
                __swab32s(&pud->pud_gid);
                __swab32s(&pud->pud_fsuid);
                __swab32s(&pud->pud_fsgid);
                __swab32s(&pud->pud_cap);
                __swab32s(&pud->pud_ngroups);
        }

        if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
                CERROR("%u groups is too large\n", pud->pud_ngroups);
                return -EINVAL;
        }

        if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
            msg->lm_buflens[offset]) {
                CERROR("%u groups are claimed but bufsize is only %u\n",
                       pud->pud_ngroups, msg->lm_buflens[offset]);
                return -EINVAL;
        }

        if (swabbed) {
                for (i = 0; i < pud->pud_ngroups; i++)
                        __swab32s(&pud->pud_groups[i]);
        }

        return 0;
}
EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
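
/*
 * Server-side sketch (illustrative; the swab predicate named here is an
 * assumption, not defined in this file): \a swabbed mirrors the swab state
 * detected while unpacking the enclosing message:
 *
 *      swabbed = ptlrpc_req_need_swab(req);
 *      rc = sptlrpc_unpack_user_desc(req->rq_reqbuf, offset, swabbed);
 *      if (rc)
 *              RETURN(SECSVC_DROP);
 */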

/****************************************
 * misc helpers                         *
 ****************************************/

const char *sec2target_str(struct ptlrpc_sec *sec)
{
        if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
                return "*";
        if (sec_is_reverse(sec))
                return "c";
        return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
}
EXPORT_SYMBOL(sec2target_str);

/*
 * return true if the bulk data is protected
 */
int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
{
        switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
        case SPTLRPC_BULK_SVC_INTG:
        case SPTLRPC_BULK_SVC_PRIV:
                return 1;
        default:
                return 0;
        }
}
EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
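
/*
 * Usage sketch (illustrative): callers use this predicate to decide
 * whether a request needs rq_pack_bulk set, approximately:
 *
 *      if (sptlrpc_flavor_has_bulk(&req->rq_flvr))
 *              req->rq_pack_bulk = 1;
 */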

/****************************************
 * crypto API helper/alloc blkcipher    *
 ****************************************/

/****************************************
 * initialize/finalize                  *
 ****************************************/

int sptlrpc_init(void)
{
        int rc;

        rwlock_init(&policy_lock);

        rc = sptlrpc_gc_init();
        if (rc)
                goto out;

        rc = sptlrpc_conf_init();
        if (rc)
                goto out_gc;

        rc = sptlrpc_enc_pool_init();
        if (rc)
                goto out_conf;

        rc = sptlrpc_null_init();
        if (rc)
                goto out_pool;

        rc = sptlrpc_plain_init();
        if (rc)
                goto out_null;

        rc = sptlrpc_lproc_init();
        if (rc)
                goto out_plain;

        return 0;

out_plain:
        sptlrpc_plain_fini();
out_null:
        sptlrpc_null_fini();
out_pool:
        sptlrpc_enc_pool_fini();
out_conf:
        sptlrpc_conf_fini();
out_gc:
        sptlrpc_gc_fini();
out:
        return rc;
}

void sptlrpc_fini(void)
{
        sptlrpc_lproc_fini();
        sptlrpc_plain_fini();
        sptlrpc_null_fini();
        sptlrpc_enc_pool_fini();
        sptlrpc_conf_fini();
        sptlrpc_gc_fini();
}
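
/*
 * Note: teardown mirrors sptlrpc_init() in reverse registration order,
 * which is why sptlrpc_lproc_fini() runs first and sptlrpc_gc_fini() last.
 * An illustrative (hypothetical) module wiring:
 *
 *      rc = sptlrpc_init();
 *      if (rc)
 *              return rc;
 *      ...
 *      sptlrpc_fini();
 */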