4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2014, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Eric Mei <ericm@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_SEC
43 #include <linux/user_namespace.h>
44 #ifdef HAVE_UIDGID_HEADER
45 # include <linux/uidgid.h>
47 #include <linux/crypto.h>
48 #include <linux/key.h>
50 #include <libcfs/libcfs.h>
52 #include <obd_class.h>
53 #include <obd_support.h>
54 #include <lustre_net.h>
55 #include <lustre_import.h>
56 #include <lustre_dlm.h>
57 #include <lustre_sec.h>
59 #include "ptlrpc_internal.h"
61 /***********************************************
63 ***********************************************/
65 static rwlock_t policy_lock;
66 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
70 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
72 __u16 number = policy->sp_policy;
74 LASSERT(policy->sp_name);
75 LASSERT(policy->sp_cops);
76 LASSERT(policy->sp_sops);
78 if (number >= SPTLRPC_POLICY_MAX)
81 write_lock(&policy_lock);
82 if (unlikely(policies[number])) {
83 write_unlock(&policy_lock);
86 policies[number] = policy;
87 write_unlock(&policy_lock);
89 CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
92 EXPORT_SYMBOL(sptlrpc_register_policy);
94 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
96 __u16 number = policy->sp_policy;
98 LASSERT(number < SPTLRPC_POLICY_MAX);
100 write_lock(&policy_lock);
101 if (unlikely(policies[number] == NULL)) {
102 write_unlock(&policy_lock);
103 CERROR("%s: already unregistered\n", policy->sp_name);
107 LASSERT(policies[number] == policy);
108 policies[number] = NULL;
109 write_unlock(&policy_lock);
111 CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
114 EXPORT_SYMBOL(sptlrpc_unregister_policy);
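/*
 * Illustrative registration sketch (not part of this file; the "example_*"
 * names and the policy slot are hypothetical): a security policy module
 * fills in a struct ptlrpc_sec_policy with its name, policy number and
 * client/server ops tables, registers it from its module init hook, and
 * unregisters it on exit.
 *
 *	static struct ptlrpc_sec_policy example_policy = {
 *		.sp_owner	= THIS_MODULE,
 *		.sp_name	= "example",
 *		.sp_policy	= SPTLRPC_POLICY_PLAIN,
 *		.sp_cops	= &example_cli_ops,
 *		.sp_sops	= &example_svc_ops,
 *	};
 *
 *	rc = sptlrpc_register_policy(&example_policy);
 *	...
 *	sptlrpc_unregister_policy(&example_policy);
 */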
117 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
119 static DEFINE_MUTEX(load_mutex);
120 static atomic_t loaded = ATOMIC_INIT(0);
121 struct ptlrpc_sec_policy *policy;
122 __u16 number = SPTLRPC_FLVR_POLICY(flavor);
125 if (number >= SPTLRPC_POLICY_MAX)
129 read_lock(&policy_lock);
130 policy = policies[number];
131 if (policy && !try_module_get(policy->sp_owner))
134 flag = atomic_read(&loaded);
135 read_unlock(&policy_lock);
137 if (policy != NULL || flag != 0 ||
138 number != SPTLRPC_POLICY_GSS)
141 /* try to load gss module, once */
142 mutex_lock(&load_mutex);
143 if (atomic_read(&loaded) == 0) {
144 if (request_module("ptlrpc_gss") == 0)
146 "module ptlrpc_gss loaded on demand\n");
148 CERROR("Unable to load module ptlrpc_gss\n");
150 atomic_set(&loaded, 1);
152 mutex_unlock(&load_mutex);
158 __u32 sptlrpc_name2flavor_base(const char *name)
160 if (!strcmp(name, "null"))
161 return SPTLRPC_FLVR_NULL;
162 if (!strcmp(name, "plain"))
163 return SPTLRPC_FLVR_PLAIN;
164 if (!strcmp(name, "gssnull"))
165 return SPTLRPC_FLVR_GSSNULL;
166 if (!strcmp(name, "krb5n"))
167 return SPTLRPC_FLVR_KRB5N;
168 if (!strcmp(name, "krb5a"))
169 return SPTLRPC_FLVR_KRB5A;
170 if (!strcmp(name, "krb5i"))
171 return SPTLRPC_FLVR_KRB5I;
172 if (!strcmp(name, "krb5p"))
173 return SPTLRPC_FLVR_KRB5P;
174 if (!strcmp(name, "ski"))
175 return SPTLRPC_FLVR_SKI;
176 if (!strcmp(name, "skpi"))
177 return SPTLRPC_FLVR_SKPI;
179 return SPTLRPC_FLVR_INVALID;
181 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
183 const char *sptlrpc_flavor2name_base(__u32 flvr)
185 __u32 base = SPTLRPC_FLVR_BASE(flvr);
187 if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
189 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
191 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
193 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
195 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
197 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
199 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
201 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
203 else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
206 CERROR("invalid wire flavor 0x%x\n", flvr);
209 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
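/*
 * Illustrative round trip between the two helpers above (assuming "krb5i"
 * is one of the recognized flavor names):
 *
 *	__u32 flvr = sptlrpc_name2flavor_base("krb5i");
 *
 *	if (flvr != SPTLRPC_FLVR_INVALID)
 *		CDEBUG(D_SEC, "flavor %s is 0x%x\n",
 *		       sptlrpc_flavor2name_base(flvr), flvr);
 */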
211 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
212 char *buf, int bufsize)
214 if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
215 snprintf(buf, bufsize, "hash:%s",
216 sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
218 snprintf(buf, bufsize, "%s",
219 sptlrpc_flavor2name_base(sf->sf_rpc));
221 buf[bufsize - 1] = '\0';
224 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
226 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
228 snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
231 * currently we don't support customized bulk specification for
232 * flavors other than plain
234 if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
238 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
239 strncat(buf, bspec, bufsize);
242 buf[bufsize - 1] = '\0';
245 EXPORT_SYMBOL(sptlrpc_flavor2name);
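/*
 * Typical caller pattern for sptlrpc_flavor2name() (sketch): format the
 * flavor into a small stack buffer for logging; the helper always
 * NUL-terminates the buffer. This mirrors the uses later in this file,
 * e.g. in sptlrpc_sec_create() and sptlrpc_import_sec_adapt().
 *
 *	char str[32];
 *
 *	CDEBUG(D_SEC, "using flavor %s\n",
 *	       sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)));
 */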
247 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
251 if (flags & PTLRPC_SEC_FL_REVERSE)
252 strlcat(buf, "reverse,", bufsize);
253 if (flags & PTLRPC_SEC_FL_ROOTONLY)
254 strlcat(buf, "rootonly,", bufsize);
255 if (flags & PTLRPC_SEC_FL_UDESC)
256 strlcat(buf, "udesc,", bufsize);
257 if (flags & PTLRPC_SEC_FL_BULK)
258 strlcat(buf, "bulk,", bufsize);
260 strlcat(buf, "-,", bufsize);
264 EXPORT_SYMBOL(sptlrpc_secflags2str);
266 /**************************************************
267 * client context APIs *
268 **************************************************/
271 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
273 struct vfs_cred vcred;
274 int create = 1, remove_dead = 1;
277 LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
279 if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
280 PTLRPC_SEC_FL_ROOTONLY)) {
283 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
288 vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
289 vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
292 return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
296 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
298 atomic_inc(&ctx->cc_refcount);
301 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
303 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
305 struct ptlrpc_sec *sec = ctx->cc_sec;
308 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
310 if (!atomic_dec_and_test(&ctx->cc_refcount))
313 sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
315 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
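/*
 * Reference discipline sketch for client contexts: every
 * sptlrpc_cli_ctx_get() (or a context handed out by get_my_ctx()) must be
 * balanced by one sptlrpc_cli_ctx_put(). The sync value shown is only an
 * illustration: pass 1 when it is safe to sleep while the last reference
 * triggers a context-destroying RPC, 0 otherwise.
 *
 *	ctx = get_my_ctx(sec);
 *	if (ctx) {
 *		... use ctx ...
 *		sptlrpc_cli_ctx_put(ctx, 1);
 *	}
 */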
318 * Expire the client context immediately.
320 * \pre Caller must hold at least 1 reference on the \a ctx.
322 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
324 LASSERT(ctx->cc_ops->die);
325 ctx->cc_ops->die(ctx, 0);
327 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
330 * Wake up the threads that are waiting on this client context. Called
331 * after some status change has happened on \a ctx.
333 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
335 struct ptlrpc_request *req, *next;
337 spin_lock(&ctx->cc_lock);
338 list_for_each_entry_safe(req, next, &ctx->cc_req_list,
340 list_del_init(&req->rq_ctx_chain);
341 ptlrpc_client_wake_req(req);
343 spin_unlock(&ctx->cc_lock);
345 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
347 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
349 LASSERT(ctx->cc_ops);
351 if (ctx->cc_ops->display == NULL)
354 return ctx->cc_ops->display(ctx, buf, bufsize);
357 static int import_sec_check_expire(struct obd_import *imp)
361 spin_lock(&imp->imp_lock);
362 if (imp->imp_sec_expire &&
363 imp->imp_sec_expire < cfs_time_current_sec()) {
365 imp->imp_sec_expire = 0;
367 spin_unlock(&imp->imp_lock);
372 CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
373 return sptlrpc_import_sec_adapt(imp, NULL, NULL);
377 * Get and validate the client side ptlrpc security facilities from
378 * \a imp. There is a race condition on client reconnect when the import is
379 * being destroyed while there are outstanding client bound requests. In
380 * this case do not output any error messages if import security is not
383 * \param[in] imp obd import associated with client
384 * \param[out] sec client side ptlrpc security
386 * \retval 0 if security retrieved successfully
387 * \retval -ve errno if there was a problem
389 static int import_sec_validate_get(struct obd_import *imp,
390 struct ptlrpc_sec **sec)
394 if (unlikely(imp->imp_sec_expire)) {
395 rc = import_sec_check_expire(imp);
400 *sec = sptlrpc_import_sec_ref(imp);
401 /* Only output an error when the import is still active */
403 if (list_empty(&imp->imp_zombie_chain))
404 CERROR("import %p (%s) with no sec\n",
405 imp, ptlrpc_import_state_name(imp->imp_state));
409 if (unlikely((*sec)->ps_dying)) {
410 CERROR("attempt to use dying sec %p\n", sec);
411 sptlrpc_sec_put(*sec);
419 * Given a \a req, find or allocate an appropriate context for it.
420 * \pre req->rq_cli_ctx == NULL.
422 * \retval 0 succeed, and req->rq_cli_ctx is set.
423 * \retval -ve error number, and req->rq_cli_ctx == NULL.
425 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
427 struct obd_import *imp = req->rq_import;
428 struct ptlrpc_sec *sec;
432 LASSERT(!req->rq_cli_ctx);
435 rc = import_sec_validate_get(imp, &sec);
439 req->rq_cli_ctx = get_my_ctx(sec);
441 sptlrpc_sec_put(sec);
443 if (!req->rq_cli_ctx) {
444 CERROR("req %p: fail to get context\n", req);
445 RETURN(-ECONNREFUSED);
452 * Drop the context for \a req.
453 * \pre req->rq_cli_ctx != NULL.
454 * \post req->rq_cli_ctx == NULL.
456 * If \a sync == 0, this function should return quickly without sleep;
457 * otherwise it might trigger and wait for the whole process of sending
458 * a context-destroying rpc to the server.
460 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
465 LASSERT(req->rq_cli_ctx);
467 /* request might be asked to release its ctx early while still
468 * in the context waiting list.
470 if (!list_empty(&req->rq_ctx_chain)) {
471 spin_lock(&req->rq_cli_ctx->cc_lock);
472 list_del_init(&req->rq_ctx_chain);
473 spin_unlock(&req->rq_cli_ctx->cc_lock);
476 sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
477 req->rq_cli_ctx = NULL;
482 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
483 struct ptlrpc_cli_ctx *oldctx,
484 struct ptlrpc_cli_ctx *newctx)
486 struct sptlrpc_flavor old_flvr;
487 char *reqmsg = NULL; /* to workaround old gcc */
491 LASSERT(req->rq_reqmsg);
492 LASSERT(req->rq_reqlen);
493 LASSERT(req->rq_replen);
495 CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
496 "switch sec %p(%s) -> %p(%s)\n", req,
497 oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
498 newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
499 oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
500 newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
503 old_flvr = req->rq_flvr;
505 /* save request message */
506 reqmsg_size = req->rq_reqlen;
507 if (reqmsg_size != 0) {
508 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
511 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
514 /* release old req/rep buf */
515 req->rq_cli_ctx = oldctx;
516 sptlrpc_cli_free_reqbuf(req);
517 sptlrpc_cli_free_repbuf(req);
518 req->rq_cli_ctx = newctx;
520 /* recalculate the flavor */
521 sptlrpc_req_set_flavor(req, 0);
523 /* alloc new request buffer
524 * we don't need to alloc reply buffer here, leave it to the
525 * rest procedure of ptlrpc */
526 if (reqmsg_size != 0) {
527 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
529 LASSERT(req->rq_reqmsg);
530 memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
532 CWARN("failed to alloc reqbuf: %d\n", rc);
533 req->rq_flvr = old_flvr;
536 OBD_FREE_LARGE(reqmsg, reqmsg_size);
542 * If the current context of \a req is dead somehow, e.g. we just switched
543 * flavor and thus marked the original contexts dead, find a new context for
544 * it. If no switch is needed, \a req will end up with the same context.
546 * \note a request must have a context, to keep other parts of code happy.
547 * In any case of failure during the switching, we must restore the old one.
549 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
551 struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
552 struct ptlrpc_cli_ctx *newctx;
558 sptlrpc_cli_ctx_get(oldctx);
559 sptlrpc_req_put_ctx(req, 0);
561 rc = sptlrpc_req_get_ctx(req);
563 LASSERT(!req->rq_cli_ctx);
565 /* restore old ctx */
566 req->rq_cli_ctx = oldctx;
570 newctx = req->rq_cli_ctx;
573 if (unlikely(newctx == oldctx &&
574 test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
576 * still getting the old dead ctx usually means the system is too busy
579 "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
580 newctx, newctx->cc_flags);
582 set_current_state(TASK_INTERRUPTIBLE);
583 schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
584 } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
587 * new ctx not up to date yet
590 "ctx (%p, fl %lx) doesn't switch, not up to date yet\n",
591 newctx, newctx->cc_flags);
594 * it's possible newctx == oldctx if we're switching
595 * subflavor with the same sec.
597 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
599 /* restore old ctx */
600 sptlrpc_req_put_ctx(req, 0);
601 req->rq_cli_ctx = oldctx;
605 LASSERT(req->rq_cli_ctx == newctx);
608 sptlrpc_cli_ctx_put(oldctx, 1);
611 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
614 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
616 if (cli_ctx_is_refreshed(ctx))
622 int ctx_refresh_timeout(void *data)
624 struct ptlrpc_request *req = data;
627 /* conn_cnt is needed in expire_one_request */
628 lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
630 rc = ptlrpc_expire_one_request(req, 1);
631 /* if we started recovery, we should mark this ctx dead; otherwise,
632 * in case lgssd died, nobody would retire this ctx and a following
633 * connect would still find the same ctx, thus causing a deadlock.
634 * there's an assumption that the expire time of the request should be
635 * later than the context refresh expire time.
638 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
643 void ctx_refresh_interrupt(void *data)
645 struct ptlrpc_request *req = data;
647 spin_lock(&req->rq_lock);
649 spin_unlock(&req->rq_lock);
653 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
655 spin_lock(&ctx->cc_lock);
656 if (!list_empty(&req->rq_ctx_chain))
657 list_del_init(&req->rq_ctx_chain);
658 spin_unlock(&ctx->cc_lock);
662 * Refresh the context of \a req, if it's not up-to-date.
665 * - = 0: wait until success or a fatal error occurs
666 * - > 0: timeout value (in seconds)
668 * The status of the context may be changed by other threads at any time.
669 * We allow this race, but once we return with 0, the caller will
670 * assume it's up to date and keep using it until the owning rpc is done.
672 * \retval 0 only if the context is up to date.
673 * \retval -ve error number.
675 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
677 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
678 struct ptlrpc_sec *sec;
679 struct l_wait_info lwi;
685 if (req->rq_ctx_init || req->rq_ctx_fini)
689 * during the process a request's context might even change type
690 * (e.g. from gss ctx to null ctx), so on each loop we need to re-check
694 rc = import_sec_validate_get(req->rq_import, &sec);
698 if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
699 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
700 req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
701 req_off_ctx_list(req, ctx);
702 sptlrpc_req_replace_dead_ctx(req);
703 ctx = req->rq_cli_ctx;
705 sptlrpc_sec_put(sec);
707 if (cli_ctx_is_eternal(ctx))
710 if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
711 LASSERT(ctx->cc_ops->refresh);
712 ctx->cc_ops->refresh(ctx);
714 LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
716 LASSERT(ctx->cc_ops->validate);
717 if (ctx->cc_ops->validate(ctx) == 0) {
718 req_off_ctx_list(req, ctx);
722 if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
723 spin_lock(&req->rq_lock);
725 spin_unlock(&req->rq_lock);
726 req_off_ctx_list(req, ctx);
731 * There's a subtle issue when resending RPCs; suppose the following
733 * 1. the request was sent to the server.
734 * 2. recovery was kick-started; after it finished, the request was
736 * 3. resend the request.
737 * 4. the old reply from the server is received; we accept and verify it.
738 * this has to succeed, otherwise the error will be noticed
740 * 5. the new reply from the server is received, dropped by LNet.
742 * Note the xid of the old & new request is the same. We can't simply
743 * change the xid of the resent request because the server relies on
744 * it for reply reconstruction.
746 * Commonly the original context should be up to date because we
747 * have an expiry grace time; the server will keep its context because
748 * we at least hold a ref on the old context, which prevents the
749 * context-destroying RPC from being sent. So the server can still accept
750 * the request and finish the RPC. But if that's not the case:
751 * 1. If the server side context has been trimmed, a NO_CONTEXT will
752 * be returned, gss_cli_ctx_verify/unseal will switch to a new
754 * 2. If the current context was never refreshed, then we are fine: we
755 * never really sent a request with the old context before.
757 if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
758 unlikely(req->rq_reqmsg) &&
759 lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
760 req_off_ctx_list(req, ctx);
764 if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
765 req_off_ctx_list(req, ctx);
767 * don't switch ctx if import was deactivated
769 if (req->rq_import->imp_deactive) {
770 spin_lock(&req->rq_lock);
772 spin_unlock(&req->rq_lock);
776 rc = sptlrpc_req_replace_dead_ctx(req);
778 LASSERT(ctx == req->rq_cli_ctx);
779 CERROR("req %p: failed to replace dead ctx %p: %d\n",
781 spin_lock(&req->rq_lock);
783 spin_unlock(&req->rq_lock);
787 ctx = req->rq_cli_ctx;
792 * Now we're sure this context is in the middle of an upcall; add myself into
795 spin_lock(&ctx->cc_lock);
796 if (list_empty(&req->rq_ctx_chain))
797 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
798 spin_unlock(&ctx->cc_lock);
801 RETURN(-EWOULDBLOCK);
803 /* Clear any flags that may be present from previous sends */
804 LASSERT(req->rq_receiving_reply == 0);
805 spin_lock(&req->rq_lock);
807 req->rq_timedout = 0;
810 spin_unlock(&req->rq_lock);
812 lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
814 ctx_refresh_interrupt, req);
815 rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
818 * the following cases could lead us here:
819 * - successfully refreshed;
821 * - timed out, and we don't want to recover from the failure;
822 * - timed out, and woken up when recovery finished;
823 * - someone else marked this ctx dead by force;
824 * - someone invalidated the req and called ptlrpc_client_wake_req(),
825 * e.g. ptlrpc_abort_inflight();
827 if (!cli_ctx_is_refreshed(ctx)) {
828 /* timed out or interrupted */
829 req_off_ctx_list(req, ctx);
839 * Initialize flavor settings for \a req, according to \a opcode.
841 * \note this could be called in two situations:
842 * - new request from ptlrpc_pre_req(), with proper @opcode
843 * - old request which changed ctx in the middle, with @opcode == 0
845 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
847 struct ptlrpc_sec *sec;
849 LASSERT(req->rq_import);
850 LASSERT(req->rq_cli_ctx);
851 LASSERT(req->rq_cli_ctx->cc_sec);
852 LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
854 /* special security flags according to opcode */
858 case MGS_CONFIG_READ:
860 req->rq_bulk_read = 1;
864 req->rq_bulk_write = 1;
867 req->rq_ctx_init = 1;
870 req->rq_ctx_fini = 1;
873 /* init/fini rpc won't be resent, so can't be here */
874 LASSERT(req->rq_ctx_init == 0);
875 LASSERT(req->rq_ctx_fini == 0);
877 /* cleanup flags, which should be recalculated */
878 req->rq_pack_udesc = 0;
879 req->rq_pack_bulk = 0;
883 sec = req->rq_cli_ctx->cc_sec;
885 spin_lock(&sec->ps_lock);
886 req->rq_flvr = sec->ps_flvr;
887 spin_unlock(&sec->ps_lock);
889 /* force SVC_NULL for context initiation rpc, SVC_INTG for context
891 if (unlikely(req->rq_ctx_init))
892 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
893 else if (unlikely(req->rq_ctx_fini))
894 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
896 /* user descriptor flag, null security can't do it anyway */
897 if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
898 (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
899 req->rq_pack_udesc = 1;
901 /* bulk security flag */
902 if ((req->rq_bulk_read || req->rq_bulk_write) &&
903 sptlrpc_flavor_has_bulk(&req->rq_flvr))
904 req->rq_pack_bulk = 1;
907 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
909 if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
912 LASSERT(req->rq_clrbuf);
913 if (req->rq_pool || !req->rq_reqbuf)
916 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
917 req->rq_reqbuf = NULL;
918 req->rq_reqbuf_len = 0;
922 * Given an import \a imp, check whether the current user has a valid context
923 * or not. We may create a new context and try to refresh it, retrying
924 * repeatedly in case of non-fatal errors. Return 0 means success.
926 int sptlrpc_import_check_ctx(struct obd_import *imp)
928 struct ptlrpc_sec *sec;
929 struct ptlrpc_cli_ctx *ctx;
930 struct ptlrpc_request *req = NULL;
936 sec = sptlrpc_import_sec_ref(imp);
937 ctx = get_my_ctx(sec);
938 sptlrpc_sec_put(sec);
943 if (cli_ctx_is_eternal(ctx) ||
944 ctx->cc_ops->validate(ctx) == 0) {
945 sptlrpc_cli_ctx_put(ctx, 1);
949 if (cli_ctx_is_error(ctx)) {
950 sptlrpc_cli_ctx_put(ctx, 1);
954 req = ptlrpc_request_cache_alloc(GFP_NOFS);
958 ptlrpc_cli_req_init(req);
959 atomic_set(&req->rq_refcount, 10000);
961 req->rq_import = imp;
962 req->rq_flvr = sec->ps_flvr;
963 req->rq_cli_ctx = ctx;
965 rc = sptlrpc_req_refresh_ctx(req, 0);
966 LASSERT(list_empty(&req->rq_ctx_chain));
967 sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
968 ptlrpc_request_cache_free(req);
974 * Used by ptlrpc client, to perform the pre-defined security transformation
975 * upon the request message of \a req. After this function is called,
976 * req->rq_reqmsg is still accessible as clear text.
978 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
980 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
985 LASSERT(ctx->cc_sec);
986 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
988 /* we wrap bulk request here because now we can be sure
989 * the context is up to date.
992 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
997 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
998 case SPTLRPC_SVC_NULL:
999 case SPTLRPC_SVC_AUTH:
1000 case SPTLRPC_SVC_INTG:
1001 LASSERT(ctx->cc_ops->sign);
1002 rc = ctx->cc_ops->sign(ctx, req);
1004 case SPTLRPC_SVC_PRIV:
1005 LASSERT(ctx->cc_ops->seal);
1006 rc = ctx->cc_ops->seal(ctx, req);
1013 LASSERT(req->rq_reqdata_len);
1014 LASSERT(req->rq_reqdata_len % 8 == 0);
1015 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1021 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1023 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1028 LASSERT(ctx->cc_sec);
1029 LASSERT(req->rq_repbuf);
1030 LASSERT(req->rq_repdata);
1031 LASSERT(req->rq_repmsg == NULL);
1033 req->rq_rep_swab_mask = 0;
1035 rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1038 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1042 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
1046 if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1047 CERROR("replied data length %d too small\n",
1048 req->rq_repdata_len);
1052 if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1053 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1054 CERROR("reply policy %u doesn't match request policy %u\n",
1055 SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1056 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1060 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1061 case SPTLRPC_SVC_NULL:
1062 case SPTLRPC_SVC_AUTH:
1063 case SPTLRPC_SVC_INTG:
1064 LASSERT(ctx->cc_ops->verify);
1065 rc = ctx->cc_ops->verify(ctx, req);
1067 case SPTLRPC_SVC_PRIV:
1068 LASSERT(ctx->cc_ops->unseal);
1069 rc = ctx->cc_ops->unseal(ctx, req);
1074 LASSERT(rc || req->rq_repmsg || req->rq_resend);
1076 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1078 req->rq_rep_swab_mask = 0;
1083 * Used by ptlrpc client, to perform security transformation upon the reply
1084 * message of \a req. After returning successfully, req->rq_repmsg points to
1085 * the reply message in clear text.
1087 * \pre the reply buffer should have been un-posted from LNet, so nothing is
1090 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1092 LASSERT(req->rq_repbuf);
1093 LASSERT(req->rq_repdata == NULL);
1094 LASSERT(req->rq_repmsg == NULL);
1095 LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1097 if (req->rq_reply_off == 0 &&
1098 (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1099 CERROR("real reply with offset 0\n");
1103 if (req->rq_reply_off % 8 != 0) {
1104 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1108 req->rq_repdata = (struct lustre_msg *)
1109 (req->rq_repbuf + req->rq_reply_off);
1110 req->rq_repdata_len = req->rq_nob_received;
1112 return do_cli_unwrap_reply(req);
1116 * Used by ptlrpc client, to perform security transformation upon the early
1117 * reply message of \a req. We expect rq_reply_off to be 0, and
1118 * rq_nob_received to be the early reply size.
1120 * Because the receive buffer might still be posted, the reply data might be
1121 * changed at any time, whether we're holding rq_lock or not. For this reason
1122 * we allocate a separate ptlrpc_request and reply buffer for early reply
1125 * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1126 * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1127 * \a *req_ret to release it.
1128 * \retval -ve error number, and \a req_ret will not be set.
1130 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1131 struct ptlrpc_request **req_ret)
1133 struct ptlrpc_request *early_req;
1135 int early_bufsz, early_size;
1139 early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1140 if (early_req == NULL)
1143 ptlrpc_cli_req_init(early_req);
1145 early_size = req->rq_nob_received;
1146 early_bufsz = size_roundup_power2(early_size);
1147 OBD_ALLOC_LARGE(early_buf, early_bufsz);
1148 if (early_buf == NULL)
1149 GOTO(err_req, rc = -ENOMEM);
1151 /* sanity checks and copy data out; do it inside the spinlock */
1152 spin_lock(&req->rq_lock);
1154 if (req->rq_replied) {
1155 spin_unlock(&req->rq_lock);
1156 GOTO(err_buf, rc = -EALREADY);
1159 LASSERT(req->rq_repbuf);
1160 LASSERT(req->rq_repdata == NULL);
1161 LASSERT(req->rq_repmsg == NULL);
1163 if (req->rq_reply_off != 0) {
1164 CERROR("early reply with offset %u\n", req->rq_reply_off);
1165 spin_unlock(&req->rq_lock);
1166 GOTO(err_buf, rc = -EPROTO);
1169 if (req->rq_nob_received != early_size) {
1170 /* even if another early reply arrived, the size should be the same */
1171 CERROR("data size has changed from %u to %u\n",
1172 early_size, req->rq_nob_received);
1173 spin_unlock(&req->rq_lock);
1174 GOTO(err_buf, rc = -EINVAL);
1177 if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1178 CERROR("early reply length %d too small\n",
1179 req->rq_nob_received);
1180 spin_unlock(&req->rq_lock);
1181 GOTO(err_buf, rc = -EALREADY);
1184 memcpy(early_buf, req->rq_repbuf, early_size);
1185 spin_unlock(&req->rq_lock);
1187 early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1188 early_req->rq_flvr = req->rq_flvr;
1189 early_req->rq_repbuf = early_buf;
1190 early_req->rq_repbuf_len = early_bufsz;
1191 early_req->rq_repdata = (struct lustre_msg *) early_buf;
1192 early_req->rq_repdata_len = early_size;
1193 early_req->rq_early = 1;
1194 early_req->rq_reqmsg = req->rq_reqmsg;
1196 rc = do_cli_unwrap_reply(early_req);
1198 DEBUG_REQ(D_ADAPTTO, early_req,
1199 "error %d unwrap early reply", rc);
1203 LASSERT(early_req->rq_repmsg);
1204 *req_ret = early_req;
1208 sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1210 OBD_FREE_LARGE(early_buf, early_bufsz);
1212 ptlrpc_request_cache_free(early_req);
1217 * Used by ptlrpc client, to release a processed early reply \a early_req.
1219 * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1221 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1223 LASSERT(early_req->rq_repbuf);
1224 LASSERT(early_req->rq_repdata);
1225 LASSERT(early_req->rq_repmsg);
1227 sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1228 OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1229 ptlrpc_request_cache_free(early_req);
1232 /**************************************************
1234 **************************************************/
1237 * "fixed" sec (e.g. null) use sec_id < 0
1239 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1241 int sptlrpc_get_next_secid(void)
1243 return atomic_inc_return(&sptlrpc_sec_id);
1245 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1247 /**************************************************
1248 * client side high-level security APIs *
1249 **************************************************/
1251 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1252 int grace, int force)
1254 struct ptlrpc_sec_policy *policy = sec->ps_policy;
1256 LASSERT(policy->sp_cops);
1257 LASSERT(policy->sp_cops->flush_ctx_cache);
1259 return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1262 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1264 struct ptlrpc_sec_policy *policy = sec->ps_policy;
1266 LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1267 LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1268 LASSERT(policy->sp_cops->destroy_sec);
1270 CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
1272 policy->sp_cops->destroy_sec(sec);
1273 sptlrpc_policy_put(policy);
1276 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1278 sec_cop_destroy_sec(sec);
1280 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1282 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1284 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1286 if (sec->ps_policy->sp_cops->kill_sec) {
1287 sec->ps_policy->sp_cops->kill_sec(sec);
1289 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1293 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1296 atomic_inc(&sec->ps_refcount);
1300 EXPORT_SYMBOL(sptlrpc_sec_get);
1302 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1305 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1307 if (atomic_dec_and_test(&sec->ps_refcount)) {
1308 sptlrpc_gc_del_sec(sec);
1309 sec_cop_destroy_sec(sec);
1313 EXPORT_SYMBOL(sptlrpc_sec_put);
1316 * policy module is responsible for taking a reference on the import
1319 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1320 struct ptlrpc_svc_ctx *svc_ctx,
1321 struct sptlrpc_flavor *sf,
1322 enum lustre_sec_part sp)
1324 struct ptlrpc_sec_policy *policy;
1325 struct ptlrpc_sec *sec;
1330 LASSERT(imp->imp_dlm_fake == 1);
1332 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1333 imp->imp_obd->obd_type->typ_name,
1334 imp->imp_obd->obd_name,
1335 sptlrpc_flavor2name(sf, str, sizeof(str)));
1337 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1338 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1340 LASSERT(imp->imp_dlm_fake == 0);
1342 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1343 imp->imp_obd->obd_type->typ_name,
1344 imp->imp_obd->obd_name,
1345 sptlrpc_flavor2name(sf, str, sizeof(str)));
1347 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1349 CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1354 sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1356 atomic_inc(&sec->ps_refcount);
1360 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1361 sptlrpc_gc_add_sec(sec);
1363 sptlrpc_policy_put(policy);
1369 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1371 struct ptlrpc_sec *sec;
1373 spin_lock(&imp->imp_lock);
1374 sec = sptlrpc_sec_get(imp->imp_sec);
1375 spin_unlock(&imp->imp_lock);
1379 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
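/*
 * Reference pattern sketch for import security: sptlrpc_import_sec_ref()
 * takes a reference under imp_lock and must be balanced by
 * sptlrpc_sec_put(), as in import_sec_validate_get() earlier in this file.
 *
 *	sec = sptlrpc_import_sec_ref(imp);
 *	if (sec) {
 *		... inspect sec->ps_flvr ...
 *		sptlrpc_sec_put(sec);
 *	}
 */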
1381 static void sptlrpc_import_sec_install(struct obd_import *imp,
1382 struct ptlrpc_sec *sec)
1384 struct ptlrpc_sec *old_sec;
1386 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1388 spin_lock(&imp->imp_lock);
1389 old_sec = imp->imp_sec;
1391 spin_unlock(&imp->imp_lock);
1394 sptlrpc_sec_kill(old_sec);
1396 /* balance the ref taken by this import */
1397 sptlrpc_sec_put(old_sec);
1402 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1404 return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1408 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1413 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1414 struct ptlrpc_sec *sec,
1415 struct sptlrpc_flavor *sf)
1417 char str1[32], str2[32];
1419 if (sec->ps_flvr.sf_flags != sf->sf_flags)
1420 CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
1421 sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1422 str1, sizeof(str1)),
1423 sptlrpc_secflags2str(sf->sf_flags,
1424 str2, sizeof(str2)));
1426 spin_lock(&sec->ps_lock);
1427 flavor_copy(&sec->ps_flvr, sf);
1428 spin_unlock(&sec->ps_lock);
1432 * Get an appropriate ptlrpc_sec for \a imp, according to the current
1433 * configuration. When called, imp->imp_sec may or may not be NULL.
1435 * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1436 * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1438 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1439 struct ptlrpc_svc_ctx *svc_ctx,
1440 struct sptlrpc_flavor *flvr)
1442 struct ptlrpc_connection *conn;
1443 struct sptlrpc_flavor sf;
1444 struct ptlrpc_sec *sec, *newsec;
1445 enum lustre_sec_part sp;
1455 conn = imp->imp_connection;
1457 if (svc_ctx == NULL) {
1458 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1460 * normal import, determine flavor from rule set, except
1461 * for mgc the flavor is predetermined.
1463 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1464 sf = cliobd->cl_flvr_mgc;
1466 sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1468 &cliobd->cl_target_uuid,
1471 sp = imp->imp_obd->u.cli.cl_sp_me;
1473 /* reverse import, determine flavor from incoming request */
1476 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1477 sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1478 PTLRPC_SEC_FL_ROOTONLY;
1480 sp = sptlrpc_target_sec_part(imp->imp_obd);
1483 sec = sptlrpc_import_sec_ref(imp);
1487 if (flavor_equal(&sf, &sec->ps_flvr))
1490 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1491 imp->imp_obd->obd_name,
1492 obd_uuid2str(&conn->c_remote_uuid),
1493 sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1494 sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1496 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1497 SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1498 SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1499 SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1500 sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1503 } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1504 SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1505 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1506 imp->imp_obd->obd_name,
1507 obd_uuid2str(&conn->c_remote_uuid),
1508 LNET_NIDNET(conn->c_self),
1509 sptlrpc_flavor2name(&sf, str, sizeof(str)));
1512 mutex_lock(&imp->imp_sec_mutex);
1514 newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1516 sptlrpc_import_sec_install(imp, newsec);
1518 CERROR("import %s->%s: failed to create new sec\n",
1519 imp->imp_obd->obd_name,
1520 obd_uuid2str(&conn->c_remote_uuid));
1524 mutex_unlock(&imp->imp_sec_mutex);
1526 sptlrpc_sec_put(sec);
1530 void sptlrpc_import_sec_put(struct obd_import *imp)
1533 sptlrpc_sec_kill(imp->imp_sec);
1535 sptlrpc_sec_put(imp->imp_sec);
1536 imp->imp_sec = NULL;
1540 static void import_flush_ctx_common(struct obd_import *imp,
1541 uid_t uid, int grace, int force)
1543 struct ptlrpc_sec *sec;
1548 sec = sptlrpc_import_sec_ref(imp);
1552 sec_cop_flush_ctx_cache(sec, uid, grace, force);
1553 sptlrpc_sec_put(sec);
1556 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1558 /* it's important to use grace mode, see explanation in
1559 * sptlrpc_req_refresh_ctx() */
1560 import_flush_ctx_common(imp, 0, 1, 1);
1563 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1565 import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
1568 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1570 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1572 import_flush_ctx_common(imp, -1, 1, 1);
1574 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
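/*
 * Summary sketch of the flush helpers above: they all funnel into
 * import_flush_ctx_common(), differing only in which uid they target --
 * uid 0 flushes only the root context, the current uid flushes the
 * caller's own contexts, and uid -1 flushes every context cached on the
 * import.
 *
 *	sptlrpc_import_flush_root_ctx(imp);	(uid 0, grace mode)
 *	sptlrpc_import_flush_my_ctx(imp);	(caller's uid)
 *	sptlrpc_import_flush_all_ctx(imp);	(all uids)
 */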
1577 * Used by ptlrpc client to allocate the request buffer of \a req. Upon
1578 * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
1580 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1582 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1583 struct ptlrpc_sec_policy *policy;
1587 LASSERT(ctx->cc_sec);
1588 LASSERT(ctx->cc_sec->ps_policy);
1589 LASSERT(req->rq_reqmsg == NULL);
1590 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1592 policy = ctx->cc_sec->ps_policy;
1593 rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1595 LASSERT(req->rq_reqmsg);
1596 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1598 /* zeroing preallocated buffer */
1600 memset(req->rq_reqmsg, 0, msgsize);
1607 * Used by ptlrpc client to free request buffer of \a req. After this
1608 * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1610 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1612 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1613 struct ptlrpc_sec_policy *policy;
1616 LASSERT(ctx->cc_sec);
1617 LASSERT(ctx->cc_sec->ps_policy);
1618 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1620 if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1623 policy = ctx->cc_sec->ps_policy;
1624 policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1625 req->rq_reqmsg = NULL;
1629 * NOTE caller must guarantee the buffer size is enough for the enlargement
1631 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1632 int segment, int newsize)
1635 int oldsize, oldmsg_size, movesize;
1637 LASSERT(segment < msg->lm_bufcount);
1638 LASSERT(msg->lm_buflens[segment] <= newsize);
1640 if (msg->lm_buflens[segment] == newsize)
1643 /* nothing to do if we are enlarging the last segment */
1644 if (segment == msg->lm_bufcount - 1) {
1645 msg->lm_buflens[segment] = newsize;
1649 oldsize = msg->lm_buflens[segment];
1651 src = lustre_msg_buf(msg, segment + 1, 0);
1652 msg->lm_buflens[segment] = newsize;
1653 dst = lustre_msg_buf(msg, segment + 1, 0);
1654 msg->lm_buflens[segment] = oldsize;
1656 /* move from segment + 1 to end segment */
1657 LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1658 oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1659 movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1660 LASSERT(movesize >= 0);
1663 memmove(dst, src, movesize);
1665 /* note we don't clear the areas where the old data lives; it's not secret */
1667 /* finally set new segment size */
1668 msg->lm_buflens[segment] = newsize;
1670 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1673 * Used by ptlrpc client to enlarge the \a segment of the request message
1674 * pointed to by req->rq_reqmsg to size \a newsize; all previously filled-in
1675 * data will be preserved after the enlargement. This must be called after the
1676 * original request buffer has been allocated.
1678 * \note after this is called, rq_reqmsg and rq_reqlen might have been changed,
1679 * so caller should refresh its local pointers if needed.
1681 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1682 int segment, int newsize)
1684 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1685 struct ptlrpc_sec_cops *cops;
1686 struct lustre_msg *msg = req->rq_reqmsg;
1690 LASSERT(msg->lm_bufcount > segment);
1691 LASSERT(msg->lm_buflens[segment] <= newsize);
1693 if (msg->lm_buflens[segment] == newsize)
1696 cops = ctx->cc_sec->ps_policy->sp_cops;
1697 LASSERT(cops->enlarge_reqbuf);
1698 return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1700 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
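/*
 * Caller pattern sketch for sptlrpc_cli_enlarge_reqbuf() (the segment index
 * "lmm_off" and the "lmm" pointer are hypothetical): after a successful
 * enlargement the request buffer may have been reallocated, so any cached
 * pointer into req->rq_reqmsg must be re-read.
 *
 *	rc = sptlrpc_cli_enlarge_reqbuf(req, lmm_off, newsize);
 *	if (rc == 0)
 *		lmm = lustre_msg_buf(req->rq_reqmsg, lmm_off, newsize);
 */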
1703 * Used by ptlrpc client to allocate reply buffer of \a req.
1705 * \note After this, req->rq_repmsg is still not accessible.
1707 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1709 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1710 struct ptlrpc_sec_policy *policy;
1714 LASSERT(ctx->cc_sec);
1715 LASSERT(ctx->cc_sec->ps_policy);
1720 policy = ctx->cc_sec->ps_policy;
1721 RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1725 * Used by ptlrpc client to free reply buffer of \a req. After this
1726 * req->rq_repmsg is set to NULL and should not be accessed anymore.
1728 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1730 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1731 struct ptlrpc_sec_policy *policy;
1735 LASSERT(ctx->cc_sec);
1736 LASSERT(ctx->cc_sec->ps_policy);
1737 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1739 if (req->rq_repbuf == NULL)
1741 LASSERT(req->rq_repbuf_len);
1743 policy = ctx->cc_sec->ps_policy;
1744 policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1745 req->rq_repmsg = NULL;
1749 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1750 struct ptlrpc_cli_ctx *ctx)
1752 struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1754 if (!policy->sp_cops->install_rctx)
1756 return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1759 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1760 struct ptlrpc_svc_ctx *ctx)
1762 struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1764 if (!policy->sp_sops->install_rctx)
1766 return policy->sp_sops->install_rctx(imp, ctx);
1769 /****************************************
1770 * server side security *
1771 ****************************************/
1773 static int flavor_allowed(struct sptlrpc_flavor *exp,
1774 struct ptlrpc_request *req)
1776 struct sptlrpc_flavor *flvr = &req->rq_flvr;
1778 if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1781 if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1782 SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1783 SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1784 SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1790 #define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
1793 * Given an export \a exp, check whether the flavor of incoming \a req
1794 * is allowed by the export \a exp. Main logic is about taking care of
1795 * changing configurations. Return 0 means success.
1797 int sptlrpc_target_export_check(struct obd_export *exp,
1798 struct ptlrpc_request *req)
1800 struct sptlrpc_flavor flavor;
1805 /* client side export has no imp_reverse, skip
1806 * FIXME maybe we should check the flavor as well??? */
1807 if (exp->exp_imp_reverse == NULL)
1810 /* don't care about ctx fini rpc */
1811 if (req->rq_ctx_fini)
1814 spin_lock(&exp->exp_lock);
1816 /* if the flavor just changed (exp->exp_flvr_changed != 0), we wait for
1817 * the first req with the new flavor, then treat it as the current flavor
1818 * and adapt the reverse sec according to it.
1819 * note the first rpc with the new flavor might not be with a root ctx, in
1820 * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1821 if (unlikely(exp->exp_flvr_changed) &&
1822 flavor_allowed(&exp->exp_flvr_old[1], req)) {
1823 /* make the new flavor as "current", and old ones as
1824 * about-to-expire */
1825 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1826 exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1827 flavor = exp->exp_flvr_old[1];
1828 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1829 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1830 exp->exp_flvr_old[0] = exp->exp_flvr;
1831 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1832 EXP_FLVR_UPDATE_EXPIRE;
1833 exp->exp_flvr = flavor;
1835 /* flavor change finished */
1836 exp->exp_flvr_changed = 0;
1837 LASSERT(exp->exp_flvr_adapt == 1);
1839 /* if it's gss, we are only interested in root ctx init */
1840 if (req->rq_auth_gss &&
1841 !(req->rq_ctx_init &&
1842 (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1843 req->rq_auth_usr_ost))) {
1844 spin_unlock(&exp->exp_lock);
1845 CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1846 req->rq_auth_gss, req->rq_ctx_init,
1847 req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1848 req->rq_auth_usr_ost);
1852 exp->exp_flvr_adapt = 0;
1853 spin_unlock(&exp->exp_lock);
1855 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1856 req->rq_svc_ctx, &flavor);
1859 /* if it equals the current flavor, we accept it, but need to
1860 * deal with the reverse sec/ctx */
1861 if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1862 /* most cases should return here; we are only interested in
1863 * gss root ctx init */
1864 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1865 (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1866 !req->rq_auth_usr_ost)) {
1867 spin_unlock(&exp->exp_lock);
1871 /* if the flavor just changed, we should not proceed; just leave
1872 * it, the current flavor will be discovered and replaced
1873 * shortly, and let _this_ rpc pass through */
1874 if (exp->exp_flvr_changed) {
1875 LASSERT(exp->exp_flvr_adapt);
1876 spin_unlock(&exp->exp_lock);
1880 if (exp->exp_flvr_adapt) {
1881 exp->exp_flvr_adapt = 0;
1882 CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1883 exp, exp->exp_flvr.sf_rpc,
1884 exp->exp_flvr_old[0].sf_rpc,
1885 exp->exp_flvr_old[1].sf_rpc);
1886 flavor = exp->exp_flvr;
1887 spin_unlock(&exp->exp_lock);
1889 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1893 CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1894 "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1895 exp->exp_flvr_old[0].sf_rpc,
1896 exp->exp_flvr_old[1].sf_rpc);
1897 spin_unlock(&exp->exp_lock);
1899 return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1904 if (exp->exp_flvr_expire[0]) {
1905 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1906 if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1907 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1908 "middle one ("CFS_DURATION_T")\n", exp,
1909 exp->exp_flvr.sf_rpc,
1910 exp->exp_flvr_old[0].sf_rpc,
1911 exp->exp_flvr_old[1].sf_rpc,
1912 exp->exp_flvr_expire[0] -
1913 cfs_time_current_sec());
1914 spin_unlock(&exp->exp_lock);
1918 CDEBUG(D_SEC, "mark middle expired\n");
1919 exp->exp_flvr_expire[0] = 0;
1921 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1922 exp->exp_flvr.sf_rpc,
1923 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1924 req->rq_flvr.sf_rpc);
1927 /* now it doesn't match the current flavor; the only chance we can
1928 * accept it is to match an old flavor which has not expired. */
1929 if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1930 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1931 if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1932 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1933 "oldest one ("CFS_DURATION_T")\n", exp,
1934 exp->exp_flvr.sf_rpc,
1935 exp->exp_flvr_old[0].sf_rpc,
1936 exp->exp_flvr_old[1].sf_rpc,
1937 exp->exp_flvr_expire[1] -
1938 cfs_time_current_sec());
1939 spin_unlock(&exp->exp_lock);
1943 CDEBUG(D_SEC, "mark oldest expired\n");
1944 exp->exp_flvr_expire[1] = 0;
1946 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1947 exp, exp->exp_flvr.sf_rpc,
1948 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1949 req->rq_flvr.sf_rpc);
1951 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1952 exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1953 exp->exp_flvr_old[1].sf_rpc);
1956 spin_unlock(&exp->exp_lock);
1958 CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
1959 "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1960 exp, exp->exp_obd->obd_name,
1961 req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1962 req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1963 req->rq_flvr.sf_rpc,
1964 exp->exp_flvr.sf_rpc,
1965 exp->exp_flvr_old[0].sf_rpc,
1966 exp->exp_flvr_expire[0] ?
1967 (unsigned long) (exp->exp_flvr_expire[0] -
1968 cfs_time_current_sec()) : 0,
1969 exp->exp_flvr_old[1].sf_rpc,
1970 exp->exp_flvr_expire[1] ?
1971 (unsigned long) (exp->exp_flvr_expire[1] -
1972 cfs_time_current_sec()) : 0);
1975 EXPORT_SYMBOL(sptlrpc_target_export_check);
1977 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1978 struct sptlrpc_rule_set *rset)
1980 struct obd_export *exp;
1981 struct sptlrpc_flavor new_flvr;
1985 spin_lock(&obd->obd_dev_lock);
1987 list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1988 if (exp->exp_connection == NULL)
1991 /* note if this export's flavor had just been updated
1992 * (exp_flvr_changed == 1), this will override the
1994 spin_lock(&exp->exp_lock);
1995 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1996 exp->exp_connection->c_peer.nid,
1998 if (exp->exp_flvr_changed ||
1999 !flavor_equal(&new_flvr, &exp->exp_flvr)) {
2000 exp->exp_flvr_old[1] = new_flvr;
2001 exp->exp_flvr_expire[1] = 0;
2002 exp->exp_flvr_changed = 1;
2003 exp->exp_flvr_adapt = 1;
2005 CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
2006 exp, sptlrpc_part2name(exp->exp_sp_peer),
2007 exp->exp_flvr.sf_rpc,
2008 exp->exp_flvr_old[1].sf_rpc);
2010 spin_unlock(&exp->exp_lock);
2013 spin_unlock(&obd->obd_dev_lock);
2015 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
2017 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
2019 /* peer's claim is unreliable unless gss is being used */
2020 if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2023 switch (req->rq_sp_from) {
2025 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2026 DEBUG_REQ(D_ERROR, req, "faked source CLI");
2027 svc_rc = SECSVC_DROP;
2031 if (!req->rq_auth_usr_mdt) {
2032 DEBUG_REQ(D_ERROR, req, "faked source MDT");
2033 svc_rc = SECSVC_DROP;
2037 if (!req->rq_auth_usr_ost) {
2038 DEBUG_REQ(D_ERROR, req, "faked source OST");
2039 svc_rc = SECSVC_DROP;
2044 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2045 !req->rq_auth_usr_ost) {
2046 DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2047 svc_rc = SECSVC_DROP;
2052 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2053 svc_rc = SECSVC_DROP;
2060 * Used by ptlrpc server, to perform transformation upon request message of
2061 * incoming \a req. This must be the first thing to do with an incoming
2062 * request in the ptlrpc layer.
2064 * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message in
2065 * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
2066 * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2067 * reply message has been prepared.
2068 * \retval SECSVC_DROP failed, this request should be dropped.
2070 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2072 struct ptlrpc_sec_policy *policy;
2073 struct lustre_msg *msg = req->rq_reqbuf;
2078 LASSERT(req->rq_reqmsg == NULL);
2079 LASSERT(req->rq_repmsg == NULL);
2080 LASSERT(req->rq_svc_ctx == NULL);
2082 req->rq_req_swab_mask = 0;
2084 rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2087 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2091 CERROR("error unpacking request from %s x"LPU64"\n",
2092 libcfs_id2str(req->rq_peer), req->rq_xid);
2093 RETURN(SECSVC_DROP);
2096 req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2097 req->rq_sp_from = LUSTRE_SP_ANY;
2098 req->rq_auth_uid = -1; /* set to INVALID_UID */
2099 req->rq_auth_mapped_uid = -1;
2101 policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2103 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2104 RETURN(SECSVC_DROP);
2107 LASSERT(policy->sp_sops->accept);
2108 rc = policy->sp_sops->accept(req);
2109 sptlrpc_policy_put(policy);
2110 LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2111 LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2114 * if it's not the null flavor (which means embedded packing msg),
2115 * reset the swab mask for the coming inner msg unpacking.
2117 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2118 req->rq_req_swab_mask = 0;
2120 /* sanity check for the request source */
2121 rc = sptlrpc_svc_check_from(req, rc);
2126 * Used by ptlrpc server, to allocate a reply buffer for \a req. On success,
2127 * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2128 * a buffer of \a msglen size.
2130 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2132 struct ptlrpc_sec_policy *policy;
2133 struct ptlrpc_reply_state *rs;
2137 LASSERT(req->rq_svc_ctx);
2138 LASSERT(req->rq_svc_ctx->sc_policy);
2140 policy = req->rq_svc_ctx->sc_policy;
2141 LASSERT(policy->sp_sops->alloc_rs);
2143 rc = policy->sp_sops->alloc_rs(req, msglen);
2144 if (unlikely(rc == -ENOMEM)) {
2145 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2146 if (svcpt->scp_service->srv_max_reply_size <
2147 msglen + sizeof(struct ptlrpc_reply_state)) {
2148 /* Just return failure if the size is too big */
2149 CERROR("size of message is too big (%zd), %d allowed\n",
2150 msglen + sizeof(struct ptlrpc_reply_state),
2151 svcpt->scp_service->srv_max_reply_size);
2155 /* failed alloc, try emergency pool */
2156 rs = lustre_get_emerg_rs(svcpt);
2160 req->rq_reply_state = rs;
2161 rc = policy->sp_sops->alloc_rs(req, msglen);
2163 lustre_put_emerg_rs(rs);
2164 req->rq_reply_state = NULL;
2169 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2175 * Used by ptlrpc server, to perform transformation upon reply message.
2177 * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
2178 * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2180 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2182 struct ptlrpc_sec_policy *policy;
2186 LASSERT(req->rq_svc_ctx);
2187 LASSERT(req->rq_svc_ctx->sc_policy);
2189 policy = req->rq_svc_ctx->sc_policy;
2190 LASSERT(policy->sp_sops->authorize);
2192 rc = policy->sp_sops->authorize(req);
2193 LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2199 * Used by ptlrpc server, to free reply_state.
2201 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2203 struct ptlrpc_sec_policy *policy;
2204 unsigned int prealloc;
2207 LASSERT(rs->rs_svc_ctx);
2208 LASSERT(rs->rs_svc_ctx->sc_policy);
2210 policy = rs->rs_svc_ctx->sc_policy;
2211 LASSERT(policy->sp_sops->free_rs);
2213 prealloc = rs->rs_prealloc;
2214 policy->sp_sops->free_rs(rs);
2217 lustre_put_emerg_rs(rs);
2221 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2223 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2226 atomic_inc(&ctx->sc_refcount);
2229 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2231 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2236 LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2237 if (atomic_dec_and_test(&ctx->sc_refcount)) {
2238 if (ctx->sc_policy->sp_sops->free_ctx)
2239 ctx->sc_policy->sp_sops->free_ctx(ctx);
2241 req->rq_svc_ctx = NULL;
2244 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2246 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2251 LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2252 if (ctx->sc_policy->sp_sops->invalidate_ctx)
2253 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2255 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2257 /****************************************
2259 ****************************************/
2262 * Perform transformation upon bulk data pointed to by \a desc. This is called
2263 * before transforming the request message.
2265 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2266 struct ptlrpc_bulk_desc *desc)
2268 struct ptlrpc_cli_ctx *ctx;
2270 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2272 if (!req->rq_pack_bulk)
2275 ctx = req->rq_cli_ctx;
2276 if (ctx->cc_ops->wrap_bulk)
2277 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2280 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2283 * This is called after unwrapping the reply message.
2284 * Return the nob of actual plain text received, or an error code.
2286 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2287 struct ptlrpc_bulk_desc *desc,
2290 struct ptlrpc_cli_ctx *ctx;
2293 LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2295 if (!req->rq_pack_bulk)
2296 return desc->bd_nob_transferred;
2298 ctx = req->rq_cli_ctx;
2299 if (ctx->cc_ops->unwrap_bulk) {
2300 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2304 return desc->bd_nob_transferred;
2306 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2309 * This is called after unwrapping the reply message.
2310 * Return 0 on success or an error code.
2312 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2313 struct ptlrpc_bulk_desc *desc)
2315 struct ptlrpc_cli_ctx *ctx;
2318 LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2320 if (!req->rq_pack_bulk)
2323 ctx = req->rq_cli_ctx;
2324 if (ctx->cc_ops->unwrap_bulk) {
2325 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2331 * if everything is going right, nob should equal nob_transferred.
2332 * in case of privacy mode, nob_transferred needs to be adjusted.
2334 if (desc->bd_nob != desc->bd_nob_transferred) {
2335 CERROR("nob %d doesn't match transferred nob %d\n",
2336 desc->bd_nob, desc->bd_nob_transferred);
2342 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2344 #ifdef HAVE_SERVER_SUPPORT
2346 * Perform transformation upon outgoing bulk read.
2348 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2349 struct ptlrpc_bulk_desc *desc)
2351 struct ptlrpc_svc_ctx *ctx;
2353 LASSERT(req->rq_bulk_read);
2355 if (!req->rq_pack_bulk)
2358 ctx = req->rq_svc_ctx;
2359 if (ctx->sc_policy->sp_sops->wrap_bulk)
2360 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2364 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2367 * Perform transformation upon incoming bulk write.
2369 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2370 struct ptlrpc_bulk_desc *desc)
2372 struct ptlrpc_svc_ctx *ctx;
2375 LASSERT(req->rq_bulk_write);
2378 * if it's in privacy mode, transferred should >= expected; otherwise
2379 * transferred should == expected.
2381 if (desc->bd_nob_transferred < desc->bd_nob ||
2382 (desc->bd_nob_transferred > desc->bd_nob &&
2383 SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2384 SPTLRPC_BULK_SVC_PRIV)) {
2385 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2386 desc->bd_nob_transferred, desc->bd_nob);
2390 if (!req->rq_pack_bulk)
2393 ctx = req->rq_svc_ctx;
2394 if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2395 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2397 CERROR("error unwrap bulk: %d\n", rc);
2400 /* return 0 to allow reply be sent */
2403 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2406 * Prepare buffers for incoming bulk write.
2408 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2409 struct ptlrpc_bulk_desc *desc)
2411 struct ptlrpc_svc_ctx *ctx;
2413 LASSERT(req->rq_bulk_write);
2415 if (!req->rq_pack_bulk)
2418 ctx = req->rq_svc_ctx;
2419 if (ctx->sc_policy->sp_sops->prep_bulk)
2420 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2424 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2426 #endif /* HAVE_SERVER_SUPPORT */
2428 /****************************************
2429 * user descriptor helpers *
2430 ****************************************/
2432 int sptlrpc_current_user_desc_size(void)
2436 ngroups = current_ngroups;
2438 if (ngroups > LUSTRE_MAX_GROUPS)
2439 ngroups = LUSTRE_MAX_GROUPS;
2440 return sptlrpc_user_desc_size(ngroups);
2442 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2444 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2446 struct ptlrpc_user_desc *pud;
2448 pud = lustre_msg_buf(msg, offset, 0);
2450 pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2451 pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2452 pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2453 pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
2454 pud->pud_cap = cfs_curproc_cap_pack();
2455 pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2458 if (pud->pud_ngroups > current_ngroups)
2459 pud->pud_ngroups = current_ngroups;
2460 memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2461 pud->pud_ngroups * sizeof(__u32));
2462 task_unlock(current);
2466 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
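/*
 * Usage sketch for the user descriptor helpers (illustrative; how the
 * segment is reserved is policy-specific and the "offset" below is a
 * placeholder): size the segment with sptlrpc_current_user_desc_size()
 * when laying out the message, then fill it in place once the buffer
 * exists.
 *
 *	int pud_size = sptlrpc_current_user_desc_size();
 *	... reserve a segment of pud_size bytes at "offset" ...
 *	rc = sptlrpc_pack_user_desc(msg, offset);
 */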
2468 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2470 struct ptlrpc_user_desc *pud;
2473 pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2478 __swab32s(&pud->pud_uid);
2479 __swab32s(&pud->pud_gid);
2480 __swab32s(&pud->pud_fsuid);
2481 __swab32s(&pud->pud_fsgid);
2482 __swab32s(&pud->pud_cap);
2483 __swab32s(&pud->pud_ngroups);
2486 if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2487 CERROR("%u groups is too large\n", pud->pud_ngroups);
2491 if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2492 msg->lm_buflens[offset]) {
2493 CERROR("%u groups are claimed but bufsize only %u\n",
2494 pud->pud_ngroups, msg->lm_buflens[offset]);
2499 for (i = 0; i < pud->pud_ngroups; i++)
2500 __swab32s(&pud->pud_groups[i]);
2505 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2507 /****************************************
2509 ****************************************/
2511 const char * sec2target_str(struct ptlrpc_sec *sec)
2513 if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2515 if (sec_is_reverse(sec))
2517 return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2519 EXPORT_SYMBOL(sec2target_str);
2522 * return true if the bulk data is protected
2524 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2526 switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2527 case SPTLRPC_BULK_SVC_INTG:
2528 case SPTLRPC_BULK_SVC_PRIV:
2534 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2536 /****************************************
2537 * crypto API helper/alloc blkciper *
2538 ****************************************/
2540 /****************************************
2541 * initialize/finalize *
2542 ****************************************/
2544 int sptlrpc_init(void)
2548 rwlock_init(&policy_lock);
2550 rc = sptlrpc_gc_init();
2554 rc = sptlrpc_conf_init();
2558 rc = sptlrpc_enc_pool_init();
2562 rc = sptlrpc_null_init();
2566 rc = sptlrpc_plain_init();
2570 rc = sptlrpc_lproc_init();
2577 sptlrpc_plain_fini();
2579 sptlrpc_null_fini();
2581 sptlrpc_enc_pool_fini();
2583 sptlrpc_conf_fini();
2590 void sptlrpc_fini(void)
2592 sptlrpc_lproc_fini();
2593 sptlrpc_plain_fini();
2594 sptlrpc_null_fini();
2595 sptlrpc_enc_pool_fini();
2596 sptlrpc_conf_fini();