4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 * Author: Eric Mei <ericm@clusterfs.com>
41 #define DEBUG_SUBSYSTEM S_SEC
43 #include <libcfs/libcfs.h>
45 #include <liblustre.h>
46 #include <libcfs/list.h>
48 #include <linux/crypto.h>
49 #include <linux/key.h>
53 #include <obd_class.h>
54 #include <obd_support.h>
55 #include <lustre_net.h>
56 #include <lustre_import.h>
57 #include <lustre_dlm.h>
58 #include <lustre_sec.h>
60 #include "ptlrpc_internal.h"
/***********************************************
* policy registers *
***********************************************/
66 static rwlock_t policy_lock;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
NULL,
};
71 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
73 __u16 number = policy->sp_policy;
75 LASSERT(policy->sp_name);
76 LASSERT(policy->sp_cops);
77 LASSERT(policy->sp_sops);
if (number >= SPTLRPC_POLICY_MAX)
return -EINVAL;
82 write_lock(&policy_lock);
83 if (unlikely(policies[number])) {
write_unlock(&policy_lock);
return -EALREADY;
}
87 policies[number] = policy;
88 write_unlock(&policy_lock);
CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
return 0;
93 EXPORT_SYMBOL(sptlrpc_register_policy);
95 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
97 __u16 number = policy->sp_policy;
99 LASSERT(number < SPTLRPC_POLICY_MAX);
101 write_lock(&policy_lock);
102 if (unlikely(policies[number] == NULL)) {
103 write_unlock(&policy_lock);
CERROR("%s: already unregistered\n", policy->sp_name);
return -EINVAL;
}
108 LASSERT(policies[number] == policy);
109 policies[number] = NULL;
110 write_unlock(&policy_lock);
CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
return 0;
115 EXPORT_SYMBOL(sptlrpc_unregister_policy);
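/*
* Example (a sketch, not code from this file): a policy module fills in a
* struct ptlrpc_sec_policy and registers it at module load time, e.g. the
* gss module claims the SPTLRPC_POLICY_GSS slot. The names my_policy,
* my_cops and my_sops below are hypothetical placeholders.
*
*	static struct ptlrpc_sec_policy my_policy = {
*		.sp_owner  = THIS_MODULE,
*		.sp_name   = "sec.my",
*		.sp_policy = SPTLRPC_POLICY_GSS,
*		.sp_cops   = &my_cops,
*		.sp_sops   = &my_sops,
*	};
*
*	rc = sptlrpc_register_policy(&my_policy);	at module init
*	rc = sptlrpc_unregister_policy(&my_policy);	at module exit
*/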
118 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
120 static DEFINE_MUTEX(load_mutex);
121 static cfs_atomic_t loaded = CFS_ATOMIC_INIT(0);
122 struct ptlrpc_sec_policy *policy;
123 __u16 number = SPTLRPC_FLVR_POLICY(flavor);
126 if (number >= SPTLRPC_POLICY_MAX)
130 read_lock(&policy_lock);
131 policy = policies[number];
if (policy && !try_module_get(policy->sp_owner))
policy = NULL;
135 flag = cfs_atomic_read(&loaded);
136 read_unlock(&policy_lock);
138 if (policy != NULL || flag != 0 ||
number != SPTLRPC_POLICY_GSS)
return policy;
142 /* try to load gss module, once */
143 mutex_lock(&load_mutex);
144 if (cfs_atomic_read(&loaded) == 0) {
145 if (request_module("ptlrpc_gss") == 0)
147 "module ptlrpc_gss loaded on demand\n");
149 CERROR("Unable to load module ptlrpc_gss\n");
151 cfs_atomic_set(&loaded, 1);
153 mutex_unlock(&load_mutex);
159 __u32 sptlrpc_name2flavor_base(const char *name)
161 if (!strcmp(name, "null"))
162 return SPTLRPC_FLVR_NULL;
163 if (!strcmp(name, "plain"))
164 return SPTLRPC_FLVR_PLAIN;
165 if (!strcmp(name, "gssnull"))
166 return SPTLRPC_FLVR_GSSNULL;
167 if (!strcmp(name, "krb5n"))
168 return SPTLRPC_FLVR_KRB5N;
169 if (!strcmp(name, "krb5a"))
170 return SPTLRPC_FLVR_KRB5A;
171 if (!strcmp(name, "krb5i"))
172 return SPTLRPC_FLVR_KRB5I;
173 if (!strcmp(name, "krb5p"))
174 return SPTLRPC_FLVR_KRB5P;
175 if (!strcmp(name, "ski"))
176 return SPTLRPC_FLVR_SKI;
177 if (!strcmp(name, "skpi"))
178 return SPTLRPC_FLVR_SKPI;
180 return SPTLRPC_FLVR_INVALID;
182 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
184 const char *sptlrpc_flavor2name_base(__u32 flvr)
186 __u32 base = SPTLRPC_FLVR_BASE(flvr);
if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
return "null";
else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
return "plain";
else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_GSSNULL))
return "gssnull";
else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
return "krb5n";
else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
return "krb5a";
else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
return "krb5i";
else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
return "krb5p";
else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKI))
return "ski";
else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_SKPI))
return "skpi";
CERROR("invalid wire flavor 0x%x\n", flvr);
return "invalid";
210 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
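/*
* Example: the two helpers above are inverses on the base flavor:
*
*	__u32 flvr = sptlrpc_name2flavor_base("krb5i");	yields SPTLRPC_FLVR_KRB5I
*	sptlrpc_flavor2name_base(flvr);			yields "krb5i"
*
* Unknown names map to SPTLRPC_FLVR_INVALID; unknown wire flavors map to
* "invalid" and log a CERROR.
*/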
212 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
213 char *buf, int bufsize)
215 if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
216 snprintf(buf, bufsize, "hash:%s",
217 sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
219 snprintf(buf, bufsize, "%s",
220 sptlrpc_flavor2name_base(sf->sf_rpc));
222 buf[bufsize - 1] = '\0';
225 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
227 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
229 snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
232 * currently we don't support customized bulk specification for
233 * flavors other than plain
if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
char bspec[16];
bspec[0] = '-';
239 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
240 strncat(buf, bspec, bufsize);
243 buf[bufsize - 1] = '\0';
246 EXPORT_SYMBOL(sptlrpc_flavor2name);
248 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
252 if (flags & PTLRPC_SEC_FL_REVERSE)
253 strlcat(buf, "reverse,", bufsize);
254 if (flags & PTLRPC_SEC_FL_ROOTONLY)
255 strlcat(buf, "rootonly,", bufsize);
256 if (flags & PTLRPC_SEC_FL_UDESC)
257 strlcat(buf, "udesc,", bufsize);
258 if (flags & PTLRPC_SEC_FL_BULK)
259 strlcat(buf, "bulk,", bufsize);
261 strlcat(buf, "-,", bufsize);
265 EXPORT_SYMBOL(sptlrpc_secflags2str);
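/*
* Example: for flags == (PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_BULK) the
* returned buffer reads "reverse,bulk," (note the trailing comma); when no
* recognized flag is set, it falls back to "-,".
*/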
267 /**************************************************
268 * client context APIs *
269 **************************************************/
272 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
274 struct vfs_cred vcred;
275 int create = 1, remove_dead = 1;
278 LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
280 if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
281 PTLRPC_SEC_FL_ROOTONLY)) {
284 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
289 vcred.vc_uid = current_uid();
290 vcred.vc_gid = current_gid();
293 return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
297 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
299 cfs_atomic_inc(&ctx->cc_refcount);
302 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
304 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
306 struct ptlrpc_sec *sec = ctx->cc_sec;
309 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
311 if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
314 sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
316 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
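/*
* Typical reference discipline (a sketch): callers pair each get with a put
* once they are done with the context, e.g.
*
*	ctx = sptlrpc_cli_ctx_get(ctx);
*	... use ctx ...
*	sptlrpc_cli_ctx_put(ctx, 1);	sync == 1: release may sleep
*/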
319 * Expire the client context immediately.
* \pre Caller must hold at least 1 reference on \a ctx.
323 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
325 LASSERT(ctx->cc_ops->die);
326 ctx->cc_ops->die(ctx, 0);
328 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
* Wake up the threads that are waiting on this client context. Called
* after some status change has happened on \a ctx.
334 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
336 struct ptlrpc_request *req, *next;
338 spin_lock(&ctx->cc_lock);
339 cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
341 cfs_list_del_init(&req->rq_ctx_chain);
342 ptlrpc_client_wake_req(req);
344 spin_unlock(&ctx->cc_lock);
346 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
348 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
350 LASSERT(ctx->cc_ops);
352 if (ctx->cc_ops->display == NULL)
355 return ctx->cc_ops->display(ctx, buf, bufsize);
358 static int import_sec_check_expire(struct obd_import *imp)
362 spin_lock(&imp->imp_lock);
363 if (imp->imp_sec_expire &&
364 imp->imp_sec_expire < cfs_time_current_sec()) {
366 imp->imp_sec_expire = 0;
368 spin_unlock(&imp->imp_lock);
373 CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
374 return sptlrpc_import_sec_adapt(imp, NULL, 0);
377 static int import_sec_validate_get(struct obd_import *imp,
378 struct ptlrpc_sec **sec)
382 if (unlikely(imp->imp_sec_expire)) {
383 rc = import_sec_check_expire(imp);
388 *sec = sptlrpc_import_sec_ref(imp);
390 CERROR("import %p (%s) with no sec\n",
391 imp, ptlrpc_import_state_name(imp->imp_state));
395 if (unlikely((*sec)->ps_dying)) {
396 CERROR("attempt to use dying sec %p\n", sec);
397 sptlrpc_sec_put(*sec);
* Given a \a req, find or allocate an appropriate context for it.
* \pre req->rq_cli_ctx == NULL.
* \retval 0 success, and req->rq_cli_ctx is set.
* \retval -ev error number, and req->rq_cli_ctx == NULL.
411 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
413 struct obd_import *imp = req->rq_import;
414 struct ptlrpc_sec *sec;
418 LASSERT(!req->rq_cli_ctx);
421 rc = import_sec_validate_get(imp, &sec);
425 req->rq_cli_ctx = get_my_ctx(sec);
427 sptlrpc_sec_put(sec);
429 if (!req->rq_cli_ctx) {
430 CERROR("req %p: fail to get context\n", req);
438 * Drop the context for \a req.
439 * \pre req->rq_cli_ctx != NULL.
440 * \post req->rq_cli_ctx == NULL.
* If \a sync == 0, this function should return quickly without sleeping;
* otherwise it might trigger and wait for the whole process of sending
* a context-destroying rpc to the server.
446 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
451 LASSERT(req->rq_cli_ctx);
/* the request might be asked to release its context early while
* still in the context waiting list.
456 if (!cfs_list_empty(&req->rq_ctx_chain)) {
457 spin_lock(&req->rq_cli_ctx->cc_lock);
458 cfs_list_del_init(&req->rq_ctx_chain);
459 spin_unlock(&req->rq_cli_ctx->cc_lock);
462 sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
463 req->rq_cli_ctx = NULL;
468 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
469 struct ptlrpc_cli_ctx *oldctx,
470 struct ptlrpc_cli_ctx *newctx)
472 struct sptlrpc_flavor old_flvr;
char *reqmsg = NULL; /* to work around old gcc */
477 LASSERT(req->rq_reqmsg);
478 LASSERT(req->rq_reqlen);
479 LASSERT(req->rq_replen);
481 CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
482 "switch sec %p(%s) -> %p(%s)\n", req,
483 oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
484 newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
485 oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
486 newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
489 old_flvr = req->rq_flvr;
491 /* save request message */
492 reqmsg_size = req->rq_reqlen;
493 if (reqmsg_size != 0) {
494 OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
497 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
500 /* release old req/rep buf */
501 req->rq_cli_ctx = oldctx;
502 sptlrpc_cli_free_reqbuf(req);
503 sptlrpc_cli_free_repbuf(req);
504 req->rq_cli_ctx = newctx;
506 /* recalculate the flavor */
507 sptlrpc_req_set_flavor(req, 0);
/* alloc new request buffer
* we don't need to alloc the reply buffer here; leave it to the
* rest of the ptlrpc processing */
512 if (reqmsg_size != 0) {
513 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
515 LASSERT(req->rq_reqmsg);
516 memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
518 CWARN("failed to alloc reqbuf: %d\n", rc);
519 req->rq_flvr = old_flvr;
522 OBD_FREE_LARGE(reqmsg, reqmsg_size);
* If the current context of \a req is somehow dead, e.g. we just switched
* flavor and thus marked the original contexts dead, we'll find a new
* context for it. If no switch is needed, \a req ends up with the same context.
532 * \note a request must have a context, to keep other parts of code happy.
533 * In any case of failure during the switching, we must restore the old one.
535 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
537 struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
538 struct ptlrpc_cli_ctx *newctx;
544 sptlrpc_cli_ctx_get(oldctx);
545 sptlrpc_req_put_ctx(req, 0);
547 rc = sptlrpc_req_get_ctx(req);
549 LASSERT(!req->rq_cli_ctx);
551 /* restore old ctx */
552 req->rq_cli_ctx = oldctx;
556 newctx = req->rq_cli_ctx;
559 if (unlikely(newctx == oldctx &&
560 test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
* we still got the old dead ctx; this usually means the system is too busy
565 "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
566 newctx, newctx->cc_flags);
568 schedule_timeout_and_set_state(TASK_INTERRUPTIBLE,
572 * it's possible newctx == oldctx if we're switching
573 * subflavor with the same sec.
575 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
577 /* restore old ctx */
578 sptlrpc_req_put_ctx(req, 0);
579 req->rq_cli_ctx = oldctx;
583 LASSERT(req->rq_cli_ctx == newctx);
586 sptlrpc_cli_ctx_put(oldctx, 1);
589 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
592 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
594 if (cli_ctx_is_refreshed(ctx))
600 int ctx_refresh_timeout(void *data)
602 struct ptlrpc_request *req = data;
605 /* conn_cnt is needed in expire_one_request */
606 lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
608 rc = ptlrpc_expire_one_request(req, 1);
/* if we started recovery, we should mark this ctx dead; otherwise,
* in case lgssd died, nobody would retire this ctx and a following
* connect would still find the same ctx, thus causing deadlock.
* there's an assumption that the expiry time of the request should be
* later than the context refresh expiry time. */
if (rc == 0)
616 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
621 void ctx_refresh_interrupt(void *data)
623 struct ptlrpc_request *req = data;
625 spin_lock(&req->rq_lock);
627 spin_unlock(&req->rq_lock);
631 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
633 spin_lock(&ctx->cc_lock);
634 if (!cfs_list_empty(&req->rq_ctx_chain))
635 cfs_list_del_init(&req->rq_ctx_chain);
636 spin_unlock(&ctx->cc_lock);
* Refresh the context of \a req if it is not up to date.
* \param timeout
* - < 0: don't wait
* - = 0: wait until success or a fatal error occurs
644 * - > 0: timeout value (in seconds)
* The status of the context may be changed by other threads at any time.
* We allow this race, but once we return 0, the caller may assume the
* context is up to date and keep using it until the owning rpc is done.
* \retval 0 only if the context is up to date.
651 * \retval -ev error number.
653 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
655 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
656 struct ptlrpc_sec *sec;
657 struct l_wait_info lwi;
663 if (req->rq_ctx_init || req->rq_ctx_fini)
* during the process a request's context might even change type
* (e.g. from a gss ctx to a null ctx), so on each loop we need to re-check
672 rc = import_sec_validate_get(req->rq_import, &sec);
676 if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
677 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
678 req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
679 req_off_ctx_list(req, ctx);
680 sptlrpc_req_replace_dead_ctx(req);
681 ctx = req->rq_cli_ctx;
683 sptlrpc_sec_put(sec);
685 if (cli_ctx_is_eternal(ctx))
688 if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
689 LASSERT(ctx->cc_ops->refresh);
690 ctx->cc_ops->refresh(ctx);
692 LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
694 LASSERT(ctx->cc_ops->validate);
695 if (ctx->cc_ops->validate(ctx) == 0) {
696 req_off_ctx_list(req, ctx);
700 if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
701 spin_lock(&req->rq_lock);
703 spin_unlock(&req->rq_lock);
704 req_off_ctx_list(req, ctx);
* There's a subtle issue when resending RPCs; consider the following
* sequence:
* 1. the request was sent to the server.
* 2. recovery was kicked off; after it finished, the request was marked
* for resend.
* 3. resend the request.
* 4. old reply from server received; we accept and verify the reply.
* this must succeed, otherwise the error would be noticed by the
* application.
* 5. new reply from server received, dropped by LNet.
* Note the xid of the old & new request is the same. We can't simply
* change the xid of the resent request because the server relies on
* it for reply reconstruction.
* Commonly the original context should be up to date because we have a
* nice long expiry time; the server will keep its context because we
* hold at least one ref on the old context, which prevents the
* context-destroying RPC from being sent. So the server can still accept
* the request and finish the RPC. But if that's not the case:
* 1. If the server-side context has been trimmed, a NO_CONTEXT will
* be returned, and gss_cli_ctx_verify/unseal will switch to the new
* context.
* 2. If the current context was never refreshed, then we are fine: we
* never really sent a request with the old context before.
735 if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
736 unlikely(req->rq_reqmsg) &&
737 lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
738 req_off_ctx_list(req, ctx);
742 if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
743 req_off_ctx_list(req, ctx);
745 * don't switch ctx if import was deactivated
747 if (req->rq_import->imp_deactive) {
748 spin_lock(&req->rq_lock);
750 spin_unlock(&req->rq_lock);
754 rc = sptlrpc_req_replace_dead_ctx(req);
756 LASSERT(ctx == req->rq_cli_ctx);
757 CERROR("req %p: failed to replace dead ctx %p: %d\n",
759 spin_lock(&req->rq_lock);
761 spin_unlock(&req->rq_lock);
765 ctx = req->rq_cli_ctx;
* Now we're sure this context is in the middle of an upcall; add myself
* to the waiting list.
773 spin_lock(&ctx->cc_lock);
774 if (cfs_list_empty(&req->rq_ctx_chain))
775 cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
776 spin_unlock(&ctx->cc_lock);
779 RETURN(-EWOULDBLOCK);
781 /* Clear any flags that may be present from previous sends */
782 LASSERT(req->rq_receiving_reply == 0);
783 spin_lock(&req->rq_lock);
785 req->rq_timedout = 0;
788 spin_unlock(&req->rq_lock);
790 lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
791 ctx_refresh_interrupt, req);
792 rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
* the following cases could lead us here:
* - successfully refreshed;
* - interrupted;
* - timed out, and we don't want to recover from the failure;
* - timed out, and woken up when recovery finished;
* - someone else marked this ctx dead by force;
* - someone invalidated the req and called ptlrpc_client_wake_req(),
802 * e.g. ptlrpc_abort_inflight();
804 if (!cli_ctx_is_refreshed(ctx)) {
/* timed out or interrupted */
806 req_off_ctx_list(req, ctx);
816 * Initialize flavor settings for \a req, according to \a opcode.
818 * \note this could be called in two situations:
819 * - new request from ptlrpc_pre_req(), with proper @opcode
820 * - old request which changed ctx in the middle, with @opcode == 0
822 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
824 struct ptlrpc_sec *sec;
826 LASSERT(req->rq_import);
827 LASSERT(req->rq_cli_ctx);
828 LASSERT(req->rq_cli_ctx->cc_sec);
829 LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
/* special security flags according to opcode */
835 case MGS_CONFIG_READ:
837 req->rq_bulk_read = 1;
841 req->rq_bulk_write = 1;
844 req->rq_ctx_init = 1;
847 req->rq_ctx_fini = 1;
/* init/fini rpcs won't be resent, so we can't get here */
851 LASSERT(req->rq_ctx_init == 0);
852 LASSERT(req->rq_ctx_fini == 0);
854 /* cleanup flags, which should be recalculated */
855 req->rq_pack_udesc = 0;
856 req->rq_pack_bulk = 0;
860 sec = req->rq_cli_ctx->cc_sec;
862 spin_lock(&sec->ps_lock);
863 req->rq_flvr = sec->ps_flvr;
864 spin_unlock(&sec->ps_lock);
/* force SVC_NULL for context initiation rpc, SVC_INTG for context
* destruction rpc */
868 if (unlikely(req->rq_ctx_init))
869 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
870 else if (unlikely(req->rq_ctx_fini))
871 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
873 /* user descriptor flag, null security can't do it anyway */
874 if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
875 (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
876 req->rq_pack_udesc = 1;
878 /* bulk security flag */
879 if ((req->rq_bulk_read || req->rq_bulk_write) &&
880 sptlrpc_flavor_has_bulk(&req->rq_flvr))
881 req->rq_pack_bulk = 1;
884 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
886 if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
889 LASSERT(req->rq_clrbuf);
890 if (req->rq_pool || !req->rq_reqbuf)
893 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
894 req->rq_reqbuf = NULL;
895 req->rq_reqbuf_len = 0;
* Given an import \a imp, check whether the current user has a valid context
* or not. We may create a new context and try to refresh it, retrying
* repeatedly in case of non-fatal errors. Return 0 means success.
903 int sptlrpc_import_check_ctx(struct obd_import *imp)
905 struct ptlrpc_sec *sec;
906 struct ptlrpc_cli_ctx *ctx;
907 struct ptlrpc_request *req = NULL;
913 sec = sptlrpc_import_sec_ref(imp);
914 ctx = get_my_ctx(sec);
915 sptlrpc_sec_put(sec);
920 if (cli_ctx_is_eternal(ctx) ||
921 ctx->cc_ops->validate(ctx) == 0) {
922 sptlrpc_cli_ctx_put(ctx, 1);
926 if (cli_ctx_is_error(ctx)) {
927 sptlrpc_cli_ctx_put(ctx, 1);
931 req = ptlrpc_request_cache_alloc(GFP_NOFS);
935 spin_lock_init(&req->rq_lock);
936 cfs_atomic_set(&req->rq_refcount, 10000);
937 CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
938 init_waitqueue_head(&req->rq_reply_waitq);
939 init_waitqueue_head(&req->rq_set_waitq);
940 req->rq_import = imp;
941 req->rq_flvr = sec->ps_flvr;
942 req->rq_cli_ctx = ctx;
944 rc = sptlrpc_req_refresh_ctx(req, 0);
945 LASSERT(cfs_list_empty(&req->rq_ctx_chain));
946 sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
947 ptlrpc_request_cache_free(req);
953 * Used by ptlrpc client, to perform the pre-defined security transformation
* upon the request message of \a req. After this function is called,
955 * req->rq_reqmsg is still accessible as clear text.
957 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
959 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
964 LASSERT(ctx->cc_sec);
965 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
/* we wrap the bulk request here because now we can be sure
* the context is up to date.
971 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
976 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
977 case SPTLRPC_SVC_NULL:
978 case SPTLRPC_SVC_AUTH:
979 case SPTLRPC_SVC_INTG:
980 LASSERT(ctx->cc_ops->sign);
981 rc = ctx->cc_ops->sign(ctx, req);
983 case SPTLRPC_SVC_PRIV:
984 LASSERT(ctx->cc_ops->seal);
985 rc = ctx->cc_ops->seal(ctx, req);
992 LASSERT(req->rq_reqdata_len);
993 LASSERT(req->rq_reqdata_len % 8 == 0);
994 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
1000 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
1002 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1007 LASSERT(ctx->cc_sec);
1008 LASSERT(req->rq_repbuf);
1009 LASSERT(req->rq_repdata);
1010 LASSERT(req->rq_repmsg == NULL);
1012 req->rq_rep_swab_mask = 0;
1014 rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1017 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1021 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
1025 if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1026 CERROR("replied data length %d too small\n",
1027 req->rq_repdata_len);
1031 if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1032 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1033 CERROR("reply policy %u doesn't match request policy %u\n",
1034 SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1035 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1039 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1040 case SPTLRPC_SVC_NULL:
1041 case SPTLRPC_SVC_AUTH:
1042 case SPTLRPC_SVC_INTG:
1043 LASSERT(ctx->cc_ops->verify);
1044 rc = ctx->cc_ops->verify(ctx, req);
1046 case SPTLRPC_SVC_PRIV:
1047 LASSERT(ctx->cc_ops->unseal);
1048 rc = ctx->cc_ops->unseal(ctx, req);
1053 LASSERT(rc || req->rq_repmsg || req->rq_resend);
if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
!req->rq_ctx_init)
req->rq_rep_swab_mask = 0;
1062 * Used by ptlrpc client, to perform security transformation upon the reply
* message of \a req. After returning successfully, req->rq_repmsg points to
1064 * the reply message in clear text.
* \pre the reply buffer should have been un-posted from LNet, so nothing is
* going to change in the buffer.
1069 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1071 LASSERT(req->rq_repbuf);
1072 LASSERT(req->rq_repdata == NULL);
1073 LASSERT(req->rq_repmsg == NULL);
1074 LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1076 if (req->rq_reply_off == 0 &&
1077 (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1078 CERROR("real reply with offset 0\n");
1082 if (req->rq_reply_off % 8 != 0) {
1083 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1087 req->rq_repdata = (struct lustre_msg *)
1088 (req->rq_repbuf + req->rq_reply_off);
1089 req->rq_repdata_len = req->rq_nob_received;
1091 return do_cli_unwrap_reply(req);
1095 * Used by ptlrpc client, to perform security transformation upon the early
* reply message of \a req. We expect rq_reply_off to be 0, and
* rq_nob_received to be the early reply size.
* Because the receive buffer might still be posted, the reply data might be
* changed at any time, whether we hold rq_lock or not. For this reason we
* allocate a separate ptlrpc_request and reply buffer for early reply
* processing.
1104 * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1105 * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1106 * \a *req_ret to release it.
1107 * \retval -ev error number, and \a req_ret will not be set.
1109 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1110 struct ptlrpc_request **req_ret)
1112 struct ptlrpc_request *early_req;
1114 int early_bufsz, early_size;
1118 early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
1119 if (early_req == NULL)
1122 early_size = req->rq_nob_received;
1123 early_bufsz = size_roundup_power2(early_size);
1124 OBD_ALLOC_LARGE(early_buf, early_bufsz);
1125 if (early_buf == NULL)
1126 GOTO(err_req, rc = -ENOMEM);
/* sanity checks and data copy-out, done inside the spinlock */
1129 spin_lock(&req->rq_lock);
1131 if (req->rq_replied) {
1132 spin_unlock(&req->rq_lock);
1133 GOTO(err_buf, rc = -EALREADY);
1136 LASSERT(req->rq_repbuf);
1137 LASSERT(req->rq_repdata == NULL);
1138 LASSERT(req->rq_repmsg == NULL);
1140 if (req->rq_reply_off != 0) {
1141 CERROR("early reply with offset %u\n", req->rq_reply_off);
1142 spin_unlock(&req->rq_lock);
1143 GOTO(err_buf, rc = -EPROTO);
1146 if (req->rq_nob_received != early_size) {
/* even if another early reply arrived, the size should be the same */
1148 CERROR("data size has changed from %u to %u\n",
1149 early_size, req->rq_nob_received);
1150 spin_unlock(&req->rq_lock);
1151 GOTO(err_buf, rc = -EINVAL);
1154 if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1155 CERROR("early reply length %d too small\n",
1156 req->rq_nob_received);
1157 spin_unlock(&req->rq_lock);
1158 GOTO(err_buf, rc = -EALREADY);
1161 memcpy(early_buf, req->rq_repbuf, early_size);
1162 spin_unlock(&req->rq_lock);
1164 spin_lock_init(&early_req->rq_lock);
1165 early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1166 early_req->rq_flvr = req->rq_flvr;
1167 early_req->rq_repbuf = early_buf;
1168 early_req->rq_repbuf_len = early_bufsz;
1169 early_req->rq_repdata = (struct lustre_msg *) early_buf;
1170 early_req->rq_repdata_len = early_size;
1171 early_req->rq_early = 1;
1172 early_req->rq_reqmsg = req->rq_reqmsg;
1174 rc = do_cli_unwrap_reply(early_req);
1176 DEBUG_REQ(D_ADAPTTO, early_req,
1177 "error %d unwrap early reply", rc);
1181 LASSERT(early_req->rq_repmsg);
1182 *req_ret = early_req;
1186 sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1188 OBD_FREE_LARGE(early_buf, early_bufsz);
1190 ptlrpc_request_cache_free(early_req);
1195 * Used by ptlrpc client, to release a processed early reply \a early_req.
1197 * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1199 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1201 LASSERT(early_req->rq_repbuf);
1202 LASSERT(early_req->rq_repdata);
1203 LASSERT(early_req->rq_repmsg);
1205 sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1206 OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1207 ptlrpc_request_cache_free(early_req);
/**************************************************
* sec ID *
**************************************************/
1215 * "fixed" sec (e.g. null) use sec_id < 0
1217 static cfs_atomic_t sptlrpc_sec_id = CFS_ATOMIC_INIT(1);
1219 int sptlrpc_get_next_secid(void)
1221 return cfs_atomic_inc_return(&sptlrpc_sec_id);
1223 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1225 /**************************************************
1226 * client side high-level security APIs *
1227 **************************************************/
1229 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1230 int grace, int force)
1232 struct ptlrpc_sec_policy *policy = sec->ps_policy;
1234 LASSERT(policy->sp_cops);
1235 LASSERT(policy->sp_cops->flush_ctx_cache);
1237 return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1240 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1242 struct ptlrpc_sec_policy *policy = sec->ps_policy;
1244 LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
1245 LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
1246 LASSERT(policy->sp_cops->destroy_sec);
CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1250 policy->sp_cops->destroy_sec(sec);
1251 sptlrpc_policy_put(policy);
1254 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1256 sec_cop_destroy_sec(sec);
1258 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1260 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1262 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1264 if (sec->ps_policy->sp_cops->kill_sec) {
1265 sec->ps_policy->sp_cops->kill_sec(sec);
1267 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1271 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
if (sec)
cfs_atomic_inc(&sec->ps_refcount);
return sec;
1278 EXPORT_SYMBOL(sptlrpc_sec_get);
1280 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1283 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1285 if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
1286 sptlrpc_gc_del_sec(sec);
1287 sec_cop_destroy_sec(sec);
1291 EXPORT_SYMBOL(sptlrpc_sec_put);
* the policy module is responsible for taking a reference of the import
1297 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1298 struct ptlrpc_svc_ctx *svc_ctx,
1299 struct sptlrpc_flavor *sf,
1300 enum lustre_sec_part sp)
1302 struct ptlrpc_sec_policy *policy;
1303 struct ptlrpc_sec *sec;
1308 LASSERT(imp->imp_dlm_fake == 1);
1310 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1311 imp->imp_obd->obd_type->typ_name,
1312 imp->imp_obd->obd_name,
1313 sptlrpc_flavor2name(sf, str, sizeof(str)));
1315 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1316 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1318 LASSERT(imp->imp_dlm_fake == 0);
1320 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1321 imp->imp_obd->obd_type->typ_name,
1322 imp->imp_obd->obd_name,
1323 sptlrpc_flavor2name(sf, str, sizeof(str)));
1325 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1327 CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1332 sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1334 cfs_atomic_inc(&sec->ps_refcount);
1338 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1339 sptlrpc_gc_add_sec(sec);
1341 sptlrpc_policy_put(policy);
1347 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1349 struct ptlrpc_sec *sec;
1351 spin_lock(&imp->imp_lock);
1352 sec = sptlrpc_sec_get(imp->imp_sec);
1353 spin_unlock(&imp->imp_lock);
1357 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1359 static void sptlrpc_import_sec_install(struct obd_import *imp,
1360 struct ptlrpc_sec *sec)
1362 struct ptlrpc_sec *old_sec;
1364 LASSERT_ATOMIC_POS(&sec->ps_refcount);
1366 spin_lock(&imp->imp_lock);
1367 old_sec = imp->imp_sec;
1369 spin_unlock(&imp->imp_lock);
1372 sptlrpc_sec_kill(old_sec);
1374 /* balance the ref taken by this import */
1375 sptlrpc_sec_put(old_sec);
1380 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1382 return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1386 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1391 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1392 struct ptlrpc_sec *sec,
1393 struct sptlrpc_flavor *sf)
1395 char str1[32], str2[32];
1397 if (sec->ps_flvr.sf_flags != sf->sf_flags)
1398 CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
1399 sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1400 str1, sizeof(str1)),
1401 sptlrpc_secflags2str(sf->sf_flags,
1402 str2, sizeof(str2)));
1404 spin_lock(&sec->ps_lock);
1405 flavor_copy(&sec->ps_flvr, sf);
1406 spin_unlock(&sec->ps_lock);
* Get an appropriate ptlrpc_sec for the \a imp, according to the current
* configuration. When called, imp->imp_sec may or may not be NULL.
1413 * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1414 * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1416 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1417 struct ptlrpc_svc_ctx *svc_ctx,
1418 struct sptlrpc_flavor *flvr)
1420 struct ptlrpc_connection *conn;
1421 struct sptlrpc_flavor sf;
1422 struct ptlrpc_sec *sec, *newsec;
1423 enum lustre_sec_part sp;
1433 conn = imp->imp_connection;
1435 if (svc_ctx == NULL) {
1436 struct client_obd *cliobd = &imp->imp_obd->u.cli;
* normal import, determine flavor from rule set, except
* for the mgc, whose flavor is predetermined.
1441 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1442 sf = cliobd->cl_flvr_mgc;
1444 sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1446 &cliobd->cl_target_uuid,
1449 sp = imp->imp_obd->u.cli.cl_sp_me;
/* reverse import, determine flavor from incoming request */
1454 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1455 sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1456 PTLRPC_SEC_FL_ROOTONLY;
1458 sp = sptlrpc_target_sec_part(imp->imp_obd);
1461 sec = sptlrpc_import_sec_ref(imp);
1465 if (flavor_equal(&sf, &sec->ps_flvr))
1468 CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
1469 imp->imp_obd->obd_name,
1470 obd_uuid2str(&conn->c_remote_uuid),
1471 sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1472 sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1474 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1475 SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1476 SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1477 SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1478 sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1481 } else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
1482 SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
1483 CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
1484 imp->imp_obd->obd_name,
1485 obd_uuid2str(&conn->c_remote_uuid),
1486 LNET_NIDNET(conn->c_self),
1487 sptlrpc_flavor2name(&sf, str, sizeof(str)));
1490 mutex_lock(&imp->imp_sec_mutex);
1492 newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1494 sptlrpc_import_sec_install(imp, newsec);
1496 CERROR("import %s->%s: failed to create new sec\n",
1497 imp->imp_obd->obd_name,
1498 obd_uuid2str(&conn->c_remote_uuid));
1502 mutex_unlock(&imp->imp_sec_mutex);
1504 sptlrpc_sec_put(sec);
1508 void sptlrpc_import_sec_put(struct obd_import *imp)
1511 sptlrpc_sec_kill(imp->imp_sec);
1513 sptlrpc_sec_put(imp->imp_sec);
1514 imp->imp_sec = NULL;
1518 static void import_flush_ctx_common(struct obd_import *imp,
1519 uid_t uid, int grace, int force)
1521 struct ptlrpc_sec *sec;
1526 sec = sptlrpc_import_sec_ref(imp);
1530 sec_cop_flush_ctx_cache(sec, uid, grace, force);
1531 sptlrpc_sec_put(sec);
1534 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
/* it's important to use grace mode, see the explanation in
1537 * sptlrpc_req_refresh_ctx() */
1538 import_flush_ctx_common(imp, 0, 1, 1);
1541 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1543 import_flush_ctx_common(imp, current_uid(), 1, 1);
1545 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1547 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1549 import_flush_ctx_common(imp, -1, 1, 1);
1551 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
* Used by ptlrpc client to allocate the request buffer of \a req. Upon
* successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
1557 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1559 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1560 struct ptlrpc_sec_policy *policy;
1564 LASSERT(ctx->cc_sec);
1565 LASSERT(ctx->cc_sec->ps_policy);
1566 LASSERT(req->rq_reqmsg == NULL);
1567 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1569 policy = ctx->cc_sec->ps_policy;
1570 rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1572 LASSERT(req->rq_reqmsg);
1573 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1575 /* zeroing preallocated buffer */
1577 memset(req->rq_reqmsg, 0, msgsize);
1584 * Used by ptlrpc client to free request buffer of \a req. After this
1585 * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1587 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1589 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1590 struct ptlrpc_sec_policy *policy;
1593 LASSERT(ctx->cc_sec);
1594 LASSERT(ctx->cc_sec->ps_policy);
1595 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1597 if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1600 policy = ctx->cc_sec->ps_policy;
1601 policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1602 req->rq_reqmsg = NULL;
1606 * NOTE caller must guarantee the buffer size is enough for the enlargement
1608 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1609 int segment, int newsize)
1612 int oldsize, oldmsg_size, movesize;
1614 LASSERT(segment < msg->lm_bufcount);
1615 LASSERT(msg->lm_buflens[segment] <= newsize);
1617 if (msg->lm_buflens[segment] == newsize)
1620 /* nothing to do if we are enlarging the last segment */
1621 if (segment == msg->lm_bufcount - 1) {
1622 msg->lm_buflens[segment] = newsize;
1626 oldsize = msg->lm_buflens[segment];
1628 src = lustre_msg_buf(msg, segment + 1, 0);
1629 msg->lm_buflens[segment] = newsize;
1630 dst = lustre_msg_buf(msg, segment + 1, 0);
1631 msg->lm_buflens[segment] = oldsize;
1633 /* move from segment + 1 to end segment */
1634 LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1635 oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1636 movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1637 LASSERT(movesize >= 0);
1640 memmove(dst, src, movesize);
/* note we don't clear the area where the old data lived; it's not secret */
1644 /* finally set new segment size */
1645 msg->lm_buflens[segment] = newsize;
1647 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
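/*
* Worked example: for a V2 message with segment lengths {128, 40, 64},
* enlarging segment 1 to 48 makes dst - src == 8, memmove()s the 64-byte
* tail (segment 2) up by those 8 bytes, then sets lm_buflens[1] = 48.
* Enlarging the last segment merely updates its length, since no data
* follows it.
*/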
* Used by ptlrpc client to enlarge the \a segment of the request message
* pointed to by req->rq_reqmsg to size \a newsize; all previously filled-in
* data will be preserved after the enlargement. This must be called after
* the original request buffer has been allocated.
* \note after this is called, rq_reqmsg and rq_reqlen might have changed,
* so the caller should refresh its local pointers if needed.
1658 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1659 int segment, int newsize)
1661 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1662 struct ptlrpc_sec_cops *cops;
1663 struct lustre_msg *msg = req->rq_reqmsg;
1667 LASSERT(msg->lm_bufcount > segment);
1668 LASSERT(msg->lm_buflens[segment] <= newsize);
1670 if (msg->lm_buflens[segment] == newsize)
1673 cops = ctx->cc_sec->ps_policy->sp_cops;
1674 LASSERT(cops->enlarge_reqbuf);
1675 return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1677 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1680 * Used by ptlrpc client to allocate reply buffer of \a req.
1682 * \note After this, req->rq_repmsg is still not accessible.
1684 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1686 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1687 struct ptlrpc_sec_policy *policy;
1691 LASSERT(ctx->cc_sec);
1692 LASSERT(ctx->cc_sec->ps_policy);
1697 policy = ctx->cc_sec->ps_policy;
1698 RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1702 * Used by ptlrpc client to free reply buffer of \a req. After this
1703 * req->rq_repmsg is set to NULL and should not be accessed anymore.
1705 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1707 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1708 struct ptlrpc_sec_policy *policy;
1712 LASSERT(ctx->cc_sec);
1713 LASSERT(ctx->cc_sec->ps_policy);
1714 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1716 if (req->rq_repbuf == NULL)
1718 LASSERT(req->rq_repbuf_len);
1720 policy = ctx->cc_sec->ps_policy;
1721 policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1722 req->rq_repmsg = NULL;
1726 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1727 struct ptlrpc_cli_ctx *ctx)
1729 struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1731 if (!policy->sp_cops->install_rctx)
1733 return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1736 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1737 struct ptlrpc_svc_ctx *ctx)
1739 struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1741 if (!policy->sp_sops->install_rctx)
1743 return policy->sp_sops->install_rctx(imp, ctx);
1746 /****************************************
1747 * server side security *
1748 ****************************************/
1750 static int flavor_allowed(struct sptlrpc_flavor *exp,
1751 struct ptlrpc_request *req)
1753 struct sptlrpc_flavor *flvr = &req->rq_flvr;
1755 if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1758 if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1759 SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1760 SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1761 SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1767 #define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
1770 * Given an export \a exp, check whether the flavor of incoming \a req
* is allowed by the export \a exp. The main logic deals with handling
* configuration changes. Return 0 means success.
1774 int sptlrpc_target_export_check(struct obd_export *exp,
1775 struct ptlrpc_request *req)
1777 struct sptlrpc_flavor flavor;
/* client side export has no imp_reverse, skip
* FIXME maybe we should check the flavor as well??? */
1784 if (exp->exp_imp_reverse == NULL)
1787 /* don't care about ctx fini rpc */
1788 if (req->rq_ctx_fini)
1791 spin_lock(&exp->exp_lock);
/* if the flavor just changed (exp->exp_flvr_changed != 0), we wait for
* the first req with the new flavor, then treat it as the current flavor
* and adapt the reverse sec according to it.
* note the first rpc with the new flavor might not carry a root ctx, in
* which case we delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1798 if (unlikely(exp->exp_flvr_changed) &&
1799 flavor_allowed(&exp->exp_flvr_old[1], req)) {
/* make the new flavor the "current" one, and the old ones
* about-to-expire */
1802 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1803 exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1804 flavor = exp->exp_flvr_old[1];
1805 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1806 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1807 exp->exp_flvr_old[0] = exp->exp_flvr;
1808 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1809 EXP_FLVR_UPDATE_EXPIRE;
1810 exp->exp_flvr = flavor;
1812 /* flavor change finished */
1813 exp->exp_flvr_changed = 0;
1814 LASSERT(exp->exp_flvr_adapt == 1);
/* if it's gss, we're only interested in root ctx init */
1817 if (req->rq_auth_gss &&
1818 !(req->rq_ctx_init &&
1819 (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1820 req->rq_auth_usr_ost))) {
1821 spin_unlock(&exp->exp_lock);
1822 CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1823 req->rq_auth_gss, req->rq_ctx_init,
1824 req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1825 req->rq_auth_usr_ost);
1829 exp->exp_flvr_adapt = 0;
1830 spin_unlock(&exp->exp_lock);
1832 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1833 req->rq_svc_ctx, &flavor);
/* if it equals the current flavor, we accept it, but need to
* deal with the reverse sec/ctx */
1838 if (likely(flavor_allowed(&exp->exp_flvr, req))) {
/* most cases should return here; we're only interested in
* gss root ctx init */
1841 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1842 (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1843 !req->rq_auth_usr_ost)) {
1844 spin_unlock(&exp->exp_lock);
/* if the flavor just changed, we should not proceed; just leave
* it alone: the current flavor will be discovered and replaced
* shortly, and we let _this_ rpc pass through */
1851 if (exp->exp_flvr_changed) {
1852 LASSERT(exp->exp_flvr_adapt);
1853 spin_unlock(&exp->exp_lock);
1857 if (exp->exp_flvr_adapt) {
1858 exp->exp_flvr_adapt = 0;
1859 CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1860 exp, exp->exp_flvr.sf_rpc,
1861 exp->exp_flvr_old[0].sf_rpc,
1862 exp->exp_flvr_old[1].sf_rpc);
1863 flavor = exp->exp_flvr;
1864 spin_unlock(&exp->exp_lock);
1866 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1870 CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1871 "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1872 exp->exp_flvr_old[0].sf_rpc,
1873 exp->exp_flvr_old[1].sf_rpc);
1874 spin_unlock(&exp->exp_lock);
1876 return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1881 if (exp->exp_flvr_expire[0]) {
1882 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1883 if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1884 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1885 "middle one ("CFS_DURATION_T")\n", exp,
1886 exp->exp_flvr.sf_rpc,
1887 exp->exp_flvr_old[0].sf_rpc,
1888 exp->exp_flvr_old[1].sf_rpc,
1889 exp->exp_flvr_expire[0] -
1890 cfs_time_current_sec());
1891 spin_unlock(&exp->exp_lock);
1895 CDEBUG(D_SEC, "mark middle expired\n");
1896 exp->exp_flvr_expire[0] = 0;
1898 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1899 exp->exp_flvr.sf_rpc,
1900 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1901 req->rq_flvr.sf_rpc);
/* now that it doesn't match the current flavor, the only way we can
* accept it is if it matches an old flavor which has not expired. */
1906 if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1907 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1908 if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1909 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1910 "oldest one ("CFS_DURATION_T")\n", exp,
1911 exp->exp_flvr.sf_rpc,
1912 exp->exp_flvr_old[0].sf_rpc,
1913 exp->exp_flvr_old[1].sf_rpc,
1914 exp->exp_flvr_expire[1] -
1915 cfs_time_current_sec());
1916 spin_unlock(&exp->exp_lock);
1920 CDEBUG(D_SEC, "mark oldest expired\n");
1921 exp->exp_flvr_expire[1] = 0;
1923 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1924 exp, exp->exp_flvr.sf_rpc,
1925 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1926 req->rq_flvr.sf_rpc);
1928 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1929 exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1930 exp->exp_flvr_old[1].sf_rpc);
1933 spin_unlock(&exp->exp_lock);
1935 CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
1936 "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1937 exp, exp->exp_obd->obd_name,
1938 req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1939 req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1940 req->rq_flvr.sf_rpc,
1941 exp->exp_flvr.sf_rpc,
1942 exp->exp_flvr_old[0].sf_rpc,
1943 exp->exp_flvr_expire[0] ?
1944 (unsigned long) (exp->exp_flvr_expire[0] -
1945 cfs_time_current_sec()) : 0,
1946 exp->exp_flvr_old[1].sf_rpc,
1947 exp->exp_flvr_expire[1] ?
1948 (unsigned long) (exp->exp_flvr_expire[1] -
1949 cfs_time_current_sec()) : 0);
1952 EXPORT_SYMBOL(sptlrpc_target_export_check);
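/*
* Example timeline for the rotation above (a sketch): with current flavor F2
* and a pending change to F3, the first request carrying F3 promotes F3 to
* exp_flvr, shifts F2 into exp_flvr_old[0] with an EXP_FLVR_UPDATE_EXPIRE
* grace window, and moves the previous old flavor into exp_flvr_old[1].
* Stragglers still using F2 are accepted until that window expires, after
* which they hit the "unauthorized flavor" CWARN above.
*/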
1954 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1955 struct sptlrpc_rule_set *rset)
1957 struct obd_export *exp;
1958 struct sptlrpc_flavor new_flvr;
1962 spin_lock(&obd->obd_dev_lock);
1964 cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1965 if (exp->exp_connection == NULL)
/* note if this export just had its flavor updated
* (exp_flvr_changed == 1), this will override the
* previous one. */
1971 spin_lock(&exp->exp_lock);
1972 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1973 exp->exp_connection->c_peer.nid,
1975 if (exp->exp_flvr_changed ||
1976 !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1977 exp->exp_flvr_old[1] = new_flvr;
1978 exp->exp_flvr_expire[1] = 0;
1979 exp->exp_flvr_changed = 1;
1980 exp->exp_flvr_adapt = 1;
1982 CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1983 exp, sptlrpc_part2name(exp->exp_sp_peer),
1984 exp->exp_flvr.sf_rpc,
1985 exp->exp_flvr_old[1].sf_rpc);
1987 spin_unlock(&exp->exp_lock);
1990 spin_unlock(&obd->obd_dev_lock);
1992 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1994 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1996 /* peer's claim is unreliable unless gss is being used */
1997 if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
2000 switch (req->rq_sp_from) {
2002 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
2003 DEBUG_REQ(D_ERROR, req, "faked source CLI");
2004 svc_rc = SECSVC_DROP;
2008 if (!req->rq_auth_usr_mdt) {
2009 DEBUG_REQ(D_ERROR, req, "faked source MDT");
2010 svc_rc = SECSVC_DROP;
2014 if (!req->rq_auth_usr_ost) {
2015 DEBUG_REQ(D_ERROR, req, "faked source OST");
2016 svc_rc = SECSVC_DROP;
2021 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2022 !req->rq_auth_usr_ost) {
2023 DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2024 svc_rc = SECSVC_DROP;
2029 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2030 svc_rc = SECSVC_DROP;
* Used by ptlrpc server, to perform transformation upon the request message
* of an incoming \a req. This must be the first thing done with an incoming
* request in the ptlrpc layer.
* \retval SECSVC_OK success, and req->rq_reqmsg points to the request message in
2042 * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
2043 * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2044 * reply message has been prepared.
2045 * \retval SECSVC_DROP failed, this request should be dropped.
2047 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2049 struct ptlrpc_sec_policy *policy;
2050 struct lustre_msg *msg = req->rq_reqbuf;
2055 LASSERT(req->rq_reqmsg == NULL);
2056 LASSERT(req->rq_repmsg == NULL);
2057 LASSERT(req->rq_svc_ctx == NULL);
2059 req->rq_req_swab_mask = 0;
2061 rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2064 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2068 CERROR("error unpacking request from %s x"LPU64"\n",
2069 libcfs_id2str(req->rq_peer), req->rq_xid);
2070 RETURN(SECSVC_DROP);
2073 req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2074 req->rq_sp_from = LUSTRE_SP_ANY;
2075 req->rq_auth_uid = INVALID_UID;
2076 req->rq_auth_mapped_uid = INVALID_UID;
2078 policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2080 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2081 RETURN(SECSVC_DROP);
2084 LASSERT(policy->sp_sops->accept);
2085 rc = policy->sp_sops->accept(req);
2086 sptlrpc_policy_put(policy);
2087 LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2088 LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
* if it's not the null flavor (which means an embedded packed msg),
* reset the swab mask for the coming inner msg unpacking.
2094 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2095 req->rq_req_swab_mask = 0;
2097 /* sanity check for the request source */
2098 rc = sptlrpc_svc_check_from(req, rc);
* Used by ptlrpc server, to allocate a reply buffer for \a req. On success,
* req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
* a buffer of \a msglen size.
2107 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2109 struct ptlrpc_sec_policy *policy;
2110 struct ptlrpc_reply_state *rs;
2114 LASSERT(req->rq_svc_ctx);
2115 LASSERT(req->rq_svc_ctx->sc_policy);
2117 policy = req->rq_svc_ctx->sc_policy;
2118 LASSERT(policy->sp_sops->alloc_rs);
2120 rc = policy->sp_sops->alloc_rs(req, msglen);
2121 if (unlikely(rc == -ENOMEM)) {
2122 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
2123 if (svcpt->scp_service->srv_max_reply_size <
2124 msglen + sizeof(struct ptlrpc_reply_state)) {
2125 /* Just return failure if the size is too big */
2126 CERROR("size of message is too big (%zd), %d allowed",
2127 msglen + sizeof(struct ptlrpc_reply_state),
2128 svcpt->scp_service->srv_max_reply_size);
2132 /* failed alloc, try emergency pool */
2133 rs = lustre_get_emerg_rs(svcpt);
2137 req->rq_reply_state = rs;
2138 rc = policy->sp_sops->alloc_rs(req, msglen);
2140 lustre_put_emerg_rs(rs);
2141 req->rq_reply_state = NULL;
LASSERT(rc != 0 ||
(req->rq_reply_state && req->rq_reply_state->rs_msg));
2152 * Used by ptlrpc server, to perform transformation upon reply message.
* \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
* \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2157 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2159 struct ptlrpc_sec_policy *policy;
2163 LASSERT(req->rq_svc_ctx);
2164 LASSERT(req->rq_svc_ctx->sc_policy);
2166 policy = req->rq_svc_ctx->sc_policy;
2167 LASSERT(policy->sp_sops->authorize);
2169 rc = policy->sp_sops->authorize(req);
2170 LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2176 * Used by ptlrpc server, to free reply_state.
2178 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2180 struct ptlrpc_sec_policy *policy;
2181 unsigned int prealloc;
2184 LASSERT(rs->rs_svc_ctx);
2185 LASSERT(rs->rs_svc_ctx->sc_policy);
2187 policy = rs->rs_svc_ctx->sc_policy;
2188 LASSERT(policy->sp_sops->free_rs);
2190 prealloc = rs->rs_prealloc;
2191 policy->sp_sops->free_rs(rs);
2194 lustre_put_emerg_rs(rs);
2198 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2200 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2203 cfs_atomic_inc(&ctx->sc_refcount);
2206 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2208 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2213 LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2214 if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
2215 if (ctx->sc_policy->sp_sops->free_ctx)
2216 ctx->sc_policy->sp_sops->free_ctx(ctx);
2218 req->rq_svc_ctx = NULL;
2221 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2223 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2228 LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2229 if (ctx->sc_policy->sp_sops->invalidate_ctx)
2230 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2232 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
/****************************************
* bulk security *
****************************************/
* Perform transformation upon the bulk data pointed to by \a desc. This is called
2240 * before transforming the request message.
2242 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2243 struct ptlrpc_bulk_desc *desc)
2245 struct ptlrpc_cli_ctx *ctx;
2247 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2249 if (!req->rq_pack_bulk)
2252 ctx = req->rq_cli_ctx;
2253 if (ctx->cc_ops->wrap_bulk)
2254 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2257 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
* This is called after unwrapping the reply message.
* Return the number of bytes of actual plain text received, or an error code.
2263 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2264 struct ptlrpc_bulk_desc *desc,
2267 struct ptlrpc_cli_ctx *ctx;
2270 LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2272 if (!req->rq_pack_bulk)
2273 return desc->bd_nob_transferred;
2275 ctx = req->rq_cli_ctx;
2276 if (ctx->cc_ops->unwrap_bulk) {
2277 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2281 return desc->bd_nob_transferred;
2283 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
* This is called after unwrapping the reply message.
* Return 0 for success or an error code.
2289 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2290 struct ptlrpc_bulk_desc *desc)
2292 struct ptlrpc_cli_ctx *ctx;
2295 LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2297 if (!req->rq_pack_bulk)
2300 ctx = req->rq_cli_ctx;
2301 if (ctx->cc_ops->unwrap_bulk) {
2302 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
* if everything went right, nob should equal nob_transferred.
* in the case of privacy mode, nob_transferred needs to be adjusted.
2311 if (desc->bd_nob != desc->bd_nob_transferred) {
2312 CERROR("nob %d doesn't match transferred nob %d",
2313 desc->bd_nob, desc->bd_nob_transferred);
2319 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2321 #ifdef HAVE_SERVER_SUPPORT
* Perform transformation upon an outgoing bulk read.
2325 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2326 struct ptlrpc_bulk_desc *desc)
2328 struct ptlrpc_svc_ctx *ctx;
2330 LASSERT(req->rq_bulk_read);
2332 if (!req->rq_pack_bulk)
2335 ctx = req->rq_svc_ctx;
2336 if (ctx->sc_policy->sp_sops->wrap_bulk)
2337 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2341 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
* Perform transformation upon an incoming bulk write.
2346 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2347 struct ptlrpc_bulk_desc *desc)
2349 struct ptlrpc_svc_ctx *ctx;
2352 LASSERT(req->rq_bulk_write);
* if it's in privacy mode, transferred should be >= expected; otherwise
* transferred should be == expected.
2358 if (desc->bd_nob_transferred < desc->bd_nob ||
2359 (desc->bd_nob_transferred > desc->bd_nob &&
2360 SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2361 SPTLRPC_BULK_SVC_PRIV)) {
2362 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2363 desc->bd_nob_transferred, desc->bd_nob);
2367 if (!req->rq_pack_bulk)
2370 ctx = req->rq_svc_ctx;
2371 if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2372 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2374 CERROR("error unwrap bulk: %d\n", rc);
/* return 0 to allow the reply to be sent */
2380 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2383 * Prepare buffers for incoming bulk write.
2385 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2386 struct ptlrpc_bulk_desc *desc)
2388 struct ptlrpc_svc_ctx *ctx;
2390 LASSERT(req->rq_bulk_write);
2392 if (!req->rq_pack_bulk)
2395 ctx = req->rq_svc_ctx;
2396 if (ctx->sc_policy->sp_sops->prep_bulk)
2397 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2401 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2403 #endif /* HAVE_SERVER_SUPPORT */
2405 /****************************************
2406 * user descriptor helpers *
2407 ****************************************/
2409 int sptlrpc_current_user_desc_size(void)
2414 ngroups = current_ngroups;
2416 if (ngroups > LUSTRE_MAX_GROUPS)
2417 ngroups = LUSTRE_MAX_GROUPS;
2421 return sptlrpc_user_desc_size(ngroups);
2423 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
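/*
* Example: the descriptor is a fixed ptlrpc_user_desc header followed by one
* __u32 per supplementary group, so a process in two groups needs
* sptlrpc_user_desc_size(2), i.e. sizeof(struct ptlrpc_user_desc) + 2 * 4
* bytes; the group count is capped at LUSTRE_MAX_GROUPS.
*/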
2425 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2427 struct ptlrpc_user_desc *pud;
2429 pud = lustre_msg_buf(msg, offset, 0);
2431 pud->pud_uid = current_uid();
2432 pud->pud_gid = current_gid();
2433 pud->pud_fsuid = current_fsuid();
2434 pud->pud_fsgid = current_fsgid();
2435 pud->pud_cap = cfs_curproc_cap_pack();
2436 pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
task_lock(current);
if (pud->pud_ngroups > current_ngroups)
2441 pud->pud_ngroups = current_ngroups;
2442 memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2443 pud->pud_ngroups * sizeof(__u32));
2444 task_unlock(current);
2449 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2451 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2453 struct ptlrpc_user_desc *pud;
2456 pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2461 __swab32s(&pud->pud_uid);
2462 __swab32s(&pud->pud_gid);
2463 __swab32s(&pud->pud_fsuid);
2464 __swab32s(&pud->pud_fsgid);
2465 __swab32s(&pud->pud_cap);
2466 __swab32s(&pud->pud_ngroups);
2469 if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2470 CERROR("%u groups is too large\n", pud->pud_ngroups);
2474 if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2475 msg->lm_buflens[offset]) {
2476 CERROR("%u groups are claimed but bufsize only %u\n",
2477 pud->pud_ngroups, msg->lm_buflens[offset]);
2482 for (i = 0; i < pud->pud_ngroups; i++)
2483 __swab32s(&pud->pud_groups[i]);
2488 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
/****************************************
* misc helpers *
****************************************/
2494 const char * sec2target_str(struct ptlrpc_sec *sec)
2496 if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2498 if (sec_is_reverse(sec))
2500 return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2502 EXPORT_SYMBOL(sec2target_str);
2505 * return true if the bulk data is protected
2507 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2509 switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2510 case SPTLRPC_BULK_SVC_INTG:
2511 case SPTLRPC_BULK_SVC_PRIV:
2517 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
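/*
* Example: a flavor whose bulk service is SPTLRPC_BULK_SVC_INTG or
* SPTLRPC_BULK_SVC_PRIV returns 1 here, which is what makes
* sptlrpc_req_set_flavor() set rq_pack_bulk on bulk read/write rpcs.
*/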
2519 /****************************************
* crypto API helper/alloc blkcipher *
2521 ****************************************/
2523 /****************************************
2524 * initialize/finalize *
2525 ****************************************/
2527 int sptlrpc_init(void)
2531 rwlock_init(&policy_lock);
2533 rc = sptlrpc_gc_init();
2537 rc = sptlrpc_conf_init();
2541 rc = sptlrpc_enc_pool_init();
2545 rc = sptlrpc_null_init();
2549 rc = sptlrpc_plain_init();
2553 rc = sptlrpc_lproc_init();
2560 sptlrpc_plain_fini();
2562 sptlrpc_null_fini();
2564 sptlrpc_enc_pool_fini();
2566 sptlrpc_conf_fini();
2573 void sptlrpc_fini(void)
2575 sptlrpc_lproc_fini();
2576 sptlrpc_plain_fini();
2577 sptlrpc_null_fini();
2578 sptlrpc_enc_pool_fini();
2579 sptlrpc_conf_fini();