/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC

#include <libcfs/libcfs.h>
#ifndef __KERNEL__
#include <liblustre.h>
#include <libcfs/list.h>
#else
#include <linux/crypto.h>
#include <linux/key.h>
#endif

#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"
/***********************************************
 * policy registers                            *
 ***********************************************/

static rwlock_t policy_lock;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
        NULL,
};

int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
{
        __u16 number = policy->sp_policy;

        LASSERT(policy->sp_name);
        LASSERT(policy->sp_cops);
        LASSERT(policy->sp_sops);

        if (number >= SPTLRPC_POLICY_MAX)
                return -EINVAL;

        write_lock(&policy_lock);
        if (unlikely(policies[number])) {
                write_unlock(&policy_lock);
                return -EALREADY;
        }
        policies[number] = policy;
        write_unlock(&policy_lock);

        CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_register_policy);
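/*
 * illustrative sketch (not from this file): a policy module would typically
 * register itself at load time with a statically defined descriptor whose
 * fields match the LASSERTs above; the names below are hypothetical:
 *
 *      static struct ptlrpc_sec_policy my_policy = {
 *              .sp_owner  = THIS_MODULE,
 *              .sp_name   = "sec.mypolicy",
 *              .sp_policy = SPTLRPC_POLICY_NULL,
 *              .sp_cops   = &my_sec_cops,
 *              .sp_sops   = &my_sec_sops,
 *      };
 *
 *      rc = sptlrpc_register_policy(&my_policy);
 *
 * and call sptlrpc_unregister_policy() from its module exit hook.
 */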
int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
{
        __u16 number = policy->sp_policy;

        LASSERT(number < SPTLRPC_POLICY_MAX);

        write_lock(&policy_lock);
        if (unlikely(policies[number] == NULL)) {
                write_unlock(&policy_lock);
                CERROR("%s: already unregistered\n", policy->sp_name);
                return -EINVAL;
        }

        LASSERT(policies[number] == policy);
        policies[number] = NULL;
        write_unlock(&policy_lock);

        CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_unregister_policy);
struct ptlrpc_sec_policy * sptlrpc_rpcflavor2policy(__u16 flavor)
{
        static DECLARE_MUTEX(load_mutex);
        static atomic_t           loaded = ATOMIC_INIT(0);
        struct ptlrpc_sec_policy *policy;
        __u16                     number = RPC_FLVR_POLICY(flavor), flag = 0;

        if (number >= SPTLRPC_POLICY_MAX)
                return NULL;

        while (1) {
                read_lock(&policy_lock);
                policy = policies[number];
                if (policy && !try_module_get(policy->sp_owner))
                        policy = NULL;
                if (policy == NULL)
                        flag = atomic_read(&loaded);
                read_unlock(&policy_lock);

                if (policy != NULL || flag != 0 ||
                    number != SPTLRPC_POLICY_GSS)
                        break;

                /* try to load gss module, once */
                mutex_down(&load_mutex);
                if (atomic_read(&loaded) == 0) {
                        if (request_module("ptlrpc_gss") == 0)
                                CWARN("module ptlrpc_gss loaded on demand\n");
                        else
                                CERROR("Unable to load module ptlrpc_gss\n");

                        atomic_set(&loaded, 1);
                }
                mutex_up(&load_mutex);
        }

        return policy;
}
__u16 sptlrpc_name2rpcflavor(const char *name)
{
        if (!strcmp(name, "null"))
                return SPTLRPC_FLVR_NULL;
        if (!strcmp(name, "plain"))
                return SPTLRPC_FLVR_PLAIN;
        if (!strcmp(name, "krb5n"))
                return SPTLRPC_FLVR_KRB5N;
        if (!strcmp(name, "krb5a"))
                return SPTLRPC_FLVR_KRB5A;
        if (!strcmp(name, "krb5i"))
                return SPTLRPC_FLVR_KRB5I;
        if (!strcmp(name, "krb5p"))
                return SPTLRPC_FLVR_KRB5P;

        return SPTLRPC_FLVR_INVALID;
}
EXPORT_SYMBOL(sptlrpc_name2rpcflavor);
const char *sptlrpc_rpcflavor2name(__u16 flavor)
{
        switch (flavor) {
        case SPTLRPC_FLVR_NULL:
                return "null";
        case SPTLRPC_FLVR_PLAIN:
                return "plain";
        case SPTLRPC_FLVR_KRB5N:
                return "krb5n";
        case SPTLRPC_FLVR_KRB5A:
                return "krb5a";
        case SPTLRPC_FLVR_KRB5I:
                return "krb5i";
        case SPTLRPC_FLVR_KRB5P:
                return "krb5p";
        default:
                CERROR("invalid rpc flavor 0x%x(p%u,s%u,v%u)\n", flavor,
                       RPC_FLVR_POLICY(flavor), RPC_FLVR_MECH(flavor),
                       RPC_FLVR_SVC(flavor));
        }
        return "unknown";
}
EXPORT_SYMBOL(sptlrpc_rpcflavor2name);
int sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
{
        char *bulk;

        if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL)
                bulk = "bulkp";
        else if (sf->sf_bulk_hash != BULK_HASH_ALG_NULL)
                bulk = "bulki";
        else
                bulk = "bulkn";

        snprintf(buf, bufsize, "%s-%s:%s/%s",
                 sptlrpc_rpcflavor2name(sf->sf_rpc), bulk,
                 sptlrpc_get_hash_name(sf->sf_bulk_hash),
                 sptlrpc_get_ciph_name(sf->sf_bulk_ciph));
        return 0;
}
EXPORT_SYMBOL(sptlrpc_flavor2name);
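/*
 * usage sketch (hypothetical caller, for illustration only):
 *
 *      char str[64];
 *
 *      sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str));
 *      CDEBUG(D_SEC, "flavor: %s\n", str);
 *
 * per the snprintf format above, the output looks like
 * "<rpc>-<bulk>:<hash>/<cipher>", with the hash and cipher names coming
 * from sptlrpc_get_hash_name()/sptlrpc_get_ciph_name().
 */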
/**************************************************
 * client context APIs                            *
 **************************************************/

static
struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
{
        struct vfs_cred vcred;
        int create = 1, remove_dead = 1;

        LASSERT(sec);
        LASSERT(sec->ps_policy->sp_cops->lookup_ctx);

        if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
                                     PTLRPC_SEC_FL_ROOTONLY)) {
                vcred.vc_uid = 0;
                vcred.vc_gid = 0;
                if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
                        create = 0;
                        remove_dead = 0;
                }
        } else {
                vcred.vc_uid = cfs_current()->uid;
                vcred.vc_gid = cfs_current()->gid;
        }

        return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
                                                   create, remove_dead);
}
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(atomic_read(&ctx->cc_refcount) > 0);
        atomic_inc(&ctx->cc_refcount);
        return ctx;
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_get);

void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
{
        struct ptlrpc_sec *sec = ctx->cc_sec;

        LASSERT(sec);
        LASSERT(atomic_read(&ctx->cc_refcount));

        if (!atomic_dec_and_test(&ctx->cc_refcount))
                return;

        sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
/*
 * expire the context immediately.
 * the caller must hold at least 1 ref on the ctx.
 */
void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
        LASSERT(ctx->cc_ops->die);
        ctx->cc_ops->die(ctx, 0);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);

void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
{
        struct ptlrpc_request *req, *next;

        spin_lock(&ctx->cc_lock);
        list_for_each_entry_safe(req, next, &ctx->cc_req_list, rq_ctx_chain) {
                list_del_init(&req->rq_ctx_chain);
                ptlrpc_client_wake_req(req);
        }
        spin_unlock(&ctx->cc_lock);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
{
        LASSERT(ctx->cc_ops);

        if (ctx->cc_ops->display == NULL)
                return 0;

        return ctx->cc_ops->display(ctx, buf, bufsize);
}
static int import_sec_check_expire(struct obd_import *imp)
{
        int adapt = 0;

        spin_lock(&imp->imp_lock);
        if (imp->imp_sec_expire &&
            imp->imp_sec_expire < cfs_time_current_sec()) {
                adapt = 1;
                imp->imp_sec_expire = 0;
        }
        spin_unlock(&imp->imp_lock);

        if (!adapt)
                return 0;

        CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
        return sptlrpc_import_sec_adapt(imp, NULL, 0);
}

static int import_sec_validate_get(struct obd_import *imp,
                                   struct ptlrpc_sec **sec)
{
        int rc;

        if (unlikely(imp->imp_sec_expire)) {
                rc = import_sec_check_expire(imp);
                if (rc)
                        return rc;
        }

        *sec = sptlrpc_import_sec_ref(imp);
        if (*sec == NULL) {
                CERROR("import %p (%s) with no sec\n",
                       imp, ptlrpc_import_state_name(imp->imp_state));
                return -EACCES;
        }

        if (unlikely((*sec)->ps_dying)) {
                CERROR("attempt to use dying sec %p\n", sec);
                sptlrpc_sec_put(*sec);
                return -EACCES;
        }

        return 0;
}
int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
{
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_sec *sec;
        int                rc;
        ENTRY;

        LASSERT(!req->rq_cli_ctx);
        LASSERT(imp);

        rc = import_sec_validate_get(imp, &sec);
        if (rc)
                RETURN(rc);

        req->rq_cli_ctx = get_my_ctx(sec);

        sptlrpc_sec_put(sec);

        if (!req->rq_cli_ctx) {
                CERROR("req %p: fail to get context\n", req);
                RETURN(-ENOMEM);
        }

        RETURN(0);
}
/*
 * if @sync == 0, this function should return quickly without sleeping;
 * otherwise it might trigger a ctx destroying rpc to the server.
 */
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
        ENTRY;

        LASSERT(req);
        LASSERT(req->rq_cli_ctx);

        /* request might be asked to release earlier while still
         * in the context waiting list.
         */
        if (!list_empty(&req->rq_ctx_chain)) {
                spin_lock(&req->rq_cli_ctx->cc_lock);
                list_del_init(&req->rq_ctx_chain);
                spin_unlock(&req->rq_cli_ctx->cc_lock);
        }

        sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
        req->rq_cli_ctx = NULL;
        EXIT;
}
static
int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
                           struct ptlrpc_cli_ctx *oldctx,
                           struct ptlrpc_cli_ctx *newctx)
{
        struct sptlrpc_flavor   old_flvr;
        char                   *reqmsg;
        int                     reqmsg_size;
        int                     rc;

        if (likely(oldctx->cc_sec == newctx->cc_sec))
                return 0;

        LASSERT(req->rq_reqmsg);
        LASSERT(req->rq_reqlen);
        LASSERT(req->rq_replen);

        CWARN("req %p: switch ctx %p -> %p, switch sec %p(%s) -> %p(%s)\n",
              req, oldctx, newctx,
              oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
              newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);

        /* save flavor */
        old_flvr = req->rq_flvr;

        /* save request message */
        reqmsg_size = req->rq_reqlen;
        OBD_ALLOC(reqmsg, reqmsg_size);
        if (reqmsg == NULL)
                return -ENOMEM;
        memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);

        /* release old req/rep buf */
        req->rq_cli_ctx = oldctx;
        sptlrpc_cli_free_reqbuf(req);
        sptlrpc_cli_free_repbuf(req);
        req->rq_cli_ctx = newctx;

        /* recalculate the flavor */
        sptlrpc_req_set_flavor(req, 0);

        /* alloc new request buffer
         * we don't need to alloc reply buffer here, leave it to the
         * rest procedure of ptlrpc
         */
        rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
        if (!rc) {
                LASSERT(req->rq_reqmsg);
                memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
        } else {
                CWARN("failed to alloc reqbuf: %d\n", rc);
                req->rq_flvr = old_flvr;
        }

        OBD_FREE(reqmsg, reqmsg_size);
        return rc;
}
/*
 * if the current context has died, or if we resend after the flavor was
 * switched, call this function to switch context. if no switch is needed,
 * the request will end up with the same context.
 *
 * in any case of failure, restore the old context - a request must always
 * have a context.
 */
int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
        struct ptlrpc_cli_ctx *newctx;
        int                    rc;
        ENTRY;

        LASSERT(oldctx);

        sptlrpc_cli_ctx_get(oldctx);
        sptlrpc_req_put_ctx(req, 0);

        rc = sptlrpc_req_get_ctx(req);
        if (unlikely(rc)) {
                LASSERT(!req->rq_cli_ctx);

                /* restore old ctx */
                req->rq_cli_ctx = oldctx;
                RETURN(rc);
        }

        newctx = req->rq_cli_ctx;
        LASSERT(newctx);

        if (unlikely(newctx == oldctx)) {
                if (test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags)) {
                        /*
                         * still getting the old ctx usually means the
                         * system is busy
                         */
                        CWARN("ctx (%p, fl %lx) doesn't switch, "
                              "relax a little bit\n",
                              newctx, newctx->cc_flags);

                        cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, HZ);
                }
        } else {
                rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
                if (rc) {
                        /* restore old ctx */
                        sptlrpc_req_put_ctx(req, 0);
                        req->rq_cli_ctx = oldctx;
                        RETURN(rc);
                }

                LASSERT(req->rq_cli_ctx == newctx);
        }

        sptlrpc_cli_ctx_put(oldctx, 1);
        RETURN(0);
}
EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
static
int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
{
        if (cli_ctx_is_refreshed(ctx))
                return 1;
        return 0;
}

static
int ctx_refresh_timeout(void *data)
{
        struct ptlrpc_request *req = data;
        int rc;

        /* conn_cnt is needed in expire_one_request */
        lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);

        rc = ptlrpc_expire_one_request(req, 1);
        /* if we started recovery, we should mark this ctx dead; otherwise
         * in case lgssd died nobody would retire this ctx and subsequent
         * connect attempts would keep finding the same ctx, causing a
         * deadlock. there's an assumption that the expire time of the
         * request should be later than the context refresh expire time.
         */
        if (rc == 0)
                req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
        return rc;
}

static
void ctx_refresh_interrupt(void *data)
{
        struct ptlrpc_request *req = data;

        spin_lock(&req->rq_lock);
        req->rq_intr = 1;
        spin_unlock(&req->rq_lock);
}

static
void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
{
        spin_lock(&ctx->cc_lock);
        if (!list_empty(&req->rq_ctx_chain))
                list_del_init(&req->rq_ctx_chain);
        spin_unlock(&ctx->cc_lock);
}
/*
 * the status of the context can be changed by other threads at any time,
 * and we allow this race. but once we return 0, the caller will assume
 * the context is up to date and keep using it until the owning rpc is done.
 *
 * @timeout:
 * < 0  - don't wait
 * = 0  - wait until success or a fatal error occurs
 * > 0  - timeout value
 *
 * return 0 only if the context is up to date.
 */
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
{
        struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec      *sec;
        struct l_wait_info      lwi;
        int                     rc;
        ENTRY;

        LASSERT(ctx);

        if (req->rq_ctx_init || req->rq_ctx_fini)
                RETURN(0);

        /*
         * during the process a request's context might even change type
         * (e.g. from gss ctx to plain ctx), so each loop we need to re-check
         */
again:
        rc = import_sec_validate_get(req->rq_import, &sec);
        if (rc)
                RETURN(rc);

        if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc)
                sptlrpc_req_replace_dead_ctx(req);

        sptlrpc_sec_put(sec);

        if (cli_ctx_is_eternal(ctx))
                RETURN(0);

        if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
                LASSERT(ctx->cc_ops->refresh);
                ctx->cc_ops->refresh(ctx);
        }
        LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);

        LASSERT(ctx->cc_ops->validate);
        if (ctx->cc_ops->validate(ctx) == 0) {
                req_off_ctx_list(req, ctx);
                RETURN(0);
        }

        if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
                req->rq_err = 1;
                req_off_ctx_list(req, ctx);
                RETURN(-EPERM);
        }
        /* This is subtle. For resent messages we have to keep the original
         * context to survive the following situation:
         *  1. the request is sent to the server
         *  2. recovery is kicked off
         *  3. recovery finishes, the request is marked as resent
         *  4. the request is resent
         *  5. an old reply is received from the server (because the xid is
         *     the same)
         *  6. the reply is verified (has to succeed)
         *  7. a new reply arrives from the server, lnet drops it
         *
         * Note we can't simply change the xid of a resent request because
         * the server relies on it for reply reconstruction.
         *
         * Commonly the original context should be up to date because we
         * have a nice expiry time. And the server will keep its half of
         * the context because we at least hold a ref on the old context,
         * which prevents the context destroy RPC from being sent. So the
         * server can still accept the request and finish the RPC. Two cases:
         *  1. If the server side context has been trimmed, NO_CONTEXT will
         *     be returned, and gss_cli_ctx_verify/unseal will switch to the
         *     new context by force.
         *  2. The current context was never refreshed, then we are fine: we
         *     never really sent a request with the old context before.
         */
        if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
            unlikely(req->rq_reqmsg) &&
            lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
                req_off_ctx_list(req, ctx);
                RETURN(0);
        }
        if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
                /*
                 * don't switch ctx if import was deactivated
                 */
                if (req->rq_import->imp_deactive) {
                        req_off_ctx_list(req, ctx);
                        req->rq_err = 1;
                        RETURN(-EINTR);
                }

                rc = sptlrpc_req_replace_dead_ctx(req);
                if (rc) {
                        LASSERT(ctx == req->rq_cli_ctx);
                        CERROR("req %p: failed to replace dead ctx %p: %d\n",
                               req, ctx, rc);
                        req->rq_err = 1;
                        LASSERT(list_empty(&req->rq_ctx_chain));
                        RETURN(rc);
                }

                CWARN("req %p: replace dead ctx %p => ctx %p (%u->%s)\n",
                      req, ctx, req->rq_cli_ctx,
                      req->rq_cli_ctx->cc_vcred.vc_uid,
                      sec2target_str(req->rq_cli_ctx->cc_sec));

                ctx = req->rq_cli_ctx;
                LASSERT(list_empty(&req->rq_ctx_chain));

                goto again;
        }

        /* Now we're sure this context is during upcall, add myself into
         * the waiting list
         */
        spin_lock(&ctx->cc_lock);
        if (list_empty(&req->rq_ctx_chain))
                list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
        spin_unlock(&ctx->cc_lock);

        if (timeout < 0)
                RETURN(-EWOULDBLOCK);
        /* Clear any flags that may be present from previous sends */
        LASSERT(req->rq_receiving_reply == 0);
        spin_lock(&req->rq_lock);
        req->rq_err = 0;
        req->rq_timedout = 0;
        req->rq_resend = 0;
        req->rq_restart = 0;
        spin_unlock(&req->rq_lock);

        lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
                               ctx_refresh_interrupt, req);
        rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);

        /* following cases could lead us here:
         * - successfully refreshed;
         * - interrupted;
         * - timedout, and we don't want to recover from the failure;
         * - timedout, and woken up upon recovery finished;
         * - someone else marked this ctx dead by force;
         * - someone invalidated the req and called ptlrpc_client_wake_req(),
         *   e.g. ptlrpc_abort_inflight();
         */
        if (!cli_ctx_is_refreshed(ctx)) {
                /* timed out or interrupted */
                req_off_ctx_list(req, ctx);

                LASSERT(rc != 0);
                RETURN(rc);
        }

        goto again;
}
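/*
 * caller sketch (illustrative): per the contract documented above
 * sptlrpc_req_refresh_ctx(), a non-blocking caller passes a negative
 * timeout and handles -EWOULDBLOCK, while a blocking caller passes 0:
 *
 *      rc = sptlrpc_req_refresh_ctx(req, -1);
 *      if (rc == -EWOULDBLOCK)
 *              ... ctx still refreshing, request stays on cc_req_list ...
 *      else if (rc)
 *              ... fatal (e.g. -EPERM, -EINTR), give up this request ...
 */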
/*
 * Note this can be called in two situations:
 * - new request from ptlrpc_pre_req(), with proper @opcode
 * - old request which changed ctx in the middle, with @opcode == 0
 */
void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
{
        struct ptlrpc_sec *sec;

        LASSERT(req->rq_import);
        LASSERT(req->rq_cli_ctx);
        LASSERT(req->rq_cli_ctx->cc_sec);
        LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);

        /* special security flags according to opcode */
        switch (opcode) {
        case OST_READ:
                req->rq_bulk_read = 1;
                break;
        case OST_WRITE:
                req->rq_bulk_write = 1;
                break;
        case SEC_CTX_INIT:
                req->rq_ctx_init = 1;
                break;
        case SEC_CTX_FINI:
                req->rq_ctx_fini = 1;
                break;
        case 0:
                /* init/fini rpc won't be resent, so can't be here */
                LASSERT(req->rq_ctx_init == 0);
                LASSERT(req->rq_ctx_fini == 0);

                /* cleanup flags, which should be recalculated */
                req->rq_pack_udesc = 0;
                req->rq_pack_bulk = 0;
                break;
        }

        sec = req->rq_cli_ctx->cc_sec;

        spin_lock(&sec->ps_lock);
        req->rq_flvr = sec->ps_flvr;
        spin_unlock(&sec->ps_lock);

        /* force SVC_NULL for context initiation rpc, SVC_INTG for context
         * destruction rpc */
        if (unlikely(req->rq_ctx_init))
                rpc_flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
        else if (unlikely(req->rq_ctx_fini))
                rpc_flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);

        /* user descriptor flag, null security can't do it anyway */
        if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
            (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
                req->rq_pack_udesc = 1;

        /* bulk security flag */
        if ((req->rq_bulk_read || req->rq_bulk_write) &&
            (req->rq_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL ||
             req->rq_flvr.sf_bulk_hash != BULK_HASH_ALG_NULL))
                req->rq_pack_bulk = 1;
}
void sptlrpc_request_out_callback(struct ptlrpc_request *req)
{
        if (RPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
                return;

        LASSERT(req->rq_clrbuf);
        if (req->rq_pool || !req->rq_reqbuf)
                return;

        OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
        req->rq_reqbuf = NULL;
        req->rq_reqbuf_len = 0;
}
/*
 * check whether the current user has a valid context for an import or not.
 * might repeatedly try in case of non-fatal errors.
 * return 0 on success, < 0 on failure
 */
int sptlrpc_import_check_ctx(struct obd_import *imp)
{
        struct ptlrpc_sec     *sec;
        struct ptlrpc_cli_ctx *ctx;
        struct ptlrpc_request *req = NULL;
        int rc;
        ENTRY;

        might_sleep();

        sec = sptlrpc_import_sec_ref(imp);
        ctx = get_my_ctx(sec);
        sptlrpc_sec_put(sec);

        if (!ctx)
                RETURN(-ENOMEM);

        if (cli_ctx_is_eternal(ctx) ||
            ctx->cc_ops->validate(ctx) == 0) {
                sptlrpc_cli_ctx_put(ctx, 1);
                RETURN(0);
        }

        if (cli_ctx_is_error(ctx)) {
                sptlrpc_cli_ctx_put(ctx, 1);
                RETURN(-EACCES);
        }

        OBD_ALLOC_PTR(req);
        if (!req)
                RETURN(-ENOMEM);

        spin_lock_init(&req->rq_lock);
        atomic_set(&req->rq_refcount, 10000);
        CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
        cfs_waitq_init(&req->rq_reply_waitq);
        req->rq_import = imp;
        req->rq_flvr = sec->ps_flvr;
        req->rq_cli_ctx = ctx;

        rc = sptlrpc_req_refresh_ctx(req, 0);
        LASSERT(list_empty(&req->rq_ctx_chain));
        sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
        OBD_FREE_PTR(req);

        RETURN(rc);
}
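/*
 * usage sketch (hypothetical caller): typically invoked before starting an
 * operation on an import, to make sure the calling user can obtain a valid
 * context at all:
 *
 *      rc = sptlrpc_import_check_ctx(imp);
 *      if (rc)
 *              return rc;      - no usable context for the current user
 */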
int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        int rc = 0;
        ENTRY;

        LASSERT(ctx);
        LASSERT(ctx->cc_sec);
        LASSERT(req->rq_reqbuf || req->rq_clrbuf);

        /* we wrap the bulk request here because now we can be sure
         * the context is up to date.
         */
        if (req->rq_bulk) {
                rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
                if (rc)
                        RETURN(rc);
        }

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(ctx->cc_ops->sign);
                rc = ctx->cc_ops->sign(ctx, req);
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(ctx->cc_ops->seal);
                rc = ctx->cc_ops->seal(ctx, req);
                break;
        default:
                LBUG();
        }

        if (rc == 0) {
                LASSERT(req->rq_reqdata_len);
                LASSERT(req->rq_reqdata_len % 8 == 0);
                LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
        }

        RETURN(rc);
}
static int do_cli_unwrap_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        __u16                  rpc_flvr;
        int                    rc;
        ENTRY;

        LASSERT(ctx);
        LASSERT(ctx->cc_sec);
        LASSERT(req->rq_repbuf);
        LASSERT(req->rq_repdata);
        LASSERT(req->rq_repmsg == NULL);

        if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
                CERROR("replied data length %d too small\n",
                       req->rq_repdata_len);
                RETURN(-EPROTO);
        }

        /* v2 message, check request/reply policy match */
        rpc_flvr = WIRE_FLVR_RPC(req->rq_repdata->lm_secflvr);

        if (req->rq_repdata->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED)
                __swab16s(&rpc_flvr);

        if (RPC_FLVR_POLICY(rpc_flvr) !=
            RPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
                CERROR("request policy was %u while reply with %u\n",
                       RPC_FLVR_POLICY(req->rq_flvr.sf_rpc),
                       RPC_FLVR_POLICY(rpc_flvr));
                RETURN(-EPROTO);
        }

        /* do nothing if it's null policy; otherwise unpack the
         * wrapper message */
        if (RPC_FLVR_POLICY(rpc_flvr) != SPTLRPC_POLICY_NULL &&
            lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len))
                RETURN(-EPROTO);

        switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
        case SPTLRPC_SVC_NULL:
        case SPTLRPC_SVC_AUTH:
        case SPTLRPC_SVC_INTG:
                LASSERT(ctx->cc_ops->verify);
                rc = ctx->cc_ops->verify(ctx, req);
                break;
        case SPTLRPC_SVC_PRIV:
                LASSERT(ctx->cc_ops->unseal);
                rc = ctx->cc_ops->unseal(ctx, req);
                break;
        default:
                LBUG();
        }

        LASSERT(rc || req->rq_repmsg || req->rq_resend);
        RETURN(rc);
}
/*
 * by the time this is called, the reply buffer should have been un-posted,
 * so nothing is going to change.
 */
int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
{
        LASSERT(req->rq_repbuf);
        LASSERT(req->rq_repdata == NULL);
        LASSERT(req->rq_repmsg == NULL);
        LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);

        if (req->rq_reply_off == 0 &&
            (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
                CERROR("real reply with offset 0\n");
                return -EPROTO;
        }

        if (req->rq_reply_off % 8 != 0) {
                CERROR("reply at odd offset %u\n", req->rq_reply_off);
                return -EPROTO;
        }

        req->rq_repdata = (struct lustre_msg *)
                          (req->rq_repbuf + req->rq_reply_off);
        req->rq_repdata_len = req->rq_nob_received;

        return do_cli_unwrap_reply(req);
}
/*
 * Upon being called, the receive buffer might still be posted, so the reply
 * data might be changed at any time, whether or not we hold rq_lock. we
 * expect rq_reply_off to be 0, and rq_nob_received is the early reply size.
 *
 * we allocate a separate ptlrpc_request and reply buffer for early reply
 * processing, return 0 and @req_ret is a duplicated ptlrpc_request. caller
 * must call sptlrpc_cli_finish_early_reply() on the returned request to
 * release it. if anything goes wrong @req_ret will not be set.
 */
int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
                                   struct ptlrpc_request **req_ret)
{
        struct ptlrpc_request  *early_req;
        char                   *early_buf;
        int                     early_bufsz, early_size;
        int                     rc;
        ENTRY;

        OBD_ALLOC_PTR(early_req);
        if (early_req == NULL)
                RETURN(-ENOMEM);

        early_size = req->rq_nob_received;
        early_bufsz = size_roundup_power2(early_size);
        OBD_ALLOC(early_buf, early_bufsz);
        if (early_buf == NULL)
                GOTO(err_req, rc = -ENOMEM);

        /* sanity checks and copying data out, do it inside spinlock */
        spin_lock(&req->rq_lock);

        if (req->rq_replied) {
                spin_unlock(&req->rq_lock);
                GOTO(err_buf, rc = -EALREADY);
        }

        LASSERT(req->rq_repbuf);
        LASSERT(req->rq_repdata == NULL);
        LASSERT(req->rq_repmsg == NULL);

        if (req->rq_reply_off != 0) {
                CERROR("early reply with offset %u\n", req->rq_reply_off);
                spin_unlock(&req->rq_lock);
                GOTO(err_buf, rc = -EPROTO);
        }

        if (req->rq_nob_received != early_size) {
                /* even if another early reply arrived, the size should be
                 * the same */
                CERROR("data size has changed from %u to %u\n",
                       early_size, req->rq_nob_received);
                spin_unlock(&req->rq_lock);
                GOTO(err_buf, rc = -EINVAL);
        }

        if (req->rq_nob_received < sizeof(struct lustre_msg)) {
                CERROR("early reply length %d too small\n",
                       req->rq_nob_received);
                spin_unlock(&req->rq_lock);
                GOTO(err_buf, rc = -EALREADY);
        }

        memcpy(early_buf, req->rq_repbuf, early_size);
        spin_unlock(&req->rq_lock);

        early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
        early_req->rq_flvr = req->rq_flvr;
        early_req->rq_repbuf = early_buf;
        early_req->rq_repbuf_len = early_bufsz;
        early_req->rq_repdata = (struct lustre_msg *) early_buf;
        early_req->rq_repdata_len = early_size;
        early_req->rq_early = 1;

        rc = do_cli_unwrap_reply(early_req);
        if (rc) {
                DEBUG_REQ(D_ADAPTTO, early_req,
                          "error %d unwrap early reply", rc);
                GOTO(err_ctx, rc);
        }

        LASSERT(early_req->rq_repmsg);
        *req_ret = early_req;
        RETURN(0);

err_ctx:
        sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
err_buf:
        OBD_FREE(early_buf, early_bufsz);
err_req:
        OBD_FREE_PTR(early_req);
        RETURN(rc);
}
void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
{
        LASSERT(early_req->rq_repbuf);
        LASSERT(early_req->rq_repdata);
        LASSERT(early_req->rq_repmsg);

        sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
        OBD_FREE(early_req->rq_repbuf, early_req->rq_repbuf_len);
        OBD_FREE_PTR(early_req);
}
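/*
 * usage sketch (illustrative) for the early reply pair above:
 *
 *      struct ptlrpc_request *early_req;
 *
 *      rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *      if (rc == 0) {
 *              ... consume early_req->rq_repmsg ...
 *              sptlrpc_cli_finish_early_reply(early_req);
 *      }
 *
 * as documented above sptlrpc_cli_unwrap_early_reply(), the duplicated
 * request must always be released with sptlrpc_cli_finish_early_reply().
 */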
/**************************************************
 * sec ID                                         *
 **************************************************/

/*
 * "fixed" secs (e.g. null) use sec_id < 0
 */
static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);

int sptlrpc_get_next_secid(void)
{
        return atomic_inc_return(&sptlrpc_sec_id);
}
EXPORT_SYMBOL(sptlrpc_get_next_secid);
/**************************************************
 * client side high-level security APIs           *
 **************************************************/

static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
                                   int grace, int force)
{
        struct ptlrpc_sec_policy *policy = sec->ps_policy;

        LASSERT(policy->sp_cops);
        LASSERT(policy->sp_cops->flush_ctx_cache);

        return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
}

static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
{
        struct ptlrpc_sec_policy *policy = sec->ps_policy;

        LASSERT(atomic_read(&sec->ps_refcount) == 0);
        LASSERT(atomic_read(&sec->ps_nctx) == 0);
        LASSERT(policy->sp_cops->destroy_sec);

        CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);

        policy->sp_cops->destroy_sec(sec);
        sptlrpc_policy_put(policy);
}

void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
{
        sec_cop_destroy_sec(sec);
}
EXPORT_SYMBOL(sptlrpc_sec_destroy);
static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
        LASSERT(atomic_read(&sec->ps_refcount) > 0);

        if (sec->ps_policy->sp_cops->kill_sec) {
                sec->ps_policy->sp_cops->kill_sec(sec);

                sec_cop_flush_ctx_cache(sec, -1, 1, 1);
        }
}

struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
{
        if (sec) {
                LASSERT(atomic_read(&sec->ps_refcount) > 0);
                atomic_inc(&sec->ps_refcount);
        }

        return sec;
}
EXPORT_SYMBOL(sptlrpc_sec_get);

void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
        if (sec) {
                LASSERT(atomic_read(&sec->ps_refcount) > 0);

                if (atomic_dec_and_test(&sec->ps_refcount)) {
                        LASSERT(atomic_read(&sec->ps_nctx) == 0);

                        sptlrpc_gc_del_sec(sec);
                        sec_cop_destroy_sec(sec);
                }
        }
}
EXPORT_SYMBOL(sptlrpc_sec_put);
/*
 * it's the policy module's responsibility to take a reference on the import
 */
static
struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
                                       struct ptlrpc_svc_ctx *svc_ctx,
                                       struct sptlrpc_flavor *sf,
                                       enum lustre_sec_part sp)
{
        struct ptlrpc_sec_policy *policy;
        struct ptlrpc_sec        *sec;
        ENTRY;

        if (svc_ctx) {
                LASSERT(imp->imp_dlm_fake == 1);

                CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
                       imp->imp_obd->obd_type->typ_name,
                       imp->imp_obd->obd_name,
                       sptlrpc_rpcflavor2name(sf->sf_rpc));

                policy = sptlrpc_policy_get(svc_ctx->sc_policy);
                sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
        } else {
                LASSERT(imp->imp_dlm_fake == 0);

                CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
                       imp->imp_obd->obd_type->typ_name,
                       imp->imp_obd->obd_name,
                       sptlrpc_rpcflavor2name(sf->sf_rpc));

                policy = sptlrpc_rpcflavor2policy(sf->sf_rpc);
                if (!policy) {
                        CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
                        RETURN(NULL);
                }
        }

        sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
        if (sec) {
                atomic_inc(&sec->ps_refcount);

                sec->ps_part = sp;

                if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
                        sptlrpc_gc_add_sec(sec);
        } else {
                sptlrpc_policy_put(policy);
        }

        RETURN(sec);
}
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
{
        struct ptlrpc_sec *sec;

        spin_lock(&imp->imp_lock);
        sec = sptlrpc_sec_get(imp->imp_sec);
        spin_unlock(&imp->imp_lock);

        return sec;
}
EXPORT_SYMBOL(sptlrpc_import_sec_ref);

static void sptlrpc_import_sec_install(struct obd_import *imp,
                                       struct ptlrpc_sec *sec)
{
        struct ptlrpc_sec *old_sec;

        LASSERT(atomic_read(&sec->ps_refcount) > 0);

        spin_lock(&imp->imp_lock);
        old_sec = imp->imp_sec;
        imp->imp_sec = sec;
        spin_unlock(&imp->imp_lock);

        if (old_sec) {
                sptlrpc_sec_kill(old_sec);

                /* balance the ref taken by this import */
                sptlrpc_sec_put(old_sec);
        }
}
static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
                                             struct ptlrpc_sec *sec,
                                             struct sptlrpc_flavor *sf)
{
        if (sf->sf_bulk_ciph != sec->ps_flvr.sf_bulk_ciph ||
            sf->sf_bulk_hash != sec->ps_flvr.sf_bulk_hash) {
                CWARN("imp %p (%s->%s): changing bulk flavor %s/%s -> %s/%s\n",
                      imp, imp->imp_obd->obd_name,
                      obd_uuid2str(&imp->imp_connection->c_remote_uuid),
                      sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph),
                      sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
                      sptlrpc_get_ciph_name(sf->sf_bulk_ciph),
                      sptlrpc_get_hash_name(sf->sf_bulk_hash));

                spin_lock(&sec->ps_lock);
                sec->ps_flvr.sf_bulk_ciph = sf->sf_bulk_ciph;
                sec->ps_flvr.sf_bulk_hash = sf->sf_bulk_hash;
                spin_unlock(&sec->ps_lock);
        }

        if (!equi(sf->sf_flags & PTLRPC_SEC_FL_UDESC,
                  sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC)) {
                CWARN("imp %p (%s->%s): %s shipping user descriptor\n",
                      imp, imp->imp_obd->obd_name,
                      obd_uuid2str(&imp->imp_connection->c_remote_uuid),
                      (sf->sf_flags & PTLRPC_SEC_FL_UDESC) ? "start" : "stop");

                spin_lock(&sec->ps_lock);
                sec->ps_flvr.sf_flags &= ~PTLRPC_SEC_FL_UDESC;
                sec->ps_flvr.sf_flags |= sf->sf_flags & PTLRPC_SEC_FL_UDESC;
                spin_unlock(&sec->ps_lock);
        }
}
/*
 * for a normal import, @svc_ctx should be NULL and @rpc_flavor is ignored;
 * for a reverse import, @svc_ctx and @rpc_flavor come from the incoming
 * request.
 */
int sptlrpc_import_sec_adapt(struct obd_import *imp,
                             struct ptlrpc_svc_ctx *svc_ctx,
                             __u16 rpc_flavor)
{
        struct ptlrpc_connection   *conn;
        struct sptlrpc_flavor       sf;
        struct ptlrpc_sec          *sec, *newsec;
        enum lustre_sec_part        sp;
        int                         rc = 0;

        might_sleep();

        if (imp == NULL)
                return 0;

        conn = imp->imp_connection;

        if (svc_ctx == NULL) {
                struct client_obd *cliobd = &imp->imp_obd->u.cli;
                /* normal import, determine flavor from rule set */
                sptlrpc_conf_choose_flavor(cliobd->cl_sp_me, cliobd->cl_sp_to,
                                           &cliobd->cl_target_uuid,
                                           conn->c_self, &sf);

                sp = imp->imp_obd->u.cli.cl_sp_me;
        } else {
                /* reverse import, determine flavor from incoming request */
                sf.sf_rpc = rpc_flavor;
                sf.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
                sf.sf_bulk_hash = BULK_HASH_ALG_NULL;
                sf.sf_flags = PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;

                sp = sptlrpc_target_sec_part(imp->imp_obd);
        }

        sec = sptlrpc_import_sec_ref(imp);
        if (sec) {
                if (svc_ctx == NULL) {
                        /* normal import, only check rpc flavor; if just the
                         * bulk flavor or flags changed, we can handle it on
                         * the fly without switching sec. */
                        if (sf.sf_rpc == sec->ps_flvr.sf_rpc) {
                                sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);

                                rc = 0;
                                goto out;
                        }
                } else {
                        /* reverse import, do not compare bulk flavor */
                        if (sf.sf_rpc == sec->ps_flvr.sf_rpc) {
                                rc = 0;
                                goto out;
                        }
                }

                CWARN("%simport %p (%s%s%s): changing flavor "
                      "(%s, %s/%s) -> (%s, %s/%s)\n",
                      svc_ctx ? "reverse " : "",
                      imp, imp->imp_obd->obd_name,
                      svc_ctx == NULL ? "->" : "<-",
                      obd_uuid2str(&conn->c_remote_uuid),
                      sptlrpc_rpcflavor2name(sec->ps_flvr.sf_rpc),
                      sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
                      sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph),
                      sptlrpc_rpcflavor2name(sf.sf_rpc),
                      sptlrpc_get_hash_name(sf.sf_bulk_hash),
                      sptlrpc_get_ciph_name(sf.sf_bulk_ciph));
        } else {
                CWARN("%simport %p (%s%s%s) netid %x: "
                      "select initial flavor (%s, %s/%s)\n",
                      svc_ctx == NULL ? "" : "reverse ",
                      imp, imp->imp_obd->obd_name,
                      svc_ctx == NULL ? "->" : "<-",
                      obd_uuid2str(&conn->c_remote_uuid),
                      LNET_NIDNET(conn->c_self),
                      sptlrpc_rpcflavor2name(sf.sf_rpc),
                      sptlrpc_get_hash_name(sf.sf_bulk_hash),
                      sptlrpc_get_ciph_name(sf.sf_bulk_ciph));
        }

        mutex_down(&imp->imp_sec_mutex);

        newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
        if (newsec) {
                sptlrpc_import_sec_install(imp, newsec);
                rc = 0;
        } else {
                CERROR("%simport %p (%s): failed to create new sec\n",
                       svc_ctx == NULL ? "" : "reverse ",
                       imp, obd_uuid2str(&conn->c_remote_uuid));
                rc = -EPERM;
        }

        mutex_up(&imp->imp_sec_mutex);

out:
        sptlrpc_sec_put(sec);
        return rc;
}
void sptlrpc_import_sec_put(struct obd_import *imp)
{
        if (imp->imp_sec) {
                sptlrpc_sec_kill(imp->imp_sec);

                sptlrpc_sec_put(imp->imp_sec);
                imp->imp_sec = NULL;
        }
}

static void import_flush_ctx_common(struct obd_import *imp,
                                    uid_t uid, int grace, int force)
{
        struct ptlrpc_sec *sec;

        if (imp == NULL)
                return;

        sec = sptlrpc_import_sec_ref(imp);
        if (sec == NULL)
                return;

        sec_cop_flush_ctx_cache(sec, uid, grace, force);
        sptlrpc_sec_put(sec);
}

void sptlrpc_import_inval_all_ctx(struct obd_import *imp)
{
        /* use grace == 0 */
        import_flush_ctx_common(imp, -1, 0, 1);
}

void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
{
        /* it's important to use grace mode, see the explanation in
         * sptlrpc_req_refresh_ctx() */
        import_flush_ctx_common(imp, 0, 1, 1);
}

void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
{
        import_flush_ctx_common(imp, cfs_current()->uid, 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);

void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
{
        import_flush_ctx_common(imp, -1, 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
/*
 * when completing successfully, req->rq_reqmsg should point to the
 * right place.
 */
int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec_policy *policy;
        int rc;

        LASSERT(ctx);
        LASSERT(atomic_read(&ctx->cc_refcount));
        LASSERT(ctx->cc_sec);
        LASSERT(ctx->cc_sec->ps_policy);
        LASSERT(req->rq_reqmsg == NULL);

        policy = ctx->cc_sec->ps_policy;
        rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
        if (!rc) {
                LASSERT(req->rq_reqmsg);
                LASSERT(req->rq_reqbuf || req->rq_clrbuf);

                /* zeroing preallocated buffer */
                if (req->rq_pool)
                        memset(req->rq_reqmsg, 0, msgsize);
        }

        return rc;
}

void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec_policy *policy;

        LASSERT(ctx);
        LASSERT(atomic_read(&ctx->cc_refcount));
        LASSERT(ctx->cc_sec);
        LASSERT(ctx->cc_sec->ps_policy);

        if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
                return;

        policy = ctx->cc_sec->ps_policy;
        policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
}
/*
 * NOTE the caller must guarantee the buffer size is large enough for the
 * enlargement
 */
void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
                                  int segment, int newsize)
{
        void   *src, *dst;
        int     oldsize, oldmsg_size, movesize;

        LASSERT(segment < msg->lm_bufcount);
        LASSERT(msg->lm_buflens[segment] <= newsize);

        if (msg->lm_buflens[segment] == newsize)
                return;

        /* nothing to do if we are enlarging the last segment */
        if (segment == msg->lm_bufcount - 1) {
                msg->lm_buflens[segment] = newsize;
                return;
        }

        oldsize = msg->lm_buflens[segment];

        src = lustre_msg_buf(msg, segment + 1, 0);
        msg->lm_buflens[segment] = newsize;
        dst = lustre_msg_buf(msg, segment + 1, 0);
        msg->lm_buflens[segment] = oldsize;

        /* move from segment + 1 to end segment */
        LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
        oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
        LASSERT(movesize >= 0);

        if (movesize)
                memmove(dst, src, movesize);

        /* note we don't clear the areas where the old data lived; it's not
         * secret */

        /* finally set new segment size */
        msg->lm_buflens[segment] = newsize;
}
EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
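/*
 * worked example (hypothetical sizes): take a v2 message with segment
 * lengths {128, 64, 32} and enlarge segment 1 to 96. src points at the old
 * start of segment 2, dst at where segment 2 must start once segment 1
 * occupies 96 bytes, and movesize covers everything from segment 2 through
 * the end of the message; after the memmove() the code records
 * lm_buflens[1] = 96. the bytes newly opened up in the middle are not
 * cleared - the caller is expected to fill them.
 */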
/*
 * enlarge @segment of the upper message req->rq_reqmsg to @newsize; all data
 * will be preserved after enlargement. this must be called after rq_reqmsg
 * has been initialized at least.
 *
 * caller's attention: upon return, rq_reqmsg and rq_reqlen might have
 * been changed.
 */
int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
                               int segment, int newsize)
{
        struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec_cops   *cops;
        struct lustre_msg        *msg = req->rq_reqmsg;

        LASSERT(ctx);
        LASSERT(msg);
        LASSERT(msg->lm_bufcount > segment);
        LASSERT(msg->lm_buflens[segment] <= newsize);

        if (msg->lm_buflens[segment] == newsize)
                return 0;

        cops = ctx->cc_sec->ps_policy->sp_cops;
        LASSERT(cops->enlarge_reqbuf);
        return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
}
EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
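/*
 * caller sketch (illustrative): since rq_reqmsg may be reallocated by the
 * policy's enlarge_reqbuf, any cached pointer into the request message must
 * be re-fetched afterwards:
 *
 *      rc = sptlrpc_cli_enlarge_reqbuf(req, segment, newsize);
 *      if (rc == 0)
 *              body = lustre_msg_buf(req->rq_reqmsg, segment, 0);
 */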
int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec_policy *policy;
        ENTRY;

        LASSERT(ctx);
        LASSERT(atomic_read(&ctx->cc_refcount));
        LASSERT(ctx->cc_sec);
        LASSERT(ctx->cc_sec->ps_policy);

        if (req->rq_repbuf)
                RETURN(0);

        policy = ctx->cc_sec->ps_policy;
        RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
}

void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
        struct ptlrpc_sec_policy *policy;

        LASSERT(ctx);
        LASSERT(atomic_read(&ctx->cc_refcount));
        LASSERT(ctx->cc_sec);
        LASSERT(ctx->cc_sec->ps_policy);

        if (req->rq_repbuf == NULL)
                return;
        LASSERT(req->rq_repbuf_len);

        policy = ctx->cc_sec->ps_policy;
        policy->sp_cops->free_repbuf(ctx->cc_sec, req);
}
int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
                                struct ptlrpc_cli_ctx *ctx)
{
        struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;

        if (!policy->sp_cops->install_rctx)
                return 0;
        return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
}

int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
                                struct ptlrpc_svc_ctx *ctx)
{
        struct ptlrpc_sec_policy *policy = ctx->sc_policy;

        if (!policy->sp_sops->install_rctx)
                return 0;
        return policy->sp_sops->install_rctx(imp, ctx);
}

/****************************************
 * server side security                 *
 ****************************************/
static int flavor_allowed(struct sptlrpc_flavor *exp,
                          struct ptlrpc_request *req)
{
        struct sptlrpc_flavor *flvr = &req->rq_flvr;

        if (exp->sf_rpc == flvr->sf_rpc)
                return 1;

        if ((req->rq_ctx_init || req->rq_ctx_fini) &&
            RPC_FLVR_POLICY(exp->sf_rpc) == RPC_FLVR_POLICY(flvr->sf_rpc) &&
            RPC_FLVR_MECH(exp->sf_rpc) == RPC_FLVR_MECH(flvr->sf_rpc))
                return 1;

        return 0;
}

#define EXP_FLVR_UPDATE_EXPIRE  (OBD_TIMEOUT_DEFAULT + 10)
int sptlrpc_target_export_check(struct obd_export *exp,
                                struct ptlrpc_request *req)
{
        struct sptlrpc_flavor   flavor;

        if (exp == NULL)
                return 0;

        /* client side export has no imp_reverse, skip.
         * FIXME maybe we should check the flavor in this case as well??? */
        if (exp->exp_imp_reverse == NULL)
                return 0;

        /* don't care about ctx fini rpc */
        if (req->rq_ctx_fini)
                return 0;

        spin_lock(&exp->exp_lock);

        /* if the flavor just changed (exp->exp_flvr_changed != 0), we wait
         * for the first req with the new flavor, then treat it as the
         * current flavor, and adapt the reverse sec according to it.
         * note the first rpc with the new flavor might not carry a root ctx,
         * in which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
        if (unlikely(exp->exp_flvr_changed) &&
            flavor_allowed(&exp->exp_flvr_old[1], req)) {
                /* make the new flavor the "current" one, and the old ones
                 * about-to-expire */
                CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
                       exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
                flavor = exp->exp_flvr_old[1];
                exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
                exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
                exp->exp_flvr_old[0] = exp->exp_flvr;
                exp->exp_flvr_expire[0] = cfs_time_current_sec() +
                                          EXP_FLVR_UPDATE_EXPIRE;
                exp->exp_flvr = flavor;

                /* flavor change finished */
                exp->exp_flvr_changed = 0;
                LASSERT(exp->exp_flvr_adapt == 1);

                /* if it's gss, we're only interested in root ctx init */
                if (req->rq_auth_gss &&
                    !(req->rq_ctx_init && (req->rq_auth_usr_root ||
                                           req->rq_auth_usr_mdt))) {
                        spin_unlock(&exp->exp_lock);
                        CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d)\n",
                               req->rq_auth_gss, req->rq_ctx_init,
                               req->rq_auth_usr_root, req->rq_auth_usr_mdt);
                        return 0;
                }

                exp->exp_flvr_adapt = 0;
                spin_unlock(&exp->exp_lock);

                return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                req->rq_svc_ctx, flavor.sf_rpc);
        }
        /* if it equals the current flavor, we accept it, but need to
         * deal with the reverse sec/ctx */
        if (likely(flavor_allowed(&exp->exp_flvr, req))) {
                /* most cases should return here; we're only interested in
                 * gss root ctx init */
                if (!req->rq_auth_gss || !req->rq_ctx_init ||
                    (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt)) {
                        spin_unlock(&exp->exp_lock);
                        return 0;
                }

                /* if the flavor just changed, we should not proceed; just
                 * leave it, the current flavor will be discovered and
                 * replaced shortly, and let _this_ rpc pass through */
                if (exp->exp_flvr_changed) {
                        LASSERT(exp->exp_flvr_adapt);
                        spin_unlock(&exp->exp_lock);
                        return 0;
                }

                if (exp->exp_flvr_adapt) {
                        exp->exp_flvr_adapt = 0;
                        CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
                               exp, exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[0].sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                        flavor = exp->exp_flvr;
                        spin_unlock(&exp->exp_lock);

                        return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
                                                        req->rq_svc_ctx,
                                                        flavor.sf_rpc);
                } else {
                        CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
                               "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[0].sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                        spin_unlock(&exp->exp_lock);

                        return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
                                                           req->rq_svc_ctx);
                }
        }
        if (exp->exp_flvr_expire[0]) {
                if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
                        if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
                                CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
                                       "middle one ("CFS_DURATION_T")\n", exp,
                                       exp->exp_flvr.sf_rpc,
                                       exp->exp_flvr_old[0].sf_rpc,
                                       exp->exp_flvr_old[1].sf_rpc,
                                       exp->exp_flvr_expire[0] -
                                       cfs_time_current_sec());
                                spin_unlock(&exp->exp_lock);
                                return 0;
                        }
                } else {
                        CDEBUG(D_SEC, "mark middle expired\n");
                        exp->exp_flvr_expire[0] = 0;
                }
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x does not match middle\n",
                       exp, exp->exp_flvr.sf_rpc,
                       exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
                       req->rq_flvr.sf_rpc);
        }

        /* now it doesn't match the current flavor; the only chance to
         * accept it is to match an old flavor which has not expired. */
        if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
                if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
                        if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
                                CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
                                       "oldest one ("CFS_DURATION_T")\n", exp,
                                       exp->exp_flvr.sf_rpc,
                                       exp->exp_flvr_old[0].sf_rpc,
                                       exp->exp_flvr_old[1].sf_rpc,
                                       exp->exp_flvr_expire[1] -
                                       cfs_time_current_sec());
                                spin_unlock(&exp->exp_lock);
                                return 0;
                        }
                } else {
                        CDEBUG(D_SEC, "mark oldest expired\n");
                        exp->exp_flvr_expire[1] = 0;
                }
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x does not match any\n",
                       exp, exp->exp_flvr.sf_rpc,
                       exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
                       req->rq_flvr.sf_rpc);
        } else {
                CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
                       exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
                       exp->exp_flvr_old[1].sf_rpc);
        }
        spin_unlock(&exp->exp_lock);

        CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u) with "
              "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
              exp, exp->exp_obd->obd_name,
              req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
              req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_flvr.sf_rpc,
              exp->exp_flvr.sf_rpc,
              exp->exp_flvr_old[0].sf_rpc,
              exp->exp_flvr_expire[0] ?
              (unsigned long) (exp->exp_flvr_expire[0] -
                               cfs_time_current_sec()) : 0,
              exp->exp_flvr_old[1].sf_rpc,
              exp->exp_flvr_expire[1] ?
              (unsigned long) (exp->exp_flvr_expire[1] -
                               cfs_time_current_sec()) : 0);

        return -EACCES;
}
EXPORT_SYMBOL(sptlrpc_target_export_check);
void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
                                      struct sptlrpc_rule_set *rset)
{
        struct obd_export       *exp;
        struct sptlrpc_flavor    new_flvr;

        LASSERT(obd);

        spin_lock(&obd->obd_dev_lock);

        list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
                if (exp->exp_connection == NULL)
                        continue;

                /* note if this export's flavor was just updated
                 * (exp_flvr_changed == 1), this will override the
                 * previous update. */
                spin_lock(&exp->exp_lock);
                sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
                                             exp->exp_connection->c_peer.nid,
                                             &new_flvr);
                if (exp->exp_flvr_changed ||
                    memcmp(&new_flvr, &exp->exp_flvr, sizeof(new_flvr))) {
                        exp->exp_flvr_old[1] = new_flvr;
                        exp->exp_flvr_expire[1] = 0;
                        exp->exp_flvr_changed = 1;
                        exp->exp_flvr_adapt = 1;

                        CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
                               exp, sptlrpc_part2name(exp->exp_sp_peer),
                               exp->exp_flvr.sf_rpc,
                               exp->exp_flvr_old[1].sf_rpc);
                }
                spin_unlock(&exp->exp_lock);
        }

        spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
{
        if (svc_rc == SECSVC_DROP)
                return SECSVC_DROP;

        switch (req->rq_sp_from) {
        case LUSTRE_SP_CLI:
        case LUSTRE_SP_MDT:
        case LUSTRE_SP_OST:
        case LUSTRE_SP_MGS:
        case LUSTRE_SP_ANY:
                break;
        default:
                DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
                return SECSVC_DROP;
        }

        if (!req->rq_auth_gss)
                return svc_rc;

        if (unlikely(req->rq_sp_from == LUSTRE_SP_ANY)) {
                CERROR("not specific part\n");
                return SECSVC_DROP;
        }

        /* from MDT, must be authenticated as MDT */
        if (unlikely(req->rq_sp_from == LUSTRE_SP_MDT &&
                     !req->rq_auth_usr_mdt)) {
                DEBUG_REQ(D_ERROR, req, "fake source MDT");
                return SECSVC_DROP;
        }

        /* from OST, must be a callback to MDT or CLI; the reverse sec
         * was from the mdt/root keytab, so it should be MDT or root FIXME */
        if (unlikely(req->rq_sp_from == LUSTRE_SP_OST &&
                     !req->rq_auth_usr_mdt && !req->rq_auth_usr_root)) {
                DEBUG_REQ(D_ERROR, req, "fake source OST");
                return SECSVC_DROP;
        }

        return svc_rc;
}
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
{
        struct ptlrpc_sec_policy *policy;
        struct lustre_msg *msg = req->rq_reqbuf;
        int rc;
        ENTRY;

        LASSERT(msg);
        LASSERT(req->rq_reqmsg == NULL);
        LASSERT(req->rq_repmsg == NULL);

        req->rq_sp_from = LUSTRE_SP_ANY;
        req->rq_auth_uid = INVALID_UID;
        req->rq_auth_mapped_uid = INVALID_UID;

        if (req->rq_reqdata_len < sizeof(struct lustre_msg)) {
                CERROR("request size %d too small\n", req->rq_reqdata_len);
                RETURN(SECSVC_DROP);
        }

        /*
         * v2 message.
         */
        if (msg->lm_magic == LUSTRE_MSG_MAGIC_V2)
                req->rq_flvr.sf_rpc = WIRE_FLVR_RPC(msg->lm_secflvr);
        else
                req->rq_flvr.sf_rpc = WIRE_FLVR_RPC(__swab32(msg->lm_secflvr));

        /* unpack the wrapper message if the policy is not null */
        if ((RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) &&
            lustre_unpack_msg(msg, req->rq_reqdata_len))
                RETURN(SECSVC_DROP);

        policy = sptlrpc_rpcflavor2policy(req->rq_flvr.sf_rpc);
        if (!policy) {
                CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
                RETURN(SECSVC_DROP);
        }

        LASSERT(policy->sp_sops->accept);
        rc = policy->sp_sops->accept(req);

        LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
        sptlrpc_policy_put(policy);

        /* sanity check for the request source */
        rc = sptlrpc_svc_check_from(req, rc);

        /* FIXME move to proper place */
        if (rc == SECSVC_OK) {
                __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);

                if (opc == OST_WRITE)
                        req->rq_bulk_write = 1;
                else if (opc == OST_READ)
                        req->rq_bulk_read = 1;
        }

        LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
        RETURN(rc);
}
int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req,
                         int msglen)
{
        struct ptlrpc_sec_policy *policy;
        struct ptlrpc_reply_state *rs;
        int rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_svc_ctx->sc_policy);

        policy = req->rq_svc_ctx->sc_policy;
        LASSERT(policy->sp_sops->alloc_rs);

        rc = policy->sp_sops->alloc_rs(req, msglen);
        if (unlikely(rc == -ENOMEM)) {
                /* failed alloc, try emergency pool */
                rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_service);
                if (rs == NULL)
                        RETURN(-ENOMEM);

                req->rq_reply_state = rs;
                rc = policy->sp_sops->alloc_rs(req, msglen);
                if (rc) {
                        lustre_put_emerg_rs(rs);
                        req->rq_reply_state = NULL;
                }
        }

        LASSERT(rc != 0 ||
                (req->rq_reply_state && req->rq_reply_state->rs_msg));

        RETURN(rc);
}
int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
{
        struct ptlrpc_sec_policy *policy;
        int rc;
        ENTRY;

        LASSERT(req->rq_svc_ctx);
        LASSERT(req->rq_svc_ctx->sc_policy);

        policy = req->rq_svc_ctx->sc_policy;
        LASSERT(policy->sp_sops->authorize);

        rc = policy->sp_sops->authorize(req);
        LASSERT(rc || req->rq_reply_state->rs_repdata_len);

        RETURN(rc);
}

void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
{
        struct ptlrpc_sec_policy *policy;
        unsigned int prealloc;
        ENTRY;

        LASSERT(rs->rs_svc_ctx);
        LASSERT(rs->rs_svc_ctx->sc_policy);

        policy = rs->rs_svc_ctx->sc_policy;
        LASSERT(policy->sp_sops->free_rs);

        prealloc = rs->rs_prealloc;
        policy->sp_sops->free_rs(rs);

        if (prealloc)
                lustre_put_emerg_rs(rs);
        EXIT;
}
void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
{
        struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

        if (ctx == NULL)
                return;

        LASSERT(atomic_read(&ctx->sc_refcount) > 0);
        atomic_inc(&ctx->sc_refcount);
}

void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
{
        struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

        if (ctx == NULL)
                return;

        LASSERT(atomic_read(&ctx->sc_refcount) > 0);
        if (atomic_dec_and_test(&ctx->sc_refcount)) {
                if (ctx->sc_policy->sp_sops->free_ctx)
                        ctx->sc_policy->sp_sops->free_ctx(ctx);
        }
        req->rq_svc_ctx = NULL;
}

void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
{
        struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;

        if (ctx == NULL)
                return;

        LASSERT(atomic_read(&ctx->sc_refcount) > 0);
        if (ctx->sc_policy->sp_sops->invalidate_ctx)
                ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
}
EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
/****************************************
 * bulk security                        *
 ****************************************/

int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_cli_ctx *ctx;

        if (!req->rq_pack_bulk)
                return 0;

        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        ctx = req->rq_cli_ctx;
        if (ctx->cc_ops->wrap_bulk)
                return ctx->cc_ops->wrap_bulk(ctx, req, desc);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
static
void pga_to_bulk_desc(int nob, obd_count pg_count, struct brw_page **pga,
                      struct ptlrpc_bulk_desc *desc)
{
        int i;

        for (i = 0; i < pg_count && nob > 0; i++) {
#ifdef __KERNEL__
                desc->bd_iov[i].kiov_page = pga[i]->pg;
                desc->bd_iov[i].kiov_len = pga[i]->count > nob ?
                                           nob : pga[i]->count;
                desc->bd_iov[i].kiov_offset = pga[i]->off & ~CFS_PAGE_MASK;
#else
                /* FIXME currently liblustre doesn't support bulk encryption.
                 * if we ever do, the following may not be right. */
                LASSERTF(0, "Bulk encryption not implemented for liblustre\n");
                desc->bd_iov[i].iov_base = pga[i]->pg->addr;
                desc->bd_iov[i].iov_len = pga[i]->count > nob ?
                                          nob : pga[i]->count;
#endif

                desc->bd_iov_count++;
                nob -= pga[i]->count;
        }
}
int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
                                 int nob, obd_count pg_count,
                                 struct brw_page **pga)
{
        struct ptlrpc_bulk_desc *desc;
        struct ptlrpc_cli_ctx   *ctx;
        int                      rc = 0;

        if (!req->rq_pack_bulk)
                return 0;

        LASSERT(req->rq_bulk_read && !req->rq_bulk_write);

        OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[pg_count]));
        if (desc == NULL) {
                CERROR("out of memory, can't verify bulk read data\n");
                return -ENOMEM;
        }

        pga_to_bulk_desc(nob, pg_count, pga, desc);

        ctx = req->rq_cli_ctx;
        if (ctx->cc_ops->unwrap_bulk)
                rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);

        OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[pg_count]));

        return rc;
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
                                  struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_cli_ctx *ctx;

        if (!req->rq_pack_bulk)
                return 0;

        LASSERT(!req->rq_bulk_read && req->rq_bulk_write);

        ctx = req->rq_cli_ctx;
        if (ctx->cc_ops->unwrap_bulk)
                return ctx->cc_ops->unwrap_bulk(ctx, req, desc);

        return 0;
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
                          struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_svc_ctx *ctx;

        if (!req->rq_pack_bulk)
                return 0;

        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        ctx = req->rq_svc_ctx;
        if (ctx->sc_policy->sp_sops->wrap_bulk)
                return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);

        return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
                            struct ptlrpc_bulk_desc *desc)
{
        struct ptlrpc_svc_ctx *ctx;

        if (!req->rq_pack_bulk)
                return 0;

        LASSERT(req->rq_bulk_read || req->rq_bulk_write);

        ctx = req->rq_svc_ctx;
        if (ctx->sc_policy->sp_sops->unwrap_bulk)
                return ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);

        return 0;
}
EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
/****************************************
 * user descriptor helpers              *
 ****************************************/

int sptlrpc_current_user_desc_size(void)
{
        int ngroups;

#ifdef __KERNEL__
        ngroups = current_ngroups;

        if (ngroups > LUSTRE_MAX_GROUPS)
                ngroups = LUSTRE_MAX_GROUPS;
#else
        ngroups = 0;
#endif
        return sptlrpc_user_desc_size(ngroups);
}
EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
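/*
 * size arithmetic (illustrative): matching the pack/unpack logic below,
 * a user descriptor occupies sizeof(struct ptlrpc_user_desc) plus 4 bytes
 * per group, so sptlrpc_user_desc_size(ngroups) is expected to evaluate to
 * sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32).
 */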
int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_user_desc *pud;

        pud = lustre_msg_buf(msg, offset, 0);

        pud->pud_uid = cfs_current()->uid;
        pud->pud_gid = cfs_current()->gid;
        pud->pud_fsuid = cfs_current()->fsuid;
        pud->pud_fsgid = cfs_current()->fsgid;
        pud->pud_cap = cfs_curproc_cap_pack();
        pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;

#ifdef __KERNEL__
        task_lock(current);
        if (pud->pud_ngroups > current_ngroups)
                pud->pud_ngroups = current_ngroups;
        memcpy(pud->pud_groups, cfs_current()->group_info->blocks[0],
               pud->pud_ngroups * sizeof(__u32));
        task_unlock(current);
#endif

        return 0;
}
EXPORT_SYMBOL(sptlrpc_pack_user_desc);
int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset)
{
        struct ptlrpc_user_desc *pud;
        int                      i;

        pud = lustre_msg_buf(msg, offset, sizeof(*pud));
        if (!pud)
                return -EINVAL;

        if (lustre_msg_swabbed(msg)) {
                __swab32s(&pud->pud_uid);
                __swab32s(&pud->pud_gid);
                __swab32s(&pud->pud_fsuid);
                __swab32s(&pud->pud_fsgid);
                __swab32s(&pud->pud_cap);
                __swab32s(&pud->pud_ngroups);
        }

        if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
                CERROR("%u groups is too large\n", pud->pud_ngroups);
                return -EINVAL;
        }

        if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
            msg->lm_buflens[offset]) {
                CERROR("%u groups are claimed but bufsize is only %u\n",
                       pud->pud_ngroups, msg->lm_buflens[offset]);
                return -EINVAL;
        }

        if (lustre_msg_swabbed(msg)) {
                for (i = 0; i < pud->pud_ngroups; i++)
                        __swab32s(&pud->pud_groups[i]);
        }

        return 0;
}
EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
/****************************************
 * misc helpers                         *
 ****************************************/

const char * sec2target_str(struct ptlrpc_sec *sec)
{
        if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
                return "*";
        if (sec_is_reverse(sec))
                return "c";
        return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
}
EXPORT_SYMBOL(sec2target_str);
/****************************************
 * crypto API helper/alloc blkcipher    *
 ****************************************/

/****************************************
 * initialize/finalize                  *
 ****************************************/
int __init sptlrpc_init(void)
{
        int rc;

        rwlock_init(&policy_lock);

        rc = sptlrpc_gc_init();
        if (rc)
                goto out;

        rc = sptlrpc_conf_init();
        if (rc)
                goto out_gc;

        rc = sptlrpc_enc_pool_init();
        if (rc)
                goto out_conf;

        rc = sptlrpc_null_init();
        if (rc)
                goto out_pool;

        rc = sptlrpc_plain_init();
        if (rc)
                goto out_null;

        rc = sptlrpc_lproc_init();
        if (rc)
                goto out_plain;

        return 0;

out_plain:
        sptlrpc_plain_fini();
out_null:
        sptlrpc_null_fini();
out_pool:
        sptlrpc_enc_pool_fini();
out_conf:
        sptlrpc_conf_fini();
out_gc:
        sptlrpc_gc_fini();
out:
        return rc;
}

void __exit sptlrpc_fini(void)
{
        sptlrpc_lproc_fini();
        sptlrpc_plain_fini();
        sptlrpc_null_fini();
        sptlrpc_enc_pool_fini();
        sptlrpc_conf_fini();
        sptlrpc_gc_fini();
}