* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
* policy registers *
***********************************************/
-static rwlock_t policy_lock;
+static cfs_rwlock_t policy_lock;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
NULL,
};
if (number >= SPTLRPC_POLICY_MAX)
return -EINVAL;
- write_lock(&policy_lock);
+ cfs_write_lock(&policy_lock);
if (unlikely(policies[number])) {
- write_unlock(&policy_lock);
+ cfs_write_unlock(&policy_lock);
return -EALREADY;
}
policies[number] = policy;
- write_unlock(&policy_lock);
+ cfs_write_unlock(&policy_lock);
CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
return 0;
LASSERT(number < SPTLRPC_POLICY_MAX);
- write_lock(&policy_lock);
+ cfs_write_lock(&policy_lock);
if (unlikely(policies[number] == NULL)) {
- write_unlock(&policy_lock);
+ cfs_write_unlock(&policy_lock);
CERROR("%s: already unregistered\n", policy->sp_name);
return -EINVAL;
}
LASSERT(policies[number] == policy);
policies[number] = NULL;
- write_unlock(&policy_lock);
+ cfs_write_unlock(&policy_lock);
CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
return 0;
static
struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
{
- static DECLARE_MUTEX(load_mutex);
- static atomic_t loaded = ATOMIC_INIT(0);
+ static CFS_DECLARE_MUTEX(load_mutex);
+ static cfs_atomic_t loaded = CFS_ATOMIC_INIT(0);
struct ptlrpc_sec_policy *policy;
__u16 number = SPTLRPC_FLVR_POLICY(flavor);
__u16 flag = 0;
return NULL;
while (1) {
- read_lock(&policy_lock);
+ cfs_read_lock(&policy_lock);
policy = policies[number];
- if (policy && !try_module_get(policy->sp_owner))
+ if (policy && !cfs_try_module_get(policy->sp_owner))
policy = NULL;
if (policy == NULL)
- flag = atomic_read(&loaded);
- read_unlock(&policy_lock);
+ flag = cfs_atomic_read(&loaded);
+ cfs_read_unlock(&policy_lock);
if (policy != NULL || flag != 0 ||
number != SPTLRPC_POLICY_GSS)
break;
/* try to load gss module, once */
- mutex_down(&load_mutex);
- if (atomic_read(&loaded) == 0) {
- if (request_module("ptlrpc_gss") == 0)
+ cfs_mutex_down(&load_mutex);
+ if (cfs_atomic_read(&loaded) == 0) {
+ if (cfs_request_module("ptlrpc_gss") == 0)
CWARN("module ptlrpc_gss loaded on demand\n");
else
CERROR("Unable to load module ptlrpc_gss\n");
- atomic_set(&loaded, 1);
+ cfs_atomic_set(&loaded, 1);
}
- mutex_up(&load_mutex);
+ cfs_mutex_up(&load_mutex);
}
return policy;
remove_dead = 0;
}
} else {
- vcred.vc_uid = cfs_current()->uid;
- vcred.vc_gid = cfs_current()->gid;
+ vcred.vc_uid = cfs_curproc_uid();
+ vcred.vc_gid = cfs_curproc_gid();
}
return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
return ctx;
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
struct ptlrpc_sec *sec = ctx->cc_sec;
LASSERT(sec);
- LASSERT(atomic_read(&ctx->cc_refcount));
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
- if (!atomic_dec_and_test(&ctx->cc_refcount))
+ if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
return;
sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
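/*
 * Illustrative only (not part of this patch): a minimal sketch of the
 * reference pairing around a client context, assuming the usual get/put
 * discipline shown by the two functions above.
 */
static void ctx_ref_example(struct ptlrpc_request *req)
{
        struct ptlrpc_cli_ctx *ctx;

        ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
        /* ... use ctx while holding the extra reference ... */
        /* drop our ref; sync is passed to the policy's release_ctx
         * if this happened to be the last reference */
        sptlrpc_cli_ctx_put(ctx, 1);
}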
-/*
- * expire the context immediately.
- * the caller must hold at least 1 ref on the ctx.
+/**
+ * Expire the client context immediately.
+ *
+ * \pre Caller must hold at least 1 reference on the \a ctx.
*/
void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
{
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
+/**
+ * Wake up the threads that are waiting for this client context. Called
+ * after some status change has happened on \a ctx.
+ */
void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
{
struct ptlrpc_request *req, *next;
- spin_lock(&ctx->cc_lock);
- list_for_each_entry_safe(req, next, &ctx->cc_req_list, rq_ctx_chain) {
- list_del_init(&req->rq_ctx_chain);
+ cfs_spin_lock(&ctx->cc_lock);
+ cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
+ rq_ctx_chain) {
+ cfs_list_del_init(&req->rq_ctx_chain);
ptlrpc_client_wake_req(req);
}
- spin_unlock(&ctx->cc_lock);
+ cfs_spin_unlock(&ctx->cc_lock);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
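/*
 * Illustrative only: a hypothetical helper showing how a security policy
 * might use the wakeup above after changing context state, so that requests
 * parked on ctx->cc_req_list get another chance to make progress. The helper
 * name and the cfs_set_bit() call are assumptions, not part of this patch.
 */
static void mark_ctx_uptodate_example(struct ptlrpc_cli_ctx *ctx)
{
        cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
        sptlrpc_cli_ctx_wakeup(ctx);
}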
{
int adapt = 0;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
if (imp->imp_sec_expire &&
imp->imp_sec_expire < cfs_time_current_sec()) {
adapt = 1;
imp->imp_sec_expire = 0;
}
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (!adapt)
return 0;
return 0;
}
+/**
+ * Given a \a req, find or allocate an appropriate context for it.
+ * \pre req->rq_cli_ctx == NULL.
+ *
+ * \retval 0 success, and req->rq_cli_ctx is set.
+ * \retval -ev error number, and req->rq_cli_ctx == NULL.
+ */
int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
{
struct obd_import *imp = req->rq_import;
RETURN(0);
}
-/*
- * if @sync == 0, this function should return quickly without sleep;
- * otherwise might trigger ctx destroying rpc to server.
+/**
+ * Drop the context for \a req.
+ * \pre req->rq_cli_ctx != NULL.
+ * \post req->rq_cli_ctx == NULL.
+ *
+ * If \a sync == 0, this function should return quickly without sleep;
+ * otherwise it might trigger and wait for the whole process of sending
+ * a context-destroying RPC to the server.
*/
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
/* request might be asked to release earlier while still
* in the context waiting list.
*/
- if (!list_empty(&req->rq_ctx_chain)) {
- spin_lock(&req->rq_cli_ctx->cc_lock);
- list_del_init(&req->rq_ctx_chain);
- spin_unlock(&req->rq_cli_ctx->cc_lock);
+ if (!cfs_list_empty(&req->rq_ctx_chain)) {
+ cfs_spin_lock(&req->rq_cli_ctx->cc_lock);
+ cfs_list_del_init(&req->rq_ctx_chain);
+ cfs_spin_unlock(&req->rq_cli_ctx->cc_lock);
}
sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
struct ptlrpc_cli_ctx *newctx)
{
struct sptlrpc_flavor old_flvr;
- char *reqmsg;
+ char *reqmsg = NULL; /* to workaround old gcc */
int reqmsg_size;
- int rc;
-
- if (likely(oldctx->cc_sec == newctx->cc_sec))
- return 0;
+ int rc = 0;
LASSERT(req->rq_reqmsg);
LASSERT(req->rq_reqlen);
LASSERT(req->rq_replen);
- CWARN("req %p: switch ctx %p -> %p, switch sec %p(%s) -> %p(%s)\n",
- req, oldctx, newctx,
+ CWARN("req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
+ "switch sec %p(%s) -> %p(%s)\n", req,
+ oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
+ newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
/* save request message */
reqmsg_size = req->rq_reqlen;
- OBD_ALLOC(reqmsg, reqmsg_size);
- if (reqmsg == NULL)
- return -ENOMEM;
- memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
+ if (reqmsg_size != 0) {
+ OBD_ALLOC_LARGE(reqmsg, reqmsg_size);
+ if (reqmsg == NULL)
+ return -ENOMEM;
+ memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
+ }
/* release old req/rep buf */
req->rq_cli_ctx = oldctx;
/* alloc new request buffer
* we don't need to alloc reply buffer here, leave it to the
- * rest procedure of ptlrpc
- */
- rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
- if (!rc) {
- LASSERT(req->rq_reqmsg);
- memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
- } else {
- CWARN("failed to alloc reqbuf: %d\n", rc);
- req->rq_flvr = old_flvr;
- }
+ * rest procedure of ptlrpc */
+ if (reqmsg_size != 0) {
+ rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
+ if (!rc) {
+ LASSERT(req->rq_reqmsg);
+ memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
+ } else {
+ CWARN("failed to alloc reqbuf: %d\n", rc);
+ req->rq_flvr = old_flvr;
+ }
- OBD_FREE(reqmsg, reqmsg_size);
+ OBD_FREE_LARGE(reqmsg, reqmsg_size);
+ }
return rc;
}
/**
- * if current context has died, or if we resend after flavor switched,
- * call this func to switch context. if no switch is needed, request
- * will end up with the same context.
+ * If the current context of \a req is dead somehow, e.g. we just switched
+ * flavor and thus marked the original contexts dead, we'll find a new context
+ * for it. If no switch is needed, \a req will end up with the same context.
*
- * request must have a context. in any case of failure, restore the
- * restore the old one - a request must have a context.
+ * \note a request must have a context, to keep other parts of code happy.
+ * In any case of failure during the switching, we must restore the old one.
*/
int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
{
newctx = req->rq_cli_ctx;
LASSERT(newctx);
- if (unlikely(newctx == oldctx)) {
- if (test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags)) {
- /*
- * still get the old ctx, usually means system busy
- */
- CWARN("ctx (%p, fl %lx) doesn't switch, "
- "relax a little bit\n",
- newctx, newctx->cc_flags);
+ if (unlikely(newctx == oldctx &&
+ cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
+ /*
+ * still get the old dead ctx, usually means system too busy
+ */
+ CWARN("ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
+ newctx, newctx->cc_flags);
- cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, HZ);
- }
+ cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
+ CFS_HZ);
} else {
+ /*
+ * it's possible newctx == oldctx if we're switching
+ * subflavor with the same sec.
+ */
rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
if (rc) {
/* restore old ctx */
{
struct ptlrpc_request *req = data;
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_intr = 1;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
}
static
void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
{
- spin_lock(&ctx->cc_lock);
- if (!list_empty(&req->rq_ctx_chain))
- list_del_init(&req->rq_ctx_chain);
- spin_unlock(&ctx->cc_lock);
+ cfs_spin_lock(&ctx->cc_lock);
+ if (!cfs_list_empty(&req->rq_ctx_chain))
+ cfs_list_del_init(&req->rq_ctx_chain);
+ cfs_spin_unlock(&ctx->cc_lock);
}
-/*
- * the status of context could be subject to be changed by other threads at any
- * time. we allow this race. but once we return with 0, the caller will
- * suppose it's uptodated and keep using it until the owning rpc is done.
+/**
+ * Refresh the context of \a req, if it's not up-to-date.
+ * \param timeout
+ * - < 0: don't wait
+ * - = 0: wait until success or a fatal error occurs
+ * - > 0: timeout value (in seconds)
*
- * @timeout:
- * < 0 - don't wait
- * = 0 - wait until success or fatal error occur
- * > 0 - timeout value
+ * The status of the context could be changed by other threads at any time.
+ * We allow this race, but once we return 0, the caller will assume it is
+ * up to date and keep using it until the owning RPC is done.
*
- * return 0 only if the context is uptodated.
+ * \retval 0 only if the context is up to date.
+ * \retval -ev error number.
*/
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
{
/*
* during the process a request's context might change type even
- * (e.g. from gss ctx to plain ctx), so each loop we need to re-check
+ * (e.g. from gss ctx to null ctx), so each loop we need to re-check
* everything
*/
again:
if (rc)
RETURN(rc);
- if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc)
+ if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
+ CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
+ req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
+ req_off_ctx_list(req, ctx);
sptlrpc_req_replace_dead_ctx(req);
-
+ ctx = req->rq_cli_ctx;
+ }
sptlrpc_sec_put(sec);
if (cli_ctx_is_eternal(ctx))
RETURN(0);
- if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
+ if (unlikely(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
LASSERT(ctx->cc_ops->refresh);
ctx->cc_ops->refresh(ctx);
}
- LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
LASSERT(ctx->cc_ops->validate);
if (ctx->cc_ops->validate(ctx) == 0) {
RETURN(0);
}
- if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
+ if (unlikely(cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
+ cfs_spin_lock(&req->rq_lock);
req->rq_err = 1;
+ cfs_spin_unlock(&req->rq_lock);
req_off_ctx_list(req, ctx);
RETURN(-EPERM);
}
- /* This is subtle. For resent message we have to keep original
- * context to survive following situation:
- * 1. the request sent to server
- * 2. recovery was kick start
- * 3. recovery finished, the request marked as resent
- * 4. resend the request
- * 5. old reply from server received (because xid is the same)
- * 6. verify reply (has to be success)
- * 7. new reply from server received, lnet drop it
+ /*
+ * There's a subtle issue when resending RPCs; consider the following
+ * situation:
+ * 1. the request was sent to the server.
+ * 2. recovery was kicked off; after it finished, the request was
+ * marked as resent.
+ * 3. resend the request.
+ * 4. old reply from the server received, we accept and verify the reply.
+ * this has to succeed, otherwise the error would be visible to
+ * the application.
+ * 5. new reply from server received, dropped by LNet.
*
- * Note we can't simply change xid for resent request because
- * server reply on it for reply reconstruction.
+ * Note the xid of old & new request is the same. We can't simply
+ * change xid for the resent request because the server relies on
+ * it for reply reconstruction.
*
* Commonly the original context should be uptodate because we
- * have a expiry nice time; And server will keep their half part
- * context because we at least hold a ref of old context which
- * prevent the context detroy RPC be sent. So server still can
- * accept the request and finish RPC. Two cases:
- * 1. If server side context has been trimed, a NO_CONTEXT will
+ * have a nice expiry time; the server will keep its context because
+ * we at least hold a ref on the old context, which prevents the
+ * context-destroying RPC from being sent. So the server can still accept
+ * the request and finish the RPC. But if that's not the case:
+ * 1. If server side context has been trimmed, a NO_CONTEXT will
* be returned, gss_cli_ctx_verify/unseal will switch to new
* context by force.
* 2. Current context never be refreshed, then we are fine: we
* never really send request with old context before.
*/
- if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
+ if (cfs_test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
unlikely(req->rq_reqmsg) &&
lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
req_off_ctx_list(req, ctx);
RETURN(0);
}
- if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
+ if (unlikely(cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
+ req_off_ctx_list(req, ctx);
/*
* don't switch ctx if import was deactivated
*/
if (req->rq_import->imp_deactive) {
- req_off_ctx_list(req, ctx);
+ cfs_spin_lock(&req->rq_lock);
req->rq_err = 1;
+ cfs_spin_unlock(&req->rq_lock);
RETURN(-EINTR);
}
LASSERT(ctx == req->rq_cli_ctx);
CERROR("req %p: failed to replace dead ctx %p: %d\n",
req, ctx, rc);
+ cfs_spin_lock(&req->rq_lock);
req->rq_err = 1;
- LASSERT(list_empty(&req->rq_ctx_chain));
+ cfs_spin_unlock(&req->rq_lock);
RETURN(rc);
}
- CWARN("req %p: replace dead ctx %p => ctx %p (%u->%s)\n",
- req, ctx, req->rq_cli_ctx,
- req->rq_cli_ctx->cc_vcred.vc_uid,
- sec2target_str(req->rq_cli_ctx->cc_sec));
-
ctx = req->rq_cli_ctx;
- LASSERT(list_empty(&req->rq_ctx_chain));
-
goto again;
}
- /* Now we're sure this context is during upcall, add myself into
+ /*
+ * Now we're sure this context is during upcall, add myself into
* waiting list
*/
- spin_lock(&ctx->cc_lock);
- if (list_empty(&req->rq_ctx_chain))
- list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
- spin_unlock(&ctx->cc_lock);
+ cfs_spin_lock(&ctx->cc_lock);
+ if (cfs_list_empty(&req->rq_ctx_chain))
+ cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
+ cfs_spin_unlock(&ctx->cc_lock);
if (timeout < 0)
RETURN(-EWOULDBLOCK);
/* Clear any flags that may be present from previous sends */
LASSERT(req->rq_receiving_reply == 0);
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
req->rq_err = 0;
req->rq_timedout = 0;
req->rq_resend = 0;
req->rq_restart = 0;
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
- lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
+ lwi = LWI_TIMEOUT_INTR(timeout * CFS_HZ, ctx_refresh_timeout,
ctx_refresh_interrupt, req);
rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
- /* following cases we could be here:
+ /*
+ * following cases could lead us here:
* - successfully refreshed;
- * - interruptted;
+ * - interrupted;
* - timedout, and we don't want recover from the failure;
* - timedout, and waked up upon recovery finished;
* - someone else mark this ctx dead by force;
goto again;
}
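/*
 * Illustrative only: a hypothetical caller sketch for the timeout semantics
 * documented above. With timeout < 0 the refresh never sleeps and returns
 * -EWOULDBLOCK if the context is not ready yet.
 */
static int try_refresh_once_example(struct ptlrpc_request *req)
{
        int rc;

        rc = sptlrpc_req_refresh_ctx(req, -1);  /* don't wait */
        if (rc == -EWOULDBLOCK)
                CDEBUG(D_SEC, "req %p: ctx still refreshing\n", req);
        return rc;
}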
-/*
- * Note this could be called in two situations:
+/**
+ * Initialize flavor settings for \a req, according to \a opcode.
+ *
+ * \note this could be called in two situations:
* - new request from ptlrpc_pre_req(), with proper @opcode
* - old request which changed ctx in the middle, with @opcode == 0
*/
sec = req->rq_cli_ctx->cc_sec;
- spin_lock(&sec->ps_lock);
+ cfs_spin_lock(&sec->ps_lock);
req->rq_flvr = sec->ps_flvr;
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
/* force SVC_NULL for context initiation rpc, SVC_INTG for context
* destruction rpc */
req->rq_reqbuf_len = 0;
}
-/*
- * check whether current user have valid context for an import or not.
- * might repeatedly try in case of non-fatal errors.
- * return 0 on success, < 0 on failure
+/**
+ * Given an import \a imp, check whether current user has a valid context
+ * or not. We may create a new context and try to refresh it, retrying
+ * repeatedly in case of non-fatal errors. Return 0 means success.
*/
int sptlrpc_import_check_ctx(struct obd_import *imp)
{
int rc;
ENTRY;
- might_sleep();
+ cfs_might_sleep();
sec = sptlrpc_import_sec_ref(imp);
ctx = get_my_ctx(sec);
if (!req)
RETURN(-ENOMEM);
- spin_lock_init(&req->rq_lock);
- atomic_set(&req->rq_refcount, 10000);
+ cfs_spin_lock_init(&req->rq_lock);
+ cfs_atomic_set(&req->rq_refcount, 10000);
CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
cfs_waitq_init(&req->rq_reply_waitq);
+ cfs_waitq_init(&req->rq_set_waitq);
req->rq_import = imp;
req->rq_flvr = sec->ps_flvr;
req->rq_cli_ctx = ctx;
rc = sptlrpc_req_refresh_ctx(req, 0);
- LASSERT(list_empty(&req->rq_ctx_chain));
+ LASSERT(cfs_list_empty(&req->rq_ctx_chain));
sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
OBD_FREE_PTR(req);
RETURN(rc);
}
+/**
+ * Used by ptlrpc client, to perform the pre-defined security transformation
+ * upon the request message of \a req. After this function is called,
+ * req->rq_reqmsg is still accessible as clear text.
+ */
int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
int rc;
- __u32 flvr;
ENTRY;
LASSERT(ctx);
LASSERT(req->rq_repdata);
LASSERT(req->rq_repmsg == NULL);
+ req->rq_rep_swab_mask = 0;
+
+ rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
+ switch (rc) {
+ case 1:
+ lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+ case 0:
+ break;
+ default:
+ CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
+ RETURN(-EPROTO);
+ }
+
if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
CERROR("replied data length %d too small\n",
req->rq_repdata_len);
RETURN(-EPROTO);
}
- /* v2 message, check request/reply policy match */
- flvr = WIRE_FLVR(req->rq_repdata->lm_secflvr);
-
- if (req->rq_repdata->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED)
- __swab32s(&flvr);
-
- if (SPTLRPC_FLVR_POLICY(flvr) !=
+ if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
- CERROR("request policy was %u while reply with %u\n",
- SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc),
- SPTLRPC_FLVR_POLICY(flvr));
+ CERROR("reply policy %u doesn't match request policy %u\n",
+ SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
+ SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
RETURN(-EPROTO);
}
- /* do nothing if it's null policy; otherwise unpack the
- * wrapper message */
- if (SPTLRPC_FLVR_POLICY(flvr) != SPTLRPC_POLICY_NULL &&
- lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len))
- RETURN(-EPROTO);
-
switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
default:
LBUG();
}
-
LASSERT(rc || req->rq_repmsg || req->rq_resend);
+
+ if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
+ !req->rq_ctx_init)
+ req->rq_rep_swab_mask = 0;
RETURN(rc);
}
-/*
- * upon this be called, the reply buffer should have been un-posted,
- * so nothing is going to change.
+/**
+ * Used by ptlrpc client, to perform security transformation upon the reply
+ * message of \a req. Upon successful return, req->rq_repmsg points to
+ * the reply message in clear text.
+ *
+ * \pre the reply buffer should have been un-posted from LNet, so nothing is
+ * going to change.
*/
int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
{
}
/**
- * Upon called, the receive buffer might be still posted, so the reply data
- * might be changed at any time, no matter we're holding rq_lock or not. we
- * expect the rq_reply_off be 0, rq_nob_received is the early reply size.
- *
- * we allocate separate ptlrpc_request and reply buffer for early reply
- * processing, return 0 and @req_ret is a duplicated ptlrpc_request. caller
- * must call sptlrpc_cli_finish_early_reply() on the returned request to
- * release it. if anything goes wrong @req_ret will not be set.
+ * Used by ptlrpc client, to perform security transformation upon the early
+ * reply message of \a req. We expect rq_reply_off to be 0, and
+ * rq_nob_received is the early reply size.
+ *
+ * Because the receive buffer might still be posted, the reply data might be
+ * changed at any time, whether or not we're holding rq_lock. For this reason
+ * we allocate a separate ptlrpc_request and reply buffer for early reply
+ * processing.
+ *
+ * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
+ * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
+ * \a *req_ret to release it.
+ * \retval -ev error number, and \a req_ret will not be set.
*/
int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
struct ptlrpc_request **req_ret)
early_size = req->rq_nob_received;
early_bufsz = size_roundup_power2(early_size);
- OBD_ALLOC(early_buf, early_bufsz);
+ OBD_ALLOC_LARGE(early_buf, early_bufsz);
if (early_buf == NULL)
GOTO(err_req, rc = -ENOMEM);
/* sanity checkings and copy data out, do it inside spinlock */
- spin_lock(&req->rq_lock);
+ cfs_spin_lock(&req->rq_lock);
if (req->rq_replied) {
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
GOTO(err_buf, rc = -EALREADY);
}
if (req->rq_reply_off != 0) {
CERROR("early reply with offset %u\n", req->rq_reply_off);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
GOTO(err_buf, rc = -EPROTO);
}
/* even another early arrived the size should be the same */
CERROR("data size has changed from %u to %u\n",
early_size, req->rq_nob_received);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
GOTO(err_buf, rc = -EINVAL);
}
if (req->rq_nob_received < sizeof(struct lustre_msg)) {
CERROR("early reply length %d too small\n",
req->rq_nob_received);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
GOTO(err_buf, rc = -EALREADY);
}
memcpy(early_buf, req->rq_repbuf, early_size);
- spin_unlock(&req->rq_lock);
+ cfs_spin_unlock(&req->rq_lock);
+ cfs_spin_lock_init(&early_req->rq_lock);
early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
early_req->rq_flvr = req->rq_flvr;
early_req->rq_repbuf = early_buf;
early_req->rq_repdata = (struct lustre_msg *) early_buf;
early_req->rq_repdata_len = early_size;
early_req->rq_early = 1;
+ early_req->rq_reqmsg = req->rq_reqmsg;
rc = do_cli_unwrap_reply(early_req);
if (rc) {
err_ctx:
sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
err_buf:
- OBD_FREE(early_buf, early_bufsz);
+ OBD_FREE_LARGE(early_buf, early_bufsz);
err_req:
OBD_FREE_PTR(early_req);
RETURN(rc);
}
+/**
+ * Used by ptlrpc client, to release a processed early reply \a early_req.
+ *
+ * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
+ */
void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
{
LASSERT(early_req->rq_repbuf);
LASSERT(early_req->rq_repmsg);
sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
- OBD_FREE(early_req->rq_repbuf, early_req->rq_repbuf_len);
+ OBD_FREE_LARGE(early_req->rq_repbuf, early_req->rq_repbuf_len);
OBD_FREE_PTR(early_req);
}
/*
* "fixed" sec (e.g. null) use sec_id < 0
*/
-static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
+static cfs_atomic_t sptlrpc_sec_id = CFS_ATOMIC_INIT(1);
int sptlrpc_get_next_secid(void)
{
- return atomic_inc_return(&sptlrpc_sec_id);
+ return cfs_atomic_inc_return(&sptlrpc_sec_id);
}
EXPORT_SYMBOL(sptlrpc_get_next_secid);
{
struct ptlrpc_sec_policy *policy = sec->ps_policy;
- LASSERT(atomic_read(&sec->ps_refcount) == 0);
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
+ LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
LASSERT(policy->sp_cops->destroy_sec);
CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
if (sec->ps_policy->sp_cops->kill_sec) {
sec->ps_policy->sp_cops->kill_sec(sec);
struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
{
- if (sec) {
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- atomic_inc(&sec->ps_refcount);
- }
+ if (sec)
+ cfs_atomic_inc(&sec->ps_refcount);
return sec;
}
void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
if (sec) {
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
-
- if (atomic_dec_and_test(&sec->ps_refcount)) {
- LASSERT(atomic_read(&sec->ps_nctx) == 0);
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
+ if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
sptlrpc_gc_del_sec(sec);
sec_cop_destroy_sec(sec);
}
sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
if (sec) {
- atomic_inc(&sec->ps_refcount);
+ cfs_atomic_inc(&sec->ps_refcount);
sec->ps_part = sp;
{
struct ptlrpc_sec *sec;
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
sec = sptlrpc_sec_get(imp->imp_sec);
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
return sec;
}
{
struct ptlrpc_sec *old_sec;
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
+ LASSERT_ATOMIC_POS(&sec->ps_refcount);
- spin_lock(&imp->imp_lock);
+ cfs_spin_lock(&imp->imp_lock);
old_sec = imp->imp_sec;
imp->imp_sec = sec;
- spin_unlock(&imp->imp_lock);
+ cfs_spin_unlock(&imp->imp_lock);
if (old_sec) {
sptlrpc_sec_kill(old_sec);
sptlrpc_secflags2str(sf->sf_flags,
str2, sizeof(str2)));
- spin_lock(&sec->ps_lock);
+ cfs_spin_lock(&sec->ps_lock);
flavor_copy(&sec->ps_flvr, sf);
- spin_unlock(&sec->ps_lock);
+ cfs_spin_unlock(&sec->ps_lock);
}
-/*
- * for normal import, @svc_ctx should be NULL and @flvr is ignored;
- * for reverse import, @svc_ctx and @flvr is from incoming request.
+/**
+ * Get an appropriate ptlrpc_sec for \a imp, according to the current
+ * configuration. When called, imp->imp_sec may or may not be NULL.
+ *
+ * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
+ * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
*/
int sptlrpc_import_sec_adapt(struct obd_import *imp,
struct ptlrpc_svc_ctx *svc_ctx,
struct ptlrpc_sec *sec, *newsec;
enum lustre_sec_part sp;
char str[24];
- int rc;
+ int rc = 0;
+ ENTRY;
- might_sleep();
+ cfs_might_sleep();
if (imp == NULL)
- return 0;
+ RETURN(0);
conn = imp->imp_connection;
char str2[24];
if (flavor_equal(&sf, &sec->ps_flvr))
- goto out;
+ GOTO(out, rc);
- CWARN("%simport %p (%s%s%s): changing flavor "
- "%s -> %s\n", svc_ctx ? "reverse " : "",
- imp, imp->imp_obd->obd_name,
- svc_ctx == NULL ? "->" : "<-",
+ CWARN("import %s->%s: changing flavor %s -> %s\n",
+ imp->imp_obd->obd_name,
obd_uuid2str(&conn->c_remote_uuid),
sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
- goto out;
+ GOTO(out, rc);
}
} else {
- CWARN("%simport %p (%s%s%s) netid %x: select flavor %s\n",
- svc_ctx == NULL ? "" : "reverse ",
- imp, imp->imp_obd->obd_name,
- svc_ctx == NULL ? "->" : "<-",
+ CWARN("import %s->%s netid %x: select flavor %s\n",
+ imp->imp_obd->obd_name,
obd_uuid2str(&conn->c_remote_uuid),
LNET_NIDNET(conn->c_self),
sptlrpc_flavor2name(&sf, str, sizeof(str)));
}
- mutex_down(&imp->imp_sec_mutex);
+ cfs_mutex_down(&imp->imp_sec_mutex);
newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
if (newsec) {
sptlrpc_import_sec_install(imp, newsec);
- rc = 0;
} else {
- CERROR("%simport %p (%s): failed to create new sec\n",
- svc_ctx == NULL ? "" : "reverse ",
- imp, obd_uuid2str(&conn->c_remote_uuid));
+ CERROR("import %s->%s: failed to create new sec\n",
+ imp->imp_obd->obd_name,
+ obd_uuid2str(&conn->c_remote_uuid));
rc = -EPERM;
}
- mutex_up(&imp->imp_sec_mutex);
-
+ cfs_mutex_up(&imp->imp_sec_mutex);
out:
sptlrpc_sec_put(sec);
- return 0;
+ RETURN(rc);
}
void sptlrpc_import_sec_put(struct obd_import *imp)
sptlrpc_sec_put(sec);
}
-void sptlrpc_import_inval_all_ctx(struct obd_import *imp)
-{
- /* use grace == 0 */
- import_flush_ctx_common(imp, -1, 0, 1);
-}
-
void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
{
/* it's important to use grace mode, see explain in
void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
{
- import_flush_ctx_common(imp, cfs_current()->uid, 1, 1);
+ import_flush_ctx_common(imp, cfs_curproc_uid(), 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
}
EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
-/*
- * when complete successfully, req->rq_reqmsg should point to the
- * right place.
+/**
+ * Used by ptlrpc client to allocate the request buffer of \a req. Upon
+ * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
*/
int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
{
int rc;
LASSERT(ctx);
- LASSERT(atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
LASSERT(req->rq_reqmsg == NULL);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
policy = ctx->cc_sec->ps_policy;
rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
return rc;
}
+/**
+ * Used by ptlrpc client to free request buffer of \a req. After this
+ * req->rq_reqmsg is set to NULL and should not be accessed anymore.
+ */
void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
struct ptlrpc_sec_policy *policy;
LASSERT(ctx);
- LASSERT(atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
return;
policy = ctx->cc_sec->ps_policy;
policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
+ req->rq_reqmsg = NULL;
}
/*
}
EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
-/*
- * enlarge @segment of upper message req->rq_reqmsg to @newsize, all data
- * will be preserved after enlargement. this must be called after rq_reqmsg has
- * been intialized at least.
+/**
+ * Used by ptlrpc client to enlarge the \a segment of the request message
+ * pointed to by req->rq_reqmsg to size \a newsize. All previously filled-in
+ * data will be preserved after the enlargement. This must be called after the
+ * original request buffer has been allocated.
*
- * caller's attention: upon return, rq_reqmsg and rq_reqlen might have
- * been changed.
+ * \note after this is called, rq_reqmsg and rq_reqlen might have been changed,
+ * so the caller should refresh its local pointers if needed.
*/
int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
int segment, int newsize)
}
EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
+/**
+ * Used by ptlrpc client to allocate reply buffer of \a req.
+ *
+ * \note After this, req->rq_repmsg is still not accessible.
+ */
int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
ENTRY;
LASSERT(ctx);
- LASSERT(atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
}
+/**
+ * Used by ptlrpc client to free reply buffer of \a req. After this
+ * req->rq_repmsg is set to NULL and should not be accessed anymore.
+ */
void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
ENTRY;
LASSERT(ctx);
- LASSERT(atomic_read(&ctx->cc_refcount));
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
+ LASSERT_ATOMIC_POS(&ctx->cc_refcount);
if (req->rq_repbuf == NULL)
return;
policy = ctx->cc_sec->ps_policy;
policy->sp_cops->free_repbuf(ctx->cc_sec, req);
+ req->rq_repmsg = NULL;
EXIT;
}
#define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
+/**
+ * Given an export \a exp, check whether the flavor of the incoming \a req
+ * is allowed by the export \a exp. The main logic is about handling
+ * changing configurations. Return 0 means success.
+ */
int sptlrpc_target_export_check(struct obd_export *exp,
struct ptlrpc_request *req)
{
if (req->rq_ctx_fini)
return 0;
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
/* if flavor just changed (exp->exp_flvr_changed != 0), we wait for
* the first req with the new flavor, then treat it as current flavor,
/* if it's gss, we only interested in root ctx init */
if (req->rq_auth_gss &&
- !(req->rq_ctx_init && (req->rq_auth_usr_root ||
- req->rq_auth_usr_mdt))) {
- spin_unlock(&exp->exp_lock);
- CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d)\n",
+ !(req->rq_ctx_init &&
+ (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
+ req->rq_auth_usr_ost))) {
+ cfs_spin_unlock(&exp->exp_lock);
+ CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
req->rq_auth_gss, req->rq_ctx_init,
- req->rq_auth_usr_root, req->rq_auth_usr_mdt);
+ req->rq_auth_usr_root, req->rq_auth_usr_mdt,
+ req->rq_auth_usr_ost);
return 0;
}
exp->exp_flvr_adapt = 0;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
req->rq_svc_ctx, &flavor);
/* most cases should return here, we only interested in
* gss root ctx init */
if (!req->rq_auth_gss || !req->rq_ctx_init ||
- (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt)) {
- spin_unlock(&exp->exp_lock);
+ (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
+ !req->rq_auth_usr_ost)) {
+ cfs_spin_unlock(&exp->exp_lock);
return 0;
}
* shortly, and let _this_ rpc pass through */
if (exp->exp_flvr_changed) {
LASSERT(exp->exp_flvr_adapt);
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return 0;
}
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
flavor = exp->exp_flvr;
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
req->rq_svc_ctx,
"install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
req->rq_svc_ctx);
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[0] -
cfs_time_current_sec());
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return 0;
}
} else {
exp->exp_flvr_old[1].sf_rpc,
exp->exp_flvr_expire[1] -
cfs_time_current_sec());
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
return 0;
}
} else {
exp->exp_flvr_old[1].sf_rpc);
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
- CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u) with "
+ CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
"unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
exp, exp->exp_obd->obd_name,
req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
- req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_flvr.sf_rpc,
+ req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
+ req->rq_flvr.sf_rpc,
exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[0].sf_rpc,
exp->exp_flvr_expire[0] ?
LASSERT(obd);
- spin_lock(&obd->obd_dev_lock);
+ cfs_spin_lock(&obd->obd_dev_lock);
- list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
+ cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
if (exp->exp_connection == NULL)
continue;
/* note if this export had just been updated flavor
* (exp_flvr_changed == 1), this will override the
* previous one. */
- spin_lock(&exp->exp_lock);
+ cfs_spin_lock(&exp->exp_lock);
sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
exp->exp_connection->c_peer.nid,
&new_flvr);
exp->exp_flvr.sf_rpc,
exp->exp_flvr_old[1].sf_rpc);
}
- spin_unlock(&exp->exp_lock);
+ cfs_spin_unlock(&exp->exp_lock);
}
- spin_unlock(&obd->obd_dev_lock);
+ cfs_spin_unlock(&obd->obd_dev_lock);
}
EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
{
- if (svc_rc == SECSVC_DROP)
- return SECSVC_DROP;
+ /* peer's claim is unreliable unless gss is being used */
+ if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
+ return svc_rc;
switch (req->rq_sp_from) {
case LUSTRE_SP_CLI:
+ if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
+ DEBUG_REQ(D_ERROR, req, "faked source CLI");
+ svc_rc = SECSVC_DROP;
+ }
+ break;
case LUSTRE_SP_MDT:
+ if (!req->rq_auth_usr_mdt) {
+ DEBUG_REQ(D_ERROR, req, "faked source MDT");
+ svc_rc = SECSVC_DROP;
+ }
+ break;
case LUSTRE_SP_OST:
- case LUSTRE_SP_MGC:
+ if (!req->rq_auth_usr_ost) {
+ DEBUG_REQ(D_ERROR, req, "faked source OST");
+ svc_rc = SECSVC_DROP;
+ }
+ break;
case LUSTRE_SP_MGS:
- case LUSTRE_SP_ANY:
+ case LUSTRE_SP_MGC:
+ if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
+ !req->rq_auth_usr_ost) {
+ DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
+ svc_rc = SECSVC_DROP;
+ }
break;
+ case LUSTRE_SP_ANY:
default:
DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
- return SECSVC_DROP;
- }
-
- if (!req->rq_auth_gss)
- return svc_rc;
-
- if (unlikely(req->rq_sp_from == LUSTRE_SP_ANY)) {
- CERROR("not specific part\n");
- return SECSVC_DROP;
- }
-
- /* from MDT, must be authenticated as MDT */
- if (unlikely(req->rq_sp_from == LUSTRE_SP_MDT &&
- !req->rq_auth_usr_mdt)) {
- DEBUG_REQ(D_ERROR, req, "fake source MDT");
- return SECSVC_DROP;
- }
-
- /* from OST, must be callback to MDT and CLI, the reverse sec
- * was from mdt/root keytab, so it should be MDT or root FIXME */
- if (unlikely(req->rq_sp_from == LUSTRE_SP_OST &&
- !req->rq_auth_usr_mdt && !req->rq_auth_usr_root)) {
- DEBUG_REQ(D_ERROR, req, "fake source OST");
- return SECSVC_DROP;
+ svc_rc = SECSVC_DROP;
}
return svc_rc;
}
+/**
+ * Used by ptlrpc server, to perform transformation upon the request message
+ * of the incoming \a req. This must be the first thing done with an incoming
+ * request in the ptlrpc layer.
+ *
+ * \retval SECSVC_OK success, and req->rq_reqmsg points to the request message
+ * in clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
+ * \retval SECSVC_COMPLETE success, the request has been fully processed, and
+ * reply message has been prepared.
+ * \retval SECSVC_DROP failed, this request should be dropped.
+ */
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
{
struct ptlrpc_sec_policy *policy;
LASSERT(req->rq_repmsg == NULL);
LASSERT(req->rq_svc_ctx == NULL);
- req->rq_sp_from = LUSTRE_SP_ANY;
- req->rq_auth_uid = INVALID_UID;
- req->rq_auth_mapped_uid = INVALID_UID;
-
- if (req->rq_reqdata_len < sizeof(struct lustre_msg)) {
- CERROR("request size %d too small\n", req->rq_reqdata_len);
- RETURN(SECSVC_DROP);
- }
+ req->rq_req_swab_mask = 0;
- /*
- * only expect v2 message.
- */
- switch (msg->lm_magic) {
- case LUSTRE_MSG_MAGIC_V2:
- req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
- break;
- case LUSTRE_MSG_MAGIC_V2_SWABBED:
- req->rq_flvr.sf_rpc = WIRE_FLVR(__swab32(msg->lm_secflvr));
+ rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
+ switch (rc) {
+ case 1:
+ lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
+ case 0:
break;
default:
- CERROR("invalid magic %x\n", msg->lm_magic);
+ CERROR("error unpacking request from %s x"LPU64"\n",
+ libcfs_id2str(req->rq_peer), req->rq_xid);
RETURN(SECSVC_DROP);
}
- /* unpack the wrapper message if the policy is not null */
- if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
- lustre_unpack_msg(msg, req->rq_reqdata_len)) {
- CERROR("invalid wrapper msg format\n");
- RETURN(SECSVC_DROP);
- }
+ req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
+ req->rq_sp_from = LUSTRE_SP_ANY;
+ req->rq_auth_uid = INVALID_UID;
+ req->rq_auth_mapped_uid = INVALID_UID;
policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
if (!policy) {
LASSERT(policy->sp_sops->accept);
rc = policy->sp_sops->accept(req);
-
+ sptlrpc_policy_put(policy);
LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
- sptlrpc_policy_put(policy);
+
+ /*
+ * if it's not the null flavor (which means the real msg is embedded in a
+ * wrapper msg), reset the swab mask for the coming inner msg unpacking.
+ */
+ if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
+ req->rq_req_swab_mask = 0;
/* sanity check for the request source */
rc = sptlrpc_svc_check_from(req, rc);
RETURN(rc);
}
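/*
 * Illustrative only: a hypothetical server-side dispatch sketch showing how
 * the three SECSVC_* results documented above are meant to be handled; the
 * -EACCES mapping is an assumption, not part of this patch.
 */
static int unwrap_and_dispatch_example(struct ptlrpc_request *req)
{
        int rc = sptlrpc_svc_unwrap_request(req);

        if (rc == SECSVC_DROP)
                return -EACCES;         /* drop the request */
        if (rc == SECSVC_COMPLETE)
                return 0;               /* reply has already been prepared */

        LASSERT(rc == SECSVC_OK);
        /* req->rq_reqmsg now points to the clear-text request message */
        return 0;
}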
-int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req,
- int msglen)
+/**
+ * Used by ptlrpc server, to allocate the reply buffer for \a req. If
+ * successful, req->rq_reply_state is set, and req->rq_reply_state->rs_msg
+ * points to a buffer of \a msglen size.
+ */
+int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
{
struct ptlrpc_sec_policy *policy;
struct ptlrpc_reply_state *rs;
RETURN(rc);
}
+/**
+ * Used by ptlrpc server, to perform transformation upon reply message.
+ *
+ * \post req->rq_reply_off is set to an appropriate server-controlled reply offset.
+ * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
+ */
int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
{
struct ptlrpc_sec_policy *policy;
RETURN(rc);
}
+/**
+ * Used by ptlrpc server, to free reply_state.
+ */
void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
{
struct ptlrpc_sec_policy *policy;
{
struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
- if (ctx == NULL)
- return;
-
- LASSERT(atomic_read(&ctx->sc_refcount) > 0);
- atomic_inc(&ctx->sc_refcount);
+ if (ctx != NULL)
+ cfs_atomic_inc(&ctx->sc_refcount);
}
void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
if (ctx == NULL)
return;
- LASSERT(atomic_read(&ctx->sc_refcount) > 0);
- if (atomic_dec_and_test(&ctx->sc_refcount)) {
+ LASSERT_ATOMIC_POS(&ctx->sc_refcount);
+ if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
if (ctx->sc_policy->sp_sops->free_ctx)
ctx->sc_policy->sp_sops->free_ctx(ctx);
}
if (ctx == NULL)
return;
- LASSERT(atomic_read(&ctx->sc_refcount) > 0);
+ LASSERT_ATOMIC_POS(&ctx->sc_refcount);
if (ctx->sc_policy->sp_sops->invalidate_ctx)
ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
}
* bulk security *
****************************************/
+/**
+ * Perform transformation upon the bulk data pointed to by \a desc. This is called
+ * before transforming the request message.
+ */
int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
}
EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
-/*
+/**
+ * This is called after unwrapping the reply message.
* return nob of actual plain text size received, or error code.
*/
int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
-/*
+/**
+ * This is called after unwrapping the reply message.
* return 0 for success or error code.
*/
int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
}
EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
+/**
+ * Perform transformation upon outgoing bulk read.
+ */
int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
}
EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
+/**
+ * Perform transformation upon incoming bulk write.
+ */
int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
LASSERT(req->rq_bulk_write);
- if (desc->bd_nob_transferred != desc->bd_nob &&
- SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
- SPTLRPC_BULK_SVC_PRIV) {
+ /*
+ * if it's in privacy mode, transferred should >= expected; otherwise
+ * transferred should == expected.
+ */
+ if (desc->bd_nob_transferred < desc->bd_nob ||
+ (desc->bd_nob_transferred > desc->bd_nob &&
+ SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
+ SPTLRPC_BULK_SVC_PRIV)) {
DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
desc->bd_nob_transferred, desc->bd_nob);
return -ETIMEDOUT;
}
EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
+/**
+ * Prepare buffers for incoming bulk write.
+ */
int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
pud = lustre_msg_buf(msg, offset, 0);
- pud->pud_uid = cfs_current()->uid;
- pud->pud_gid = cfs_current()->gid;
- pud->pud_fsuid = cfs_current()->fsuid;
- pud->pud_fsgid = cfs_current()->fsgid;
+ pud->pud_uid = cfs_curproc_uid();
+ pud->pud_gid = cfs_curproc_gid();
+ pud->pud_fsuid = cfs_curproc_fsuid();
+ pud->pud_fsgid = cfs_curproc_fsgid();
pud->pud_cap = cfs_curproc_cap_pack();
pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
task_lock(current);
if (pud->pud_ngroups > current_ngroups)
pud->pud_ngroups = current_ngroups;
- memcpy(pud->pud_groups, cfs_current()->group_info->blocks[0],
+ memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
pud->pud_ngroups * sizeof(__u32));
task_unlock(current);
#endif
}
EXPORT_SYMBOL(sptlrpc_pack_user_desc);
-int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset)
+int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
{
struct ptlrpc_user_desc *pud;
int i;
if (!pud)
return -EINVAL;
- if (lustre_msg_swabbed(msg)) {
+ if (swabbed) {
__swab32s(&pud->pud_uid);
__swab32s(&pud->pud_gid);
__swab32s(&pud->pud_fsuid);
return -EINVAL;
}
- if (lustre_msg_swabbed(msg)) {
+ if (swabbed) {
for (i = 0; i < pud->pud_ngroups; i++)
__swab32s(&pud->pud_groups[i]);
}
{
int rc;
- rwlock_init(&policy_lock);
+ cfs_rwlock_init(&policy_lock);
rc = sptlrpc_gc_init();
if (rc)