return SPTLRPC_FLVR_NULL;
if (!strcmp(name, "plain"))
return SPTLRPC_FLVR_PLAIN;
- if (!strcmp(name, "krb5"))
- return SPTLRPC_FLVR_KRB5;
+ if (!strcmp(name, "krb5n"))
+ return SPTLRPC_FLVR_KRB5N;
if (!strcmp(name, "krb5i"))
return SPTLRPC_FLVR_KRB5I;
if (!strcmp(name, "krb5p"))
return "null";
case SPTLRPC_FLVR_PLAIN:
return "plain";
- case SPTLRPC_FLVR_KRB5:
- return "krb5";
+ case SPTLRPC_FLVR_KRB5N:
+ return "krb5n";
+ case SPTLRPC_FLVR_KRB5A:
+ return "krb5a";
case SPTLRPC_FLVR_KRB5I:
return "krb5i";
case SPTLRPC_FLVR_KRB5P:
return "krb5p";
default:
CERROR("invalid flavor 0x%x(p%u,s%u,v%u)\n", flavor,
- SEC_FLAVOR_POLICY(flavor), SEC_FLAVOR_SUBPOLICY(flavor),
+ SEC_FLAVOR_POLICY(flavor), SEC_FLAVOR_MECH(flavor),
SEC_FLAVOR_SVC(flavor));
}
return "UNKNOWN";
RETURN(0);
}
-void sptlrpc_req_put_ctx(struct ptlrpc_request *req)
+/*
+ * if @sync == 0, this function should return quickly without sleep;
+ * otherwise might trigger ctx destroying rpc to server.
+ */
+void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
ENTRY;
spin_unlock(&req->rq_cli_ctx->cc_lock);
}
- /* this could be called with spinlock hold, use async mode */
- sptlrpc_cli_ctx_put(req->rq_cli_ctx, 0);
+ sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
req->rq_cli_ctx = NULL;
EXIT;
}
spin_unlock(&ctx->cc_lock);
sptlrpc_cli_ctx_get(ctx);
- sptlrpc_req_put_ctx(req);
+ sptlrpc_req_put_ctx(req, 0);
rc = sptlrpc_req_get_ctx(req);
if (!rc) {
LASSERT(req->rq_cli_ctx);
static
void ctx_refresh_interrupt(void *data)
{
- /* do nothing */
+ /*
+ * Interrupt callback for the l_wait_event() that blocks a request
+ * while its security context is being refreshed: flag the request
+ * as interrupted (under rq_lock) so the waiter can abort the wait.
+ * NOTE(review): @data is assumed to be the ptlrpc_request passed
+ * via LWI_TIMEOUT_INTR(..., ctx_refresh_interrupt, req) — confirm
+ * against the caller.
+ */
+ struct ptlrpc_request *req = data;
+
+ spin_lock(&req->rq_lock);
+ req->rq_intr = 1;
+ spin_unlock(&req->rq_lock);
}
static
LASSERT(ctx);
- /* skip reverse ctxs */
- if (ctx->cc_sec->ps_flags & PTLRPC_SEC_FL_REVERSE)
- RETURN(0);
-
/* skip special ctxs */
if (cli_ctx_is_eternal(ctx) || req->rq_ctx_init || req->rq_ctx_fini)
RETURN(0);
LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
again:
+ LASSERT(ctx->cc_ops->validate);
+ if (ctx->cc_ops->validate(ctx) == 0) {
+ req_off_ctx_list(req, ctx);
+ RETURN(0);
+ }
+
if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
req->rq_err = 1;
req_off_ctx_list(req, ctx);
goto again;
}
- LASSERT(ctx->cc_ops->validate);
- if (ctx->cc_ops->validate(ctx) == 0) {
- req_off_ctx_list(req, ctx);
- RETURN(0);
- }
-
/* Now we're sure this context is during upcall, add myself into
* waiting list
*/
req->rq_restart = 0;
spin_unlock(&req->rq_lock);
- lwi = LWI_TIMEOUT_INTR(timeout == 0 ? LONG_MAX : timeout * HZ,
- ctx_refresh_timeout, ctx_refresh_interrupt, req);
+ lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
+ ctx_refresh_interrupt, req);
rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
/* five cases we are here:
req->rq_sec_flavor = req->rq_cli_ctx->cc_sec->ps_flavor;
- /* force SVC_NONE for context initiation rpc, SVC_AUTH for context
+ /* force SVC_NULL for context initiation rpc, SVC_INTG for context
* destruction rpc
*/
if (unlikely(req->rq_ctx_init)) {
req->rq_sec_flavor = SEC_MAKE_RPC_FLAVOR(
SEC_FLAVOR_POLICY(req->rq_sec_flavor),
- SEC_FLAVOR_SUBPOLICY(req->rq_sec_flavor),
- SEC_FLAVOR_SVC(SPTLRPC_SVC_NONE));
+ SEC_FLAVOR_MECH(req->rq_sec_flavor),
+ SPTLRPC_SVC_NULL);
} else if (unlikely(req->rq_ctx_fini)) {
req->rq_sec_flavor = SEC_MAKE_RPC_FLAVOR(
SEC_FLAVOR_POLICY(req->rq_sec_flavor),
- SEC_FLAVOR_SUBPOLICY(req->rq_sec_flavor),
- SEC_FLAVOR_SVC(SPTLRPC_SVC_AUTH));
+ SEC_FLAVOR_MECH(req->rq_sec_flavor),
+ SPTLRPC_SVC_INTG);
}
conf = &req->rq_import->imp_obd->u.cli.cl_sec_conf;
spin_lock_init(&req->rq_lock);
atomic_set(&req->rq_refcount, 10000);
- INIT_LIST_HEAD(&req->rq_ctx_chain);
+ CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
init_waitqueue_head(&req->rq_reply_waitq);
req->rq_import = imp;
req->rq_cli_ctx = ctx;
}
switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
- case SPTLRPC_SVC_NONE:
+ case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
LASSERT(ctx->cc_ops->sign);
rc = ctx->cc_ops->sign(ctx, req);
break;
}
switch (SEC_FLAVOR_SVC(req->rq_sec_flavor)) {
- case SPTLRPC_SVC_NONE:
+ case SPTLRPC_SVC_NULL:
case SPTLRPC_SVC_AUTH:
+ case SPTLRPC_SVC_INTG:
LASSERT(ctx->cc_ops->verify);
rc = ctx->cc_ops->verify(ctx, req);
break;
LASSERT(atomic_read(&sec->ps_busy) == 0);
LASSERT(policy->sp_cops->destroy_sec);
- CWARN("%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
+ CDEBUG(D_SEC, "%s@%p: being destroied\n", sec->ps_policy->sp_name, sec);
policy->sp_cops->destroy_sec(sec);
sptlrpc_policy_put(policy);
sec = imp->imp_sec;
policy = sec->ps_policy;
- if (!atomic_dec_and_test(&sec->ps_refcount)) {
- sptlrpc_policy_put(policy);
- goto out;
- }
+ if (atomic_dec_and_test(&sec->ps_refcount)) {
+ sec_cop_flush_ctx_cache(sec, -1, 1, 1);
+ sptlrpc_gc_del_sec(sec);
- sec_cop_flush_ctx_cache(sec, -1, 1, 1);
- sptlrpc_gc_del_sec(sec);
-
- if (atomic_dec_and_test(&sec->ps_busy))
- sec_cop_destroy_sec(sec);
- else {
- CWARN("delay to destroy %s@%p: busy contexts\n",
- policy->sp_name, sec);
+ if (atomic_dec_and_test(&sec->ps_busy))
+ sec_cop_destroy_sec(sec);
+ else {
+ CWARN("delay destroying busy sec %s %p\n",
+ policy->sp_name, sec);
+ }
+ } else {
+ sptlrpc_policy_put(policy);
}
-out:
imp->imp_sec = NULL;
}
if (imp == NULL || imp->imp_sec == NULL)
return;
- sec_cop_flush_ctx_cache(imp->imp_sec, -1, 0, 1);
+ sec_cop_flush_ctx_cache(imp->imp_sec, -1, 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
* server side security *
****************************************/
+/*
+ * Server-side hook run per incoming request against its target export.
+ *
+ * Only acts on GSS-authenticated requests from root or an MDT
+ * (rq_auth_gss && (rq_auth_usr_root || rq_auth_usr_mdt)) that are
+ * context-initiation RPCs (rq_ctx_init); for those it installs the
+ * request's service context as the reverse (server-to-client) context
+ * on the export's reverse import.  All other requests pass through.
+ *
+ * Always returns 0; asserts that exp_imp_reverse exists when a
+ * reverse context must be installed.
+ */
+int sptlrpc_target_export_check(struct obd_export *exp,
+ struct ptlrpc_request *req)
+{
+ if (!req->rq_auth_gss ||
+ (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt))
+ return 0;
+
+ if (!req->rq_ctx_init)
+ return 0;
+
+ LASSERT(exp->exp_imp_reverse);
+ sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse, req->rq_svc_ctx);
+ return 0;
+}
+
+
int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
{
struct ptlrpc_sec_policy *policy;
switch (rpc_flavor) {
case SPTLRPC_FLVR_NULL:
case SPTLRPC_FLVR_PLAIN:
+ case SPTLRPC_FLVR_KRB5N:
+ case SPTLRPC_FLVR_KRB5A:
break;
case SPTLRPC_FLVR_KRB5P:
conf->sfc_bulk_priv = BULK_PRIV_ALG_ARC4;
case SPTLRPC_FLVR_PLAIN:
conf->sfc_bulk_csum = BULK_CSUM_ALG_MD5;
break;
+ case SPTLRPC_FLVR_KRB5N:
+ case SPTLRPC_FLVR_KRB5A:
case SPTLRPC_FLVR_KRB5I:
case SPTLRPC_FLVR_KRB5P:
conf->sfc_bulk_csum = BULK_CSUM_ALG_SHA1;
static __u32 __flavors[] = {
SPTLRPC_FLVR_NULL,
SPTLRPC_FLVR_PLAIN,
+ SPTLRPC_FLVR_KRB5N,
+ SPTLRPC_FLVR_KRB5A,
SPTLRPC_FLVR_KRB5I,
SPTLRPC_FLVR_KRB5P,
};
{
if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
return "*";
- if (sec->ps_flags & PTLRPC_SEC_FL_REVERSE)
+ if (sec_is_reverse(sec))
return "c";
return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
}