fix the race between gss ctx destroy and request callback.
b=13719
r=fanyong,tappro
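In outline, the fix turns ctx teardown into a refcount handoff between the synchronous fini path and the asynchronous request_out_callback(): the fini path takes an extra reference before issuing the SEC_CTX_FINI rpc, each side drops one reference when it is done, and whichever side drops the count to zero finishes the destroy. A minimal, self-contained sketch of that pattern (hypothetical names, C11 atomics standing in for the kernel's atomic_t; not the actual Lustre code):

#include <stdatomic.h>
#include <stdio.h>

struct ctx {
        atomic_int refcount;                    /* stands in for cc_refcount */
};

static void ctx_teardown(struct ctx *c)
{
        /* free mech context, handle, ... (last reference is gone) */
        printf("teardown %p\n", (void *)c);
}

/* fini path: give the in-flight destroy rpc its own reference, then drop
 * ours; if the callback already ran, we are the last holder and clean up */
static void ctx_fini(struct ctx *c)
{
        atomic_fetch_add(&c->refcount, 1);      /* ref owned by the rpc */
        /* ... queue the asynchronous destroy rpc here ... */
        if (atomic_fetch_sub(&c->refcount, 1) == 1)
                ctx_teardown(c);
}

/* callback path: runs once the destroy rpc has gone out on the wire */
static void request_out_callback(struct ctx *c)
{
        if (atomic_fetch_sub(&c->refcount, 1) == 1)
                ctx_teardown(c);
}

int main(void)
{
        struct ctx c;

        atomic_init(&c.refcount, 1);            /* ref held by the fini path */
        ctx_fini(&c);                           /* rpc ref still out: no teardown yet */
        request_out_callback(&c);               /* last ref dropped: teardown runs */
        return 0;
}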
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
if (cli_ctx_is_error(ctx) || !cli_ctx_is_uptodate(ctx)) {
CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
"don't send destroy rpc\n", ctx,
"server finishing reverse" : "client finishing forward",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
"server finishing reverse" : "client finishing forward",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- /* context's refcount could be 0, steal one */
- atomic_inc(&ctx->cc_refcount);
-
gctx->gc_proc = PTLRPC_GSS_PROC_DESTROY;
req = ptlrpc_prep_req_pool(imp, LUSTRE_OBD_VERSION, SEC_CTX_FINI,
if (!req) {
CWARN("ctx %p(%u): fail to prepare rpc, destroy locally\n",
ctx, ctx->cc_vcred.vc_uid);
- GOTO(out_ref, rc = -ENOMEM);
+ GOTO(out, rc = -ENOMEM);
}
/* fix the user desc */
}
ptlrpc_req_finished(req);
-out_ref:
- atomic_dec(&ctx->cc_refcount);
LASSERT(gctx_kr->gck_timer == NULL);
rc = gss_cli_ctx_fini_common(sec, ctx);
CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
sec->ps_policy->sp_name, sec);
sptlrpc_sec_destroy(sec);
CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
sec->ps_policy->sp_name, sec);
sptlrpc_sec_destroy(sec);
int rc;
rc = gss_cli_ctx_fini_common(sec, ctx);
+ if (rc < 0)
+ return;
+
CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
sec->ps_policy->sp_name, sec);
sptlrpc_sec_destroy(sec);
CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
sec->ps_policy->sp_name, sec);
sptlrpc_sec_destroy(sec);
static
void gss_cli_ctx_finalize(struct gss_cli_ctx *gctx)
{
+ if (gctx->gc_mechctx) {
lgss_delete_sec_context(&gctx->gc_mechctx);
+ gctx->gc_mechctx = NULL;
+ }
rawobj_free(&gctx->gc_handle);
}
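The gc_mechctx change above applies the usual guard-and-clear release idiom, presumably so that a repeated finalize of the same gctx is a harmless no-op; a generic sketch of the idiom (hypothetical helper, free() standing in for lgss_delete_sec_context(); not Lustre code):

#include <stdlib.h>

/* Release a resource at most once: check the pointer, release it, then
 * clear it so a second call finds NULL and does nothing. */
static void release_once(void **pp)
{
        if (*pp) {
                free(*pp);
                *pp = NULL;
        }
}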
- * return 1 if the busy count of the sec dropped to zero, then usually caller
- * should destroy the sec too; otherwise return 0.
+ * return:
+ * -1: the destroy has been taken care of by someone else
+ * 0: proceed to destroy the ctx
+ * 1: busy count dropped to 0, proceed to destroy ctx and sec
*/
int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
struct ptlrpc_cli_ctx *ctx)
LASSERT(atomic_read(&sec->ps_busy) > 0);
if (gctx->gc_mechctx) {
+ /* the final context fini rpc will use this ctx too, and it is
+ * asynchronous, completed by request_out_callback(). so we take
+ * an extra refcount here; whoever finally drops the refcount to
+ * 0 is responsible for the rest of the destroy. */
+ atomic_inc(&ctx->cc_refcount);
+
gss_do_ctx_fini_rpc(gctx);
gss_cli_ctx_finalize(gctx);
+
+ if (!atomic_dec_and_test(&ctx->cc_refcount))
+ return -1;
}
if (sec_is_reverse(sec))
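Taken together with the return-value comment above, a caller of gss_cli_ctx_fini_common() ends up shaped roughly as follows; this only consolidates the fragments shown in the hunks above, and the surrounding function is not part of this patch excerpt:

        rc = gss_cli_ctx_fini_common(sec, ctx);
        if (rc < 0)
                return;         /* -1: the in-flight fini rpc owns the rest of the destroy */

        if (rc > 0) {
                /* 1: that was the last ctx, the sec goes down with it */
                CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
                      sec->ps_policy->sp_name, sec);
                sptlrpc_sec_destroy(sec);
        }
        /* 0: ctx destroyed, sec still in use */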