struct ptlrpc_sec_sops {
int (*accept) (struct ptlrpc_request *req);
int (*authorize) (struct ptlrpc_request *req);
+ void (*invalidate_ctx)
+ (struct ptlrpc_svc_ctx *ctx);
/* buffer manipulation */
int (*alloc_rs) (struct ptlrpc_request *req,
int msgsize);
void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs);
void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
+void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
/*
* reverse context
*/
static int mdt_sec_ctx_handle(struct mdt_thread_info *info)
{
-        return mdt_handle_idmap(info);
+        int rc;
+
+        rc = mdt_handle_idmap(info);
+
+        if (unlikely(rc)) {
+                struct ptlrpc_request *req = mdt_info_req(info);
+                __u32 opc;
+
+                /* On a failed SEC_CTX_INIT(_CONT) request, invalidate the
+                 * service security context so a bad gss context is not kept
+                 * around and reused; the client must renegotiate.  Note the
+                 * comparison must be '==', not '=': an assignment here would
+                 * clobber opc and make the condition unconditionally true. */
+                opc = lustre_msg_get_opc(req->rq_reqmsg);
+                if (opc == SEC_CTX_INIT || opc == SEC_CTX_INIT_CONT)
+                        sptlrpc_svc_ctx_invalidate(req);
+        }
+
+        return rc;
}
static struct mdt_object *mdt_obj(struct lu_object *o)
rc = ptlrpc_queue_wait(req);
if (rc) {
/* If any _real_ denial be made, we expect server return
- * error reply instead of simply drop request. So here
- * all errors during networking just be treat as TIMEDOUT,
- * caller might re-try negotiation again and again, leave
- * recovery decisions to general ptlrpc layer.
+ * -EACCES reply or return success but indicate gss error
+ * inside reply message. All other errors are treated as
+ * timeout, caller might try the negotiation repeatedly,
+ * leave recovery decisions to general ptlrpc layer.
+ *
+ * FIXME maybe some other error code shouldn't be treated
+ * as timeout.
*/
- param.status = -ETIMEDOUT;
+ param.status = rc;
+ if (rc != -EACCES)
+ param.status = -ETIMEDOUT;
goto out_copy;
}
RETURN(rc);
}
+/*
+ * Policy hook: invalidate the gss service context bound to @svc_ctx.
+ *
+ * A NULL @svc_ctx is tolerated and treated as a no-op.  Otherwise the
+ * embedded request context is looked up and its gss context is handed to
+ * gss_svc_upcall_destroy_ctx(), so subsequent requests cannot keep using
+ * it and the client is forced to renegotiate.
+ */
+static
+void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
+{
+        struct gss_svc_reqctx *grctx;
+        ENTRY;
+
+        if (svc_ctx == NULL) {
+                EXIT;
+                return;
+        }
+
+        grctx = gss_svc_ctx2reqctx(svc_ctx);
+
+        /* warn loudly: invalidation is an exceptional, security-relevant
+         * event worth a trace in the logs */
+        CWARN("gss svc invalidate ctx %p(%u)\n",
+              grctx->src_ctx, grctx->src_ctx->gsc_uid);
+        gss_svc_upcall_destroy_ctx(grctx->src_ctx);
+
+        EXIT;
+}
+
static inline
int gss_svc_payload(struct gss_svc_reqctx *grctx, int msgsize, int privacy)
{
static struct ptlrpc_sec_sops gss_sec_sops = {
.accept = gss_svc_accept,
+ .invalidate_ctx = gss_svc_invalidate_ctx,
.alloc_rs = gss_svc_alloc_rs,
.authorize = gss_svc_authorize,
.free_rs = gss_svc_free_rs,
req->rq_svc_ctx = NULL;
}
+/*
+ * Invalidate the service security context attached to @req, if any.
+ *
+ * Dispatches to the security policy's ->invalidate_ctx() hook when the
+ * policy provides one; a request without a service context, or a policy
+ * without the hook, makes this a no-op.  The caller must still hold a
+ * reference on the context (asserted below); this function does not
+ * drop that reference.
+ */
+void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
+{
+        struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
+
+        if (ctx == NULL)
+                return;
+
+        LASSERT(atomic_read(&ctx->sc_refcount) > 0);
+        if (ctx->sc_policy->sp_sops->invalidate_ctx)
+                ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
+}
+EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
+
/****************************************
* bulk security *
****************************************/