X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fptlrpc%2Fgss%2Fsec_gss.c;h=f69e2d5c31e45087676dd4db18b64fd6c99e30a0;hb=refs%2Fchanges%2F74%2F13174%2F2;hp=672f1cb89505a778eeb1ababae2184bbcfb83f78;hpb=cec72a356891eaa729314a7bc89c4b2aaef0a31b;p=fs%2Flustre-release.git

diff --git a/lustre/ptlrpc/gss/sec_gss.c b/lustre/ptlrpc/gss/sec_gss.c
index 672f1cb..f69e2d5 100644
--- a/lustre/ptlrpc/gss/sec_gss.c
+++ b/lustre/ptlrpc/gss/sec_gss.c
@@ -1,10 +1,10 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * Modifications for Lustre
  *
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ *
  * Author: Eric Mei 
@@ -46,11 +46,7 @@
  *
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
 #define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
 #include 
 #include 
 #include 
@@ -58,9 +54,6 @@
 #include 
 #include 
 #include 
-#else
-#include 
-#endif
 
 #include 
 #include 
@@ -76,6 +69,7 @@
 #include "gss_api.h"
 
 #include 
+#include 
 
 /*
  * early reply have fixed size, respectively in privacy and integrity mode.
@@ -332,23 +326,23 @@ out_free:
 int cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
 {
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount));
+        LASSERT(atomic_read(&ctx->cc_refcount));
 
-        if (!cfs_test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
-                if (!ctx->cc_early_expire)
-                        cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+        if (!test_and_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags)) {
+                if (!ctx->cc_early_expire)
+                        clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
-                CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
-                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
-                      ctx->cc_expire,
-                      ctx->cc_expire == 0 ? 0 :
-                      cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
+                CWARN("ctx %p(%u->%s) get expired: %lu(%+lds)\n",
+                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
+                      ctx->cc_expire,
+                      ctx->cc_expire == 0 ? 0 :
+                      cfs_time_sub(ctx->cc_expire, cfs_time_current_sec()));
 
-                sptlrpc_cli_ctx_wakeup(ctx);
-                return 1;
-        }
+                sptlrpc_cli_ctx_wakeup(ctx);
+                return 1;
+        }
 
-        return 0;
+        return 0;
 }
 
 /*
@@ -389,8 +383,8 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
         /* At this point this ctx might have been marked as dead by
          * someone else, in which case nobody will make further use
          * of it. we don't care, and mark it UPTODATE will help
-         * destroying server side context when it be destroied. */
-        cfs_set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+         * destroying the server side context when it is destroyed. */
+        set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
 
         if (sec_is_reverse(ctx->cc_sec)) {
                 CWARN("server installed reverse ctx %p idx "LPX64", "
@@ -512,7 +506,7 @@ int gss_do_check_seq(unsigned long *window, __u32 win_size, __u32 *max_seq,
          */
         switch (phase) {
         case 0:
-                if (cfs_test_bit(seq_num % win_size, window))
+                if (test_bit(seq_num % win_size, window))
                         goto replay;
                 break;
         case 1:
@@ -542,9 +536,9 @@ replay:
  */
 int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
 {
-        int rc = 0;
+        int rc = 0;
 
-        cfs_spin_lock(&ssd->ssd_lock);
+        spin_lock(&ssd->ssd_lock);
 
         if (set == 0) {
                 /*
@@ -578,8 +572,8 @@ int gss_check_seq_num(struct gss_svc_seq_data *ssd, __u32 seq_num, int set)
                 gss_stat_oos_record_svc(2, 0);
         }
 exit:
-        cfs_spin_unlock(&ssd->ssd_lock);
-        return rc;
+        spin_unlock(&ssd->ssd_lock);
+        return rc;
 }
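The hunks above touch both halves of GSS replay protection: the client-side sequence counter and the server-side window check in gss_do_check_seq()/gss_check_seq_num(). For readers unfamiliar with the idea, here is a self-contained sketch of such a sliding bitmap window: one bit per recent sequence number, with max_seq tracking the newest value accepted. It is a simplified model under assumed parameters (single-threaded, a fixed 128-bit window, illustrative names), not the Lustre implementation, which adds locking, the set/phase handling and the statistics seen above.

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define WIN_SIZE   128u        /* assumed window width in bits */
#define LONG_BITS  (sizeof(unsigned long) * CHAR_BIT)

struct seq_win {
        unsigned long bits[WIN_SIZE / LONG_BITS];
        unsigned int  max_seq;  /* highest sequence accepted so far */
};

static int test_and_set_slot(struct seq_win *w, unsigned int seq)
{
        unsigned int slot = seq % WIN_SIZE;
        unsigned long mask = 1UL << (slot % LONG_BITS);
        int was_set = (w->bits[slot / LONG_BITS] & mask) != 0;

        w->bits[slot / LONG_BITS] |= mask;
        return was_set;
}

static void clear_slot(struct seq_win *w, unsigned int seq)
{
        unsigned int slot = seq % WIN_SIZE;

        w->bits[slot / LONG_BITS] &= ~(1UL << (slot % LONG_BITS));
}

/* 0: accept; -1: replay, or too old to tell */
static int check_seq(struct seq_win *w, unsigned int seq)
{
        if (seq > w->max_seq) {
                /* ahead of the window: clear every slot the window
                 * slides over, then mark this sequence as seen */
                unsigned int s = w->max_seq + 1;

                if (seq - w->max_seq >= WIN_SIZE)
                        s = seq - WIN_SIZE + 1;
                for (; s < seq; s++)
                        clear_slot(w, s);
                w->max_seq = seq;
                test_and_set_slot(w, seq);
                return 0;
        }
        if (w->max_seq - seq >= WIN_SIZE)
                return -1;      /* fell behind the window: cannot verify */
        return test_and_set_slot(w, seq) ? -1 : 0;
}

int main(void)
{
        struct seq_win w;
        int r1, r2, r3;

        memset(&w, 0, sizeof(w));
        r1 = check_seq(&w, 5);  /* 0: first use */
        r2 = check_seq(&w, 5);  /* -1: replay */
        r3 = check_seq(&w, 4);  /* 0: still inside the window */
        printf("%d %d %d\n", r1, r2, r3);
        return 0;
}

The "too old to tell" case is exactly why the client-side repack logic below exists: a request signed with a stale sequence number would be dropped by this check even though it is not a replay.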
 
 /***************************************
@@ -626,24 +620,22 @@ int gss_cli_ctx_match(struct ptlrpc_cli_ctx *ctx, struct vfs_cred *vcred)
 void gss_cli_ctx_flags2str(unsigned long flags, char *buf, int bufsize)
 {
-        buf[0] = '\0';
+        buf[0] = '\0';
 
-        if (flags & PTLRPC_CTX_NEW)
-                strncat(buf, "new,", bufsize);
-        if (flags & PTLRPC_CTX_UPTODATE)
-                strncat(buf, "uptodate,", bufsize);
-        if (flags & PTLRPC_CTX_DEAD)
-                strncat(buf, "dead,", bufsize);
-        if (flags & PTLRPC_CTX_ERROR)
-                strncat(buf, "error,", bufsize);
-        if (flags & PTLRPC_CTX_CACHED)
-                strncat(buf, "cached,", bufsize);
-        if (flags & PTLRPC_CTX_ETERNAL)
-                strncat(buf, "eternal,", bufsize);
-        if (buf[0] == '\0')
-                strncat(buf, "-,", bufsize);
-
-        buf[strlen(buf) - 1] = '\0';
+        if (flags & PTLRPC_CTX_NEW)
+                strlcat(buf, "new,", bufsize);
+        if (flags & PTLRPC_CTX_UPTODATE)
+                strlcat(buf, "uptodate,", bufsize);
+        if (flags & PTLRPC_CTX_DEAD)
+                strlcat(buf, "dead,", bufsize);
+        if (flags & PTLRPC_CTX_ERROR)
+                strlcat(buf, "error,", bufsize);
+        if (flags & PTLRPC_CTX_CACHED)
+                strlcat(buf, "cached,", bufsize);
+        if (flags & PTLRPC_CTX_ETERNAL)
+                strlcat(buf, "eternal,", bufsize);
+        if (buf[0] == '\0')
+                strlcat(buf, "-,", bufsize);
 }
 
 int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
@@ -669,33 +661,33 @@ int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
                 flags |= LUSTRE_GSS_PACK_USER;
 
 redo:
-        seq = cfs_atomic_inc_return(&gctx->gc_seq);
-
-        rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
-                          ctx->cc_sec->ps_part,
-                          flags, gctx->gc_proc, seq, svc,
-                          &gctx->gc_handle);
-        if (rc < 0)
-                RETURN(rc);
-
-        /* gss_sign_msg() msg might take long time to finish, in which period
-         * more rpcs could be wrapped up and sent out. if we found too many
-         * of them we should repack this rpc, because sent it too late might
-         * lead to the sequence number fall behind the window on server and
-         * be dropped. also applies to gss_cli_ctx_seal().
-         *
-         * Note: null mode dosen't check sequence number. */
-        if (svc != SPTLRPC_SVC_NULL &&
-            cfs_atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
-                int behind = cfs_atomic_read(&gctx->gc_seq) - seq;
-
-                gss_stat_oos_record_cli(behind);
-                CWARN("req %p: %u behind, retry signing\n", req, behind);
-                goto redo;
-        }
-
-        req->rq_reqdata_len = rc;
-        RETURN(0);
+        seq = atomic_inc_return(&gctx->gc_seq);
+
+        rc = gss_sign_msg(req->rq_reqbuf, gctx->gc_mechctx,
+                          ctx->cc_sec->ps_part,
+                          flags, gctx->gc_proc, seq, svc,
+                          &gctx->gc_handle);
+        if (rc < 0)
+                RETURN(rc);
+
+        /* gss_sign_msg() might take a long time to finish, during which
+         * more RPCs could be wrapped up and sent out. If we find too many
+         * of them we should repack this RPC, because sending it too late
+         * might let its sequence number fall behind the window on the
+         * server and be dropped. Also applies to gss_cli_ctx_seal().
+         *
+         * Note: null mode doesn't check the sequence number. */
+        if (svc != SPTLRPC_SVC_NULL &&
+            atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
+                int behind = atomic_read(&gctx->gc_seq) - seq;
+
+                gss_stat_oos_record_cli(behind);
+                CWARN("req %p: %u behind, retry signing\n", req, behind);
+                goto redo;
+        }
+
+        req->rq_reqdata_len = rc;
+        RETURN(0);
 }
 
 static
@@ -943,7 +935,7 @@ int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
                 ghdr->gh_flags |= LUSTRE_GSS_PACK_USER;
 
 redo:
-        ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
+        ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
 
         /* buffer objects */
         hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
@@ -961,29 +953,29 @@ redo:
         }
         LASSERT(token.len <= buflens[1]);
 
-        /* see explain in gss_cli_ctx_sign() */
-        if (unlikely(cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
-                     GSS_SEQ_REPACK_THRESHOLD)) {
-                int behind = cfs_atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
+        /* see the explanation in gss_cli_ctx_sign() */
+        if (unlikely(atomic_read(&gctx->gc_seq) - ghdr->gh_seq >
+                     GSS_SEQ_REPACK_THRESHOLD)) {
+                int behind = atomic_read(&gctx->gc_seq) - ghdr->gh_seq;
 
-                gss_stat_oos_record_cli(behind);
-                CWARN("req %p: %u behind, retry sealing\n", req, behind);
+                gss_stat_oos_record_cli(behind);
+                CWARN("req %p: %u behind, retry sealing\n", req, behind);
 
-                ghdr->gh_seq = cfs_atomic_inc_return(&gctx->gc_seq);
-                goto redo;
-        }
+                ghdr->gh_seq = atomic_inc_return(&gctx->gc_seq);
+                goto redo;
+        }
 
-        /* now set the final wire data length */
-        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
-        RETURN(0);
+        /* now set the final wire data length */
+        req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len, 0);
+        RETURN(0);
 
 err_free:
-        if (!req->rq_pool) {
-                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
-                req->rq_reqbuf = NULL;
-                req->rq_reqbuf_len = 0;
-        }
-        RETURN(rc);
+        if (!req->rq_pool) {
+                OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
+                req->rq_reqbuf = NULL;
+                req->rq_reqbuf_len = 0;
+        }
+        RETURN(rc);
 }
 
 int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
@@ -1120,19 +1112,19 @@ int gss_sec_create_common(struct gss_sec *gsec,
                 return -EOPNOTSUPP;
         }
 
-        cfs_spin_lock_init(&gsec->gs_lock);
+        spin_lock_init(&gsec->gs_lock);
         gsec->gs_rvs_hdl = 0ULL;
 
-        /* initialize upper ptlrpc_sec */
-        sec = &gsec->gs_base;
-        sec->ps_policy = policy;
-        cfs_atomic_set(&sec->ps_refcount, 0);
-        cfs_atomic_set(&sec->ps_nctx, 0);
-        sec->ps_id = sptlrpc_get_next_secid();
-        sec->ps_flvr = *sf;
-        sec->ps_import = class_import_get(imp);
-        cfs_spin_lock_init(&sec->ps_lock);
-        CFS_INIT_LIST_HEAD(&sec->ps_gc_list);
+        /* initialize upper ptlrpc_sec */
+        sec = &gsec->gs_base;
+        sec->ps_policy = policy;
+        atomic_set(&sec->ps_refcount, 0);
+        atomic_set(&sec->ps_nctx, 0);
+        sec->ps_id = sptlrpc_get_next_secid();
+        sec->ps_flvr = *sf;
+        sec->ps_import = class_import_get(imp);
+        spin_lock_init(&sec->ps_lock);
+        INIT_LIST_HEAD(&sec->ps_gc_list);
 
         if (!svcctx) {
                 sec->ps_gc_interval = GSS_GC_INTERVAL;
@@ -1153,29 +1145,29 @@ int gss_sec_create_common(struct gss_sec *gsec,
 void gss_sec_destroy_common(struct gss_sec *gsec)
 {
-        struct ptlrpc_sec *sec = &gsec->gs_base;
-        ENTRY;
+        struct ptlrpc_sec *sec = &gsec->gs_base;
+        ENTRY;
 
-        LASSERT(sec->ps_import);
-        LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
-        LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
+        LASSERT(sec->ps_import);
+        LASSERT(atomic_read(&sec->ps_refcount) == 0);
+        LASSERT(atomic_read(&sec->ps_nctx) == 0);
 
-        if (gsec->gs_mech) {
-                lgss_mech_put(gsec->gs_mech);
-                gsec->gs_mech = NULL;
-        }
+        if (gsec->gs_mech) {
+                lgss_mech_put(gsec->gs_mech);
+                gsec->gs_mech = NULL;
+        }
 
-        class_import_put(sec->ps_import);
+        class_import_put(sec->ps_import);
 
-        if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
-                sptlrpc_enc_pool_del_user();
+        if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
+                sptlrpc_enc_pool_del_user();
 
-        EXIT;
+        EXIT;
 }
 
 void gss_sec_kill(struct ptlrpc_sec *sec)
 {
-        sec->ps_dying = 1;
+        sec->ps_dying = 1;
 }
 
 int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
@@ -1183,31 +1175,31 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
                             struct ptlrpc_ctx_ops *ctxops,
                             struct vfs_cred *vcred)
 {
-        struct gss_cli_ctx *gctx = ctx2gctx(ctx);
-
-        gctx->gc_win = 0;
-        cfs_atomic_set(&gctx->gc_seq, 0);
-
-        CFS_INIT_HLIST_NODE(&ctx->cc_cache);
-        cfs_atomic_set(&ctx->cc_refcount, 0);
-        ctx->cc_sec = sec;
-        ctx->cc_ops = ctxops;
-        ctx->cc_expire = 0;
-        ctx->cc_flags = PTLRPC_CTX_NEW;
-        ctx->cc_vcred = *vcred;
-        cfs_spin_lock_init(&ctx->cc_lock);
-        CFS_INIT_LIST_HEAD(&ctx->cc_req_list);
-        CFS_INIT_LIST_HEAD(&ctx->cc_gc_chain);
-
-        /* take a ref on belonging sec, balanced in ctx destroying */
-        cfs_atomic_inc(&sec->ps_refcount);
-        /* statistic only */
-        cfs_atomic_inc(&sec->ps_nctx);
-
-        CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
-               sec->ps_policy->sp_name, ctx->cc_sec,
-               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
-        return 0;
+        struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+
+        gctx->gc_win = 0;
+        atomic_set(&gctx->gc_seq, 0);
+
+        INIT_HLIST_NODE(&ctx->cc_cache);
+        atomic_set(&ctx->cc_refcount, 0);
+        ctx->cc_sec = sec;
+        ctx->cc_ops = ctxops;
+        ctx->cc_expire = 0;
+        ctx->cc_flags = PTLRPC_CTX_NEW;
+        ctx->cc_vcred = *vcred;
+        spin_lock_init(&ctx->cc_lock);
+        INIT_LIST_HEAD(&ctx->cc_req_list);
+        INIT_LIST_HEAD(&ctx->cc_gc_chain);
+
+        /* take a ref on the owning sec, balanced when the ctx is destroyed */
+        atomic_inc(&sec->ps_refcount);
+        /* statistic only */
+        atomic_inc(&sec->ps_nctx);
+
+        CDEBUG(D_SEC, "%s@%p: create ctx %p(%u->%s)\n",
+               sec->ps_policy->sp_name, ctx->cc_sec,
+               ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+        return 0;
 }
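Both gss_cli_ctx_sign() and gss_cli_ctx_seal() above rely on the redo/threshold pattern whose comment was reworded in this hunk. The shape of that pattern, stripped of the Lustre types, looks roughly like this (a sketch using C11 atomics; REPACK_THRESHOLD, slow_sign() and the rest are illustrative stand-ins, not the Lustre API):

#include <stdatomic.h>
#include <stdio.h>

#define REPACK_THRESHOLD 10u    /* stands in for GSS_SEQ_REPACK_THRESHOLD */

static atomic_uint gc_seq;      /* shared per-context sequence counter */

/* stand-in for gss_sign_msg(): embeds seq in the wire message */
static void slow_sign(unsigned int seq)
{
        printf("signed with seq %u\n", seq);
}

static void sign_request(void)
{
        unsigned int seq;

redo:
        seq = atomic_fetch_add(&gc_seq, 1) + 1; /* like atomic_inc_return() */
        slow_sign(seq);

        /* Other threads may have consumed many sequence numbers while we
         * were signing; if ours is now too stale the server's replay
         * window would drop the request, so grab a fresh sequence and
         * re-sign instead of sending a dead request. */
        if (atomic_load(&gc_seq) - seq > REPACK_THRESHOLD)
                goto redo;
}

int main(void)
{
        sign_request();
        return 0;
}

Note the trade-off the threshold encodes: re-signing is pure overhead, but it is cheaper than letting the request be rejected at the server and resent from scratch.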
 
 /*
@@ -1218,44 +1210,44 @@ int gss_cli_ctx_init_common(struct ptlrpc_sec *sec,
 int gss_cli_ctx_fini_common(struct ptlrpc_sec *sec,
                             struct ptlrpc_cli_ctx *ctx)
 {
-        struct gss_cli_ctx *gctx = ctx2gctx(ctx);
+        struct gss_cli_ctx *gctx = ctx2gctx(ctx);
 
-        LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
-        LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
-        LASSERT(ctx->cc_sec == sec);
+        LASSERT(atomic_read(&sec->ps_nctx) > 0);
+        LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+        LASSERT(ctx->cc_sec == sec);
 
-        /*
-         * remove UPTODATE flag of reverse ctx thus we won't send fini rpc,
-         * this is to avoid potential problems of client side reverse svc ctx
-         * be mis-destroyed in various recovery senarios. anyway client can
-         * manage its reverse ctx well by associating it with its buddy ctx.
-         */
-        if (sec_is_reverse(sec))
-                ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
+        /*
+         * Remove the UPTODATE flag of a reverse ctx so we won't send a fini
+         * RPC; this avoids the client side reverse svc ctx being
+         * mis-destroyed in various recovery scenarios. The client can still
+         * manage its reverse ctx by associating it with its buddy ctx.
+         */
+        if (sec_is_reverse(sec))
+                ctx->cc_flags &= ~PTLRPC_CTX_UPTODATE;
 
-        if (gctx->gc_mechctx) {
-                /* the final context fini rpc will use this ctx too, and it's
-                 * asynchronous which finished by request_out_callback(). so
-                 * we add refcount, whoever drop finally drop the refcount to
-                 * 0 should responsible for the rest of destroy. */
-                cfs_atomic_inc(&ctx->cc_refcount);
+        if (gctx->gc_mechctx) {
+                /* the final context fini RPC will use this ctx too, and it
+                 * is asynchronous, finished by request_out_callback(). So we
+                 * take a refcount; whoever finally drops the refcount to 0
+                 * is responsible for the rest of the destroy. */
+                atomic_inc(&ctx->cc_refcount);
 
-                gss_do_ctx_fini_rpc(gctx);
-                gss_cli_ctx_finalize(gctx);
+                gss_do_ctx_fini_rpc(gctx);
+                gss_cli_ctx_finalize(gctx);
 
-                if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
-                        return 1;
-        }
+                if (!atomic_dec_and_test(&ctx->cc_refcount))
+                        return 1;
+        }
 
-        if (sec_is_reverse(sec))
-                CWARN("reverse sec %p: destroy ctx %p\n",
-                      ctx->cc_sec, ctx);
-        else
-                CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
-                      sec->ps_policy->sp_name, ctx->cc_sec,
-                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
+        if (sec_is_reverse(sec))
+                CWARN("reverse sec %p: destroy ctx %p\n",
+                      ctx->cc_sec, ctx);
+        else
+                CWARN("%s@%p: destroy ctx %p(%u->%s)\n",
+                      sec->ps_policy->sp_name, ctx->cc_sec,
+                      ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
 
-        return 0;
+        return 0;
 }
 
 static
@@ -1705,12 +1697,24 @@ int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
                 if (newbuf == NULL)
                         RETURN(-ENOMEM);
 
+                /* Must lock this, so that an otherwise unprotected change of
+                 * rq_reqmsg does not race with parallel threads traversing
+                 * imp_replay_list. See LU-3333.
+                 * This is a band-aid at best; we really need to deal with
+                 * this in the request enlarging code before the unpacking
+                 * that is already there. */
+                if (req->rq_import)
+                        spin_lock(&req->rq_import->imp_lock);
+
                 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
 
                 OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
                 req->rq_reqbuf = newbuf;
                 req->rq_reqbuf_len = newbuf_size;
                 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 1, 0);
+
+                if (req->rq_import)
+                        spin_unlock(&req->rq_import->imp_lock);
         }
 
         /* do enlargement, from wrapper to embedded, from end to begin */
@@ -1771,6 +1775,8 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
         if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
                 void *src, *dst;
 
+                if (req->rq_import)
+                        spin_lock(&req->rq_import->imp_lock);
                 /* move clear text backward. */
                 src = req->rq_clrbuf;
                 dst = (char *) req->rq_reqbuf + newcipbuf_size;
@@ -1780,6 +1786,9 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                 req->rq_clrbuf = (struct lustre_msg *) dst;
                 req->rq_clrbuf_len = newclrbuf_size;
                 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+
+                if (req->rq_import)
+                        spin_unlock(&req->rq_import->imp_lock);
         } else {
                 /* sadly we have to split out the clear buffer */
                 LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
@@ -1794,6 +1803,15 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                 if (newclrbuf == NULL)
                         RETURN(-ENOMEM);
 
+                /* Must lock this, so that an otherwise unprotected change of
+                 * rq_reqmsg does not race with parallel threads traversing
+                 * imp_replay_list. See LU-3333.
+                 * This is a band-aid at best; we really need to deal with
+                 * this in the request enlarging code before the unpacking
+                 * that is already there. */
+                if (req->rq_import)
+                        spin_lock(&req->rq_import->imp_lock);
+
                 memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
 
                 if (req->rq_reqbuf == NULL ||
@@ -1806,6 +1824,9 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                 req->rq_clrbuf = newclrbuf;
                 req->rq_clrbuf_len = newclrbuf_size;
                 req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
+
+                if (req->rq_import)
+                        spin_unlock(&req->rq_import->imp_lock);
         }
 
         _sptlrpc_enlarge_msg_inplace(req->rq_clrbuf, 0, newmsg_size);
@@ -1876,17 +1897,17 @@ void gss_svc_reqctx_free(struct gss_svc_reqctx *grctx)
 static inline
 void gss_svc_reqctx_addref(struct gss_svc_reqctx *grctx)
 {
-        LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
-        cfs_atomic_inc(&grctx->src_base.sc_refcount);
+        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
+        atomic_inc(&grctx->src_base.sc_refcount);
 }
 
 static inline
 void gss_svc_reqctx_decref(struct gss_svc_reqctx *grctx)
 {
-        LASSERT(cfs_atomic_read(&grctx->src_base.sc_refcount) > 0);
+        LASSERT(atomic_read(&grctx->src_base.sc_refcount) > 0);
 
-        if (cfs_atomic_dec_and_test(&grctx->src_base.sc_refcount))
-                gss_svc_reqctx_free(grctx);
+        if (atomic_dec_and_test(&grctx->src_base.sc_refcount))
+                gss_svc_reqctx_free(grctx);
 }
 
 static
@@ -1901,7 +1922,7 @@ int gss_svc_sign(struct ptlrpc_request *req,
 
         LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
 
-        /* embedded lustre_msg might have been shrinked */
+        /* embedded lustre_msg might have been shrunk */
         if (req->rq_replen != rs->rs_repbuf->lm_buflens[1])
                 lustre_shrink_msg(rs->rs_repbuf, 1, req->rq_replen, 1);
 
@@ -2381,10 +2402,10 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
         if (!grctx)
                 RETURN(SECSVC_DROP);
 
-        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
-        cfs_atomic_set(&grctx->src_base.sc_refcount, 1);
-        req->rq_svc_ctx = &grctx->src_base;
-        gw = &grctx->src_wirectx;
+        grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
+        atomic_set(&grctx->src_base.sc_refcount, 1);
+        req->rq_svc_ctx = &grctx->src_base;
+        gw = &grctx->src_wirectx;
 
         /* save wire context */
         gw->gw_flags = ghdr->gh_flags;
@@ -2628,7 +2649,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
         ENTRY;
 
         /* get clear data length. note embedded lustre_msg might
-         * have been shrinked */
+         * have been shrunk */
         if (req->rq_replen != lustre_msg_buflen(rs->rs_repbuf, 0))
                 msglen = lustre_shrink_msg(rs->rs_repbuf, 0, req->rq_replen, 1);
         else
@@ -2774,8 +2795,8 @@ void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
 
 void gss_svc_free_ctx(struct ptlrpc_svc_ctx *ctx)
 {
-        LASSERT(cfs_atomic_read(&ctx->sc_refcount) == 0);
-        gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
+        LASSERT(atomic_read(&ctx->sc_refcount) == 0);
+        gss_svc_reqctx_free(gss_svc_ctx2reqctx(ctx));
 }
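The imp_lock additions above (the LU-3333 band-aid) all protect the same idiom: a request buffer is reallocated and rq_reqmsg, a pointer into that buffer that replay-list walkers may dereference, is re-aimed. A minimal model of that pattern, with pthreads instead of kernel spinlocks and illustrative names throughout (this is not the Lustre API):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct import {
        pthread_mutex_t imp_lock;       /* readers hold this while walking
                                         * the (omitted) replay list */
};

struct request {
        struct import  *rq_import;
        char           *rq_reqbuf;      /* backing buffer */
        size_t          rq_reqbuf_len;
        char           *rq_reqmsg;      /* points into rq_reqbuf */
};

/* Grow the buffer. Readers must never observe rq_reqmsg pointing into
 * freed memory, so the copy-free-swap happens under imp_lock; the
 * allocation itself stays outside the critical section, mirroring the
 * patch above. */
static int enlarge_reqbuf(struct request *req, size_t newlen)
{
        char *newbuf = malloc(newlen);

        if (newbuf == NULL)
                return -1;

        if (req->rq_import)
                pthread_mutex_lock(&req->rq_import->imp_lock);

        memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
        free(req->rq_reqbuf);
        req->rq_reqbuf = newbuf;
        req->rq_reqbuf_len = newlen;
        req->rq_reqmsg = newbuf;        /* re-aim the embedded-message pointer */

        if (req->rq_import)
                pthread_mutex_unlock(&req->rq_import->imp_lock);
        return 0;
}

int main(void)
{
        static struct import imp = { PTHREAD_MUTEX_INITIALIZER };
        struct request req = { &imp, calloc(1, 8), 8, NULL };

        if (req.rq_reqbuf == NULL)
                return 1;
        req.rq_reqmsg = req.rq_reqbuf;
        return enlarge_reqbuf(&req, 16);
}

As the in-tree comment itself says, this is a band-aid: the cleaner fix is to finish enlarging the buffer before any other thread can hold a pointer into it.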
 int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
@@ -2791,21 +2812,21 @@ int gss_copy_rvc_cli_ctx(struct ptlrpc_cli_ctx *cli_ctx,
         cli_gctx->gc_proc = PTLRPC_GSS_PROC_DATA;
         cli_gctx->gc_win = GSS_SEQ_WIN;
 
-        /* The problem is the reverse ctx might get lost in some recovery
-         * situations, and the same svc_ctx will be used to re-create it.
-         * if there's callback be sentout before that, new reverse ctx start
-         * with sequence 0 will lead to future callback rpc be treated as
-         * replay.
-         *
-         * each reverse root ctx will record its latest sequence number on its
-         * buddy svcctx before be destroied, so here we continue use it.
-         */
-        cfs_atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
-
-        if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
-                CERROR("failed to dup svc handle\n");
-                goto err_out;
-        }
+        /* The problem is that the reverse ctx might get lost in some
+         * recovery situations, and the same svc_ctx will be used to
+         * re-create it. If a callback is sent out before that, a new
+         * reverse ctx starting with sequence 0 would make future
+         * callback RPCs be treated as replays.
+         *
+         * Each reverse root ctx records its latest sequence number on its
+         * buddy svcctx before being destroyed, so here we continue to use it.
+         */
+        atomic_set(&cli_gctx->gc_seq, svc_gctx->gsc_rvs_seq);
+
+        if (gss_svc_upcall_dup_handle(&cli_gctx->gc_svc_handle, svc_gctx)) {
+                CERROR("failed to dup svc handle\n");
+                goto err_out;
+        }
 
         if (lgss_copy_reverse_context(svc_gctx->gsc_mechctx,
                                       &mechctx) != GSS_S_COMPLETE) {
@@ -2865,49 +2886,54 @@ int __init sptlrpc_gss_init(void)
         if (rc)
                 goto out_cli_upcall;
 
-        rc = init_kerberos_module();
-        if (rc)
-                goto out_svc_upcall;
+        rc = init_null_module();
+        if (rc)
+                goto out_svc_upcall;
 
-        /* register policy after all other stuff be intialized, because it
-         * might be in used immediately after the registration. */
+        rc = init_kerberos_module();
+        if (rc)
+                goto out_null;
 
-        rc = gss_init_keyring();
-        if (rc)
-                goto out_kerberos;
+        rc = init_sk_module();
+        if (rc)
+                goto out_kerberos;
 
-#ifdef HAVE_GSS_PIPEFS
-        rc = gss_init_pipefs();
-        if (rc)
-                goto out_keyring;
-#endif
+        /* register the policy after everything else is initialized, because
+         * it might be in use immediately after the registration. */
 
-        gss_init_at_reply_offset();
+        rc = gss_init_keyring();
+        if (rc)
+                goto out_sk;
 
-        return 0;
+        rc = gss_init_pipefs();
+        if (rc)
+                goto out_keyring;
 
-#ifdef HAVE_GSS_PIPEFS
-out_keyring:
-        gss_exit_keyring();
-#endif
+        gss_init_at_reply_offset();
+        return 0;
+
+out_keyring:
+        gss_exit_keyring();
+out_sk:
+        cleanup_sk_module();
 out_kerberos:
-        cleanup_kerberos_module();
+        cleanup_kerberos_module();
+out_null:
+        cleanup_null_module();
 out_svc_upcall:
-        gss_exit_svc_upcall();
+        gss_exit_svc_upcall();
 out_cli_upcall:
-        gss_exit_cli_upcall();
+        gss_exit_cli_upcall();
 out_lproc:
-        gss_exit_lproc();
-        return rc;
+        gss_exit_lproc();
+        return rc;
 }
 
 static void __exit sptlrpc_gss_exit(void)
 {
         gss_exit_keyring();
-#ifdef HAVE_GSS_PIPEFS
         gss_exit_pipefs();
-#endif
         cleanup_kerberos_module();
         gss_exit_svc_upcall();
         gss_exit_cli_upcall();
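The reworked sptlrpc_gss_init() above adds two more rungs (the null and sk modules) to a classic unwind-on-error goto ladder: every failing step jumps to the label that tears down only what was already set up, in reverse order. The skeleton of that idiom, with illustrative stubs rather than the Lustre functions:

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }  /* pretend the last step fails */
static void cleanup_a(void) { puts("cleanup a"); }
static void cleanup_b(void) { puts("cleanup b"); }

static int subsystem_init(void)
{
        int rc;

        rc = init_a();
        if (rc)
                goto out;
        rc = init_b();
        if (rc)
                goto out_a;
        rc = init_c();
        if (rc)
                goto out_b;     /* c failed: unwind b, then a */

        return 0;

out_b:
        cleanup_b();
out_a:
        cleanup_a();
out:
        return rc;
}

int main(void)
{
        return subsystem_init() ? 1 : 0;
}

The ordering rule the patch preserves is that the labels appear in the reverse order of the init calls, which is why inserting init_null_module()/init_sk_module() also meant inserting out_null/out_sk at the mirrored positions in the ladder.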