From: ericm
Date: Mon, 24 Sep 2007 21:04:25 +0000 (+0000)
Subject: branch: HEAD
X-Git-Tag: v1_7_0_51~695
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=f3670ae343977b5444b032a9ab341f1da8ce80a6;p=fs%2Flustre-release.git

branch: HEAD
- coding style fixes
- add ChangeLog entries
b=11832
r=vitaly, tappro
---

diff --git a/lustre/ChangeLog b/lustre/ChangeLog
index 3b4da7f..a185b2d 100644
--- a/lustre/ChangeLog
+++ b/lustre/ChangeLog
@@ -12,6 +12,18 @@
 * Recommended e2fsprogs version: 1.40.2-cfs1
 * Note that reiserfs quotas are disabled on SLES 10 in this kernel.

+Severity   : enhancement
+Bugzilla   : 13641
+Description: light-weight GSS support
+Details    : Support the krb5n and krb5a modes, which keep Kerberos 5
+             authentication but reduce the performance overhead.
+
+Severity   : enhancement
+Bugzilla   : 11832
+Description: Linux keyring support
+Details    : Use the Linux keyring service for the internal context
+             refresh/cache mechanism of Lustre GSS.
+
 Severity   : normal
 Bugzilla   : 12186
 Description: Fix errors in lfs documentation

diff --git a/lustre/ptlrpc/gss/gss_cli_upcall.c b/lustre/ptlrpc/gss/gss_cli_upcall.c
index e36a092..9a55329a 100644
--- a/lustre/ptlrpc/gss/gss_cli_upcall.c
+++ b/lustre/ptlrpc/gss/gss_cli_upcall.c
@@ -109,8 +109,7 @@ int ctx_init_pack_request(struct obd_import *imp,
                 LBUG();

         /* 3. reverse context handle. actually only needed by root user,
-         * but we send it anyway.
-         */
+         * but we send it anyway. */
         gsec = container_of(imp->imp_sec, struct gss_sec, gs_base);
         obj.len = sizeof(gsec->gs_rvs_hdl);
         obj.data = (__u8 *) &gsec->gs_rvs_hdl;
@@ -292,8 +291,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
          * leave recovery decisions to general ptlrpc layer.
          *
          * FIXME maybe some other error code shouldn't be treated
-         * as timeout.
-         */
+         * as timeout. */
         param.status = rc;
         if (rc != -EACCES)
                 param.status = -ETIMEDOUT;
@@ -361,8 +359,7 @@ int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
         /* fix the user desc */
         if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
                 /* we rely on the fact that this request is in AUTH mode,
-                 * and user_desc at offset 2.
-                 */
+                 * and user_desc is at offset 2. */
                 pud = lustre_msg_buf(req->rq_reqbuf, 2, sizeof(*pud));
                 LASSERT(pud);
                 pud->pud_uid = pud->pud_fsuid = ctx->cc_vcred.vc_uid;
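The bulk of the style change in this patch is mechanical: multi-line block comments are rewrapped so the text begins right after the opening marker and the closing marker shares the final line of text, instead of sitting alone on its own line. A minimal before/after illustration (standalone C, not taken from the Lustre tree):

#include <stdio.h>

int main(void)
{
        /*
         * old style: the comment text is framed by markers on their
         * own lines, costing two extra lines per comment.
         */
        printf("old style above\n");

        /* new style: text starts right after the opening marker and
         * the closing marker ends the last text line. */
        printf("new style above\n");
        return 0;
}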
diff --git a/lustre/ptlrpc/gss/gss_keyring.c b/lustre/ptlrpc/gss/gss_keyring.c
index 5c6722c..02c45fe 100644
--- a/lustre/ptlrpc/gss/gss_keyring.c
+++ b/lustre/ptlrpc/gss/gss_keyring.c
@@ -303,15 +303,11 @@ int ctx_unlist_kr(struct ptlrpc_cli_ctx *ctx, int locked)
         struct ptlrpc_sec *sec = ctx->cc_sec;
         struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

-        /*
-         * if hashed bit has gone, leave the job to somebody who is doing it
-         */
+        /* if the hashed bit has gone, leave the job to whoever is doing it */
         if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
                 return 0;

-        /*
-         * drop ref inside spin lock to prevent race with other operations
-         */
+        /* drop ref inside spin lock to prevent race with other operations */
         spin_lock_if(&sec->ps_lock, !locked);

         if (gsec_kr->gsk_root_ctx == ctx)
@@ -335,9 +331,8 @@ void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
         LASSERT(atomic_read(&key->usage) > 0);
         LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
         LASSERT(key->payload.data == NULL);
-        /*
-         * at this time context may or may not in list.
-         */
+
+        /* at this time the context may or may not be in the list. */
         key_get(key);
         atomic_inc(&ctx->cc_refcount);
         ctx2gctx_keyring(ctx)->gck_key = key;
@@ -431,11 +426,9 @@ static void dispose_ctx_list_kr(struct hlist_head *freelist)
         hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
                 hlist_del_init(&ctx->cc_cache);

-                /*
-                 * we need to wakeup waiting reqs here. the context might
+                /* we need to wake up waiting reqs here. the context might
                  * be force-released before the upcall finishes, then the
-                 * late-arrived downcall can't find the ctx even.
-                 */
+                 * late-arrived downcall can't even find the ctx. */
                 sptlrpc_cli_ctx_wakeup(ctx);

                 unbind_ctx_kr(ctx);
@@ -460,11 +453,10 @@ struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
         if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
                 struct hlist_node *node;
                 struct ptlrpc_cli_ctx *tmp;
-                /*
-                 * reverse ctx, search root ctx in list, choose the one
+
+                /* reverse ctx, search root ctx in list, choose the one
                  * with the shortest expire time, which most likely has
-                 * an established peer ctx at client side.
-                 */
+                 * an established peer ctx at client side. */
                 hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, cc_cache) {
                         if (ctx == NULL || ctx->cc_expire == 0 ||
                             ctx->cc_expire > tmp->cc_expire) {
@@ -516,8 +508,7 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
         /* if there's a root_ctx there, instead of obsoleting the current one
          * immediately, we let it continue operating for a little while.
          * hopefully when the first backward rpc with the newest ctx is sent,
-         * the client side already have the peer ctx well established.
-         */
+         * the client side already has the peer ctx well established. */
         ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);

         if (key)
@@ -602,8 +593,7 @@ int user_is_root(struct ptlrpc_sec *sec, struct vfs_cred *vcred)

         /* FIXME
          * more precisely deal with setuid. maybe add more information
-         * into vfs_cred ??
-         */
+         * into vfs_cred ?? */
         return (vcred->vc_uid == 0);
 }
@@ -670,9 +660,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,

         is_root = user_is_root(sec, vcred);

-        /*
-         * a little bit optimization for root context
-         */
+        /* a little bit of optimization for the root context */
         if (is_root) {
                 ctx = sec_lookup_root_ctx_kr(sec);
                 /*
@@ -685,11 +673,9 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,

         LASSERT(create != 0);

-        /*
-         * for root context, obtain lock and check again, this time hold
+        /* for root context, obtain lock and check again, this time hold
          * the root upcall lock, making sure nobody else populated a new root
-         * context after last check.
-         */
+         * context after last check. */
         if (is_root) {
                 mutex_lock(&gsec_kr->gsk_root_uc_lock);

@@ -705,9 +691,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,

         construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

-        /*
-         * callout info: mech:flags:svc_type:peer_nid:target_uuid
-         */
+        /* callout info: mech:flags:svc_type:peer_nid:target_uuid */
         OBD_ALLOC(coinfo, coinfo_size);
         if (coinfo == NULL)
                 goto out;
@@ -728,12 +712,10 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
                 goto out;
         }

-        /*
-         * once payload.data was pointed to a ctx, it never changes until
+        /* once payload.data points to a ctx, it never changes until
          * we de-associate them; but a parallel request_key() may return
          * a key with payload.data == NULL at the same time. so we still
-         * need wirtelock of key->sem to serialize them.
-         */
+         * need the write lock of key->sem to serialize them. */
         down_write(&key->sem);

         if (likely(key->payload.data != NULL)) {
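The hunk above touches one of the trickier spots in the keyring code: request_key() can hand several racing callers the same key before its payload is bound, so both the payload check and the binding must happen under the key's write semaphore. The same shape, reduced to a user-space toy (POSIX threads; slot and make_payload are illustrative names, not Lustre or kernel API, and the lock is assumed statically initialized):

#include <pthread.h>

struct slot {
        pthread_rwlock_t lock;   /* e.g. PTHREAD_RWLOCK_INITIALIZER */
        void *payload;
};

/* the first caller through binds a payload; everyone else reuses it.
 * the write lock serializes the NULL check against the binding,
 * exactly as down_write(&key->sem) does above. */
static void *slot_bind_or_reuse(struct slot *s, void *(*make_payload)(void))
{
        void *p;

        pthread_rwlock_wrlock(&s->lock);
        if (s->payload == NULL)
                s->payload = make_payload();
        p = s->payload;
        pthread_rwlock_unlock(&s->lock);
        return p;
}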
@@ -744,13 +726,12 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
                 LASSERT(atomic_read(&key->usage) >= 2);

                 /* simply take a ref and return. it's the upper layer's
-                 * responsibility to detect & replace dead ctx.
-                 */
+                 * responsibility to detect & replace a dead ctx. */
                 atomic_inc(&ctx->cc_refcount);
         } else {
                 /* pre-initialization with a cli_ctx. this can't be done in
-                 * key_instantiate() because we'v no enough information there.
-                 */
+                 * key_instantiate() because we don't have enough information
+                 * there. */
                 ctx = ctx_create_kr(sec, vcred);
                 if (ctx != NULL) {
                         ctx_enlist_kr(ctx, is_root, 0);
@@ -761,10 +742,8 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
                         CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
                                key, ctx, sec);
                 } else {
-                        /*
-                         * we'd prefer to call key_revoke(), but we more like
-                         * to revoke it within this key->sem locked period.
-                         */
+                        /* we'd prefer to call key_revoke(), but we'd rather
+                         * revoke it within this key->sem locked period. */
                         key_revoke_locked(key);
                 }

@@ -790,9 +769,9 @@ void gss_sec_release_ctx_kr(struct ptlrpc_sec *sec,
 {
         LASSERT(atomic_read(&ctx->cc_refcount) == 0);

-        if (sync)
+        if (sync) {
                 ctx_destroy_kr(ctx);
-        else {
+        } else {
                 atomic_inc(&ctx->cc_refcount);
                 sptlrpc_gc_add_ctx(ctx);
         }
@@ -820,8 +799,7 @@ void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,
         construct_key_desc(desc, sizeof(desc), sec, uid);

         /* there should be only one valid key, but we put it in the
-         * loop in case of any weird cases
-         */
+         * loop in case of any weird cases */
         for (;;) {
                 key = request_key(&gss_key_type, desc, NULL);
                 if (IS_ERR(key)) {
@@ -835,8 +813,7 @@ void flush_user_ctx_cache_kr(struct ptlrpc_sec *sec,

                 /* kill_key_locked() should usually revoke the key, but we
                  * revoke it again to make sure, e.g. in some cases the key may
-                 * not well coupled with a context.
-                 */
+                 * not be well coupled with a context. */
                 key_revoke_locked(key);
                 up_write(&key->sem);
@@ -870,8 +847,7 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
                         continue;

                 /* at this moment there are at least 2 base references:
-                 * key association and in-list.
-                 */
+                 * key association and in-list. */
                 if (atomic_read(&ctx->cc_refcount) > 2) {
                         if (!force)
                                 continue;
@@ -1227,13 +1203,11 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
                 RETURN(-EINVAL);
         }

-        /*
-         * there's a race between userspace parent - child processes. if
+        /* there's a race between userspace parent and child processes. if
          * the child finishes negotiation too fast and calls kt_update(), the
          * ctx might still be NULL. but the key will finally be associated
          * with a context, or be revoked. if the key status is fine, return
-         * -EAGAIN to allow userspace sleep a while and call again.
-         */
+         * -EAGAIN to let userspace sleep a while and call again. */
         if (ctx == NULL) {
                 CWARN("race in userspace. key %p(%x) flags %lx\n",
                       key, key->serial, key->flags);
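The -EAGAIN convention described above leaves retry policy to the daemon: if the kernel cannot find the context yet, userspace is expected to back off briefly and resubmit. A sketch of that calling side, with do_update passed in as a hypothetical stand-in for the real key-update call:

#include <errno.h>
#include <time.h>

/* retry while the callee reports -EAGAIN, sleeping between tries.
 * the 100 ms delay is an illustrative choice, not from the source. */
static int update_with_retry(int (*do_update)(void), int max_tries)
{
        struct timespec delay = { 0, 100 * 1000 * 1000 };
        int i, rc = -EAGAIN;

        for (i = 0; i < max_tries && rc == -EAGAIN; i++) {
                rc = do_update();
                if (rc == -EAGAIN)
                        nanosleep(&delay, NULL);
        }
        return rc;
}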
@@ -1259,10 +1233,10 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
         sptlrpc_cli_ctx_get(ctx);
         gctx = ctx2gctx(ctx);
-        rc = -EFAULT;

-        if (buffer_extract_bytes(&data, &datalen,
-                                 &gctx->gc_win, sizeof(gctx->gc_win))) {
+        rc = buffer_extract_bytes(&data, &datalen, &gctx->gc_win,
+                                  sizeof(gctx->gc_win));
+        if (rc) {
                 CERROR("failed to extract seq_win\n");
                 goto out;
         }
@@ -1270,14 +1244,16 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
         if (gctx->gc_win == 0) {
                 __u32 nego_rpc_err, nego_gss_err;

-                if (buffer_extract_bytes(&data, &datalen,
-                                         &nego_rpc_err, sizeof(nego_rpc_err))) {
+                rc = buffer_extract_bytes(&data, &datalen, &nego_rpc_err,
+                                          sizeof(nego_rpc_err));
+                if (rc) {
                         CERROR("failed to extract rpc rc\n");
                         goto out;
                 }

-                if (buffer_extract_bytes(&data, &datalen,
-                                         &nego_gss_err, sizeof(nego_gss_err))) {
+                rc = buffer_extract_bytes(&data, &datalen, &nego_gss_err,
+                                          sizeof(nego_gss_err));
+                if (rc) {
                         CERROR("failed to extract gss rc\n");
                         goto out;
                 }
@@ -1285,42 +1261,38 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
                 CERROR("negotiation: rpc err %d, gss err %x\n",
                        nego_rpc_err, nego_gss_err);

-                if (nego_rpc_err)
-                        rc = nego_rpc_err;
+                rc = nego_rpc_err ? nego_rpc_err : -EACCES;
         } else {
-                if (rawobj_extract_local_alloc(&gctx->gc_handle,
-                                               (__u32 **) &data, &datalen)) {
+                rc = rawobj_extract_local_alloc(&gctx->gc_handle,
+                                                (__u32 **) &data, &datalen);
+                if (rc) {
                         CERROR("failed to extract handle\n");
                         goto out;
                 }

-                if (rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen)) {
+                rc = rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen);
+                if (rc) {
                         CERROR("failed to extract mech\n");
                         goto out;
                 }

-                if (lgss_import_sec_context(&tmpobj,
-                                            sec2gsec(ctx->cc_sec)->gs_mech,
-                                            &gctx->gc_mechctx) !=
-                    GSS_S_COMPLETE) {
+                rc = lgss_import_sec_context(&tmpobj,
+                                             sec2gsec(ctx->cc_sec)->gs_mech,
+                                             &gctx->gc_mechctx);
+                if (rc != GSS_S_COMPLETE)
                         CERROR("failed to import context\n");
-                        goto out;
-                }
-
-                rc = 0;
+                else
+                        rc = 0;
         }
out:
         /* we don't care what the current status of this ctx is, even if
          * someone else is operating on the ctx at the same time. we just add
-         * opinions here.
-         */
+         * our own opinion here. */
         if (rc == 0) {
                 gss_cli_ctx_uptodate(gctx);
         } else {
-                /*
-                 * this will also revoke the key. has to be done before
-                 * wakeup waiters otherwise they can find the stale key
-                 */
+                /* this will also revoke the key. has to be done before
+                 * waking up waiters, otherwise they can find the stale key */
                 kill_key_locked(key);
                 cli_ctx_expire(ctx);
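A second mechanical change running through gss_kt_update() above replaces boolean-style "if (func(...))" tests with an explicit return code: capture rc = func(...) first, then branch on it. That keeps the real error value for the caller instead of a pre-set catch-all such as -EFAULT. In isolation (parse_header is an illustrative stub, not a real API):

#include <errno.h>

static int parse_header(const void *buf)
{
        (void)buf;
        return 0;               /* stub: non-zero would mean failure */
}

static int handle_boolean_style(const void *buf)
{
        int rc = -EFAULT;       /* the real cause is lost */

        if (parse_header(buf))
                return rc;
        return 0;
}

static int handle_rc_style(const void *buf)
{
        int rc = parse_header(buf);   /* the real cause is kept */

        if (rc)
                return rc;
        return 0;
}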
diff --git a/lustre/ptlrpc/gss/gss_krb5_mech.c b/lustre/ptlrpc/gss/gss_krb5_mech.c
index 2e03843..50dcace 100644
--- a/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -902,8 +902,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
         get_random_bytes(conf, ke->ke_conf_size);

         /* get encryption blocksize. note kc_keye might not be associated with
-         * a tfm, currently only for arcfour-hmac
-         */
+         * a tfm, currently only for arcfour-hmac */
         if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
                 LASSERT(kctx->kc_keye.kb_tfm == NULL);
                 blocksize = 1;

diff --git a/lustre/ptlrpc/gss/gss_pipefs.c b/lustre/ptlrpc/gss/gss_pipefs.c
index ecbe43f..8cc7aea 100644
--- a/lustre/ptlrpc/gss/gss_pipefs.c
+++ b/lustre/ptlrpc/gss/gss_pipefs.c
@@ -100,13 +100,15 @@ struct ptlrpc_cli_ctx *ctx_create_pf(struct ptlrpc_sec *sec,
                                      struct vfs_cred *vcred)
 {
         struct gss_cli_ctx *gctx;
+        int rc;

         OBD_ALLOC_PTR(gctx);
         if (gctx == NULL)
                 return NULL;

-        if (gss_cli_ctx_init_common(sec, &gctx->gc_base, &gss_pipefs_ctxops,
-                                    vcred)) {
+        rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
+                                     &gss_pipefs_ctxops, vcred);
+        if (rc) {
                 OBD_FREE_PTR(gctx);
                 return NULL;
         }
@@ -154,8 +156,9 @@ void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
         if (atomic_dec_and_test(&ctx->cc_refcount)) {
                 __hlist_del(&ctx->cc_cache);
                 hlist_add_head(&ctx->cc_cache, freelist);
-        } else
+        } else {
                 hlist_del_init(&ctx->cc_cache);
+        }
 }

 /*
@@ -519,8 +522,7 @@ void gss_sec_release_ctx_pf(struct ptlrpc_sec *sec,
         LASSERT(hlist_unhashed(&ctx->cc_cache));

         /* if async is required, we must clear the UPTODATE bit to prevent extra
-         * rpcs during destroy procedure.
-         */
+         * rpcs during the destroy procedure. */
         if (!sync)
                 clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

@@ -939,8 +941,7 @@ out_free:
         OBD_FREE(buf, mlen);

         /* FIXME
          * hack pipefs: always return the asked length, otherwise all following
-         * downcalls might be messed up.
-         */
+         * downcalls might be messed up. */
         rc = mlen;
         RETURN(rc);
 }
@@ -1085,8 +1086,7 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
                sizeof(gmsg->gum_data.gum_obd));

         /* This could only happen when the sysadmin set it dead/expired
-         * using lctl by force.
-         */
+         * using lctl by force. */
         if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
                 CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
@@ -1120,8 +1120,7 @@ static int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
 {
         /* if we are refreshing for root, also update the reverse
-         * handle index, do not confuse reverse contexts.
-         */
+         * handle index; do not confuse reverse contexts. */
         if (ctx->cc_vcred.vc_uid == 0) {
                 struct gss_sec *gsec;

@@ -1194,11 +1193,9 @@ int __init gss_init_pipefs_upcall(void)
                 CERROR("Failed to create gss pipe dir: %ld\n", PTR_ERR(de));
                 return PTR_ERR(de);
         }
-        /* FIXME
-         * hack pipefs: dput will sometimes cause oops during module unload
-         * and lgssd close the pipe fds.
-         */
-        //dput(de);
+
+        /* FIXME hack pipefs: dput will sometimes cause an oops during module
+         * unload and lgssd closing the pipe fds. */

         /* krb5 mechanism */
         de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,
@@ -1224,10 +1221,8 @@ void __exit gss_exit_pipefs_upcall(void)
         for (i = 0; i < MECH_MAX; i++) {
                 LASSERT(list_empty(&upcall_lists[i]));

-                /*
-                 * dput pipe dentry here might cause lgssd oops.
-                 */
-                //dput(de_pipes[i]);
+
+                /* dput of the pipe dentry here might cause an lgssd oops. */
                 de_pipes[i] = NULL;
         }
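ctx_create_pf() above follows the usual allocate-initialize-or-free shape: the constructor either returns a fully initialized object or frees the partial allocation and returns NULL, so callers never see a half-built context. The same skeleton in plain C (the gadget names are illustrative):

#include <stdlib.h>

struct gadget { int ready; };

static int gadget_init(struct gadget *g)
{
        g->ready = 1;
        return 0;               /* non-zero would mean failure */
}

static struct gadget *gadget_create(void)
{
        struct gadget *g = malloc(sizeof(*g));

        if (g == NULL)
                return NULL;
        if (gadget_init(g)) {   /* init failed: undo the allocation */
                free(g);
                return NULL;
        }
        return g;
}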
diff --git a/lustre/ptlrpc/gss/gss_svc_upcall.c b/lustre/ptlrpc/gss/gss_svc_upcall.c
index ca32e4e..76c6b4d 100644
--- a/lustre/ptlrpc/gss/gss_svc_upcall.c
+++ b/lustre/ptlrpc/gss/gss_svc_upcall.c
@@ -497,8 +497,7 @@ int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
         }

         /* currently the expiry time passed down from user-space
-         * is invalid, here we retrive it from mech.
-         */
+         * is invalid, so here we retrieve it from the mech. */
         if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
                 CERROR("unable to get expire time, drop it\n");
                 lgss_mech_put(gm);
@@ -728,8 +727,7 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
 cache_check:
         /* Note each time cache_check() will drop a reference if it returns
          * non-zero. We hold an extra reference on the initial rsip, but must
-         * take care of following calls.
-         */
+         * take care of following calls. */
         rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
         switch (rc) {
         case -EAGAIN: {
@@ -758,8 +756,7 @@ cache_check:
                 break;
         case 0:
                 /* if this is not the first check, we have to release the extra
-                 * reference we just added on it.
-                 */
+                 * reference we just added on it. */
                 if (!first_check)
                         cache_put(&rsip->h, &rsi_cache);
                 CDEBUG(D_SEC, "cache_check is good\n");
@@ -837,8 +834,7 @@ cache_check:
 out:
         /* it looks like we should put rsip here as well, but this messes up
-         * with NFS cache mgmt code... FIXME
-         */
+         * with NFS cache mgmt code... FIXME */
 #if 0
         if (rsip)
                 rsi_put(&rsip->h, &rsi_cache);
@@ -894,8 +890,7 @@ int __init gss_init_svc_upcall(void)
          * the init upcall channel, otherwise there's a big chance that the first
          * upcall, issued before the channel is opened, will be dropped directly
          * by the nfsv4 cache code, thus leading to unnecessary recovery time.
-         * here we wait at miximum 1.5 seconds.
-         */
+         * here we wait at maximum 1.5 seconds. */
         for (i = 0; i < 6; i++) {
                 if (atomic_read(&rsi_cache.readers) > 0)
                         break;
@@ -908,12 +903,10 @@ int __init gss_init_svc_upcall(void)
                 CWARN("Init channel is not opened by lsvcgssd, following "
                       "request might be dropped until lsvcgssd is active\n");

-        /*
-         * this helps reducing context index confliction. after server reboot,
+        /* this helps reduce context index conflicts. after a server reboot,
          * conflicting requests from clients might be filtered out by the
          * initial sequence number check, with no chance to send an error notification
-         * back to clients.
-         */
+         * back to clients. */
         get_random_bytes(&__ctx_index, sizeof(__ctx_index));

         return 0;
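gss_init_svc_upcall() above bounds its wait for lsvcgssd: poll a readiness flag a fixed number of times, sleep between polls, and carry on with a warning if the daemon never shows up. A standalone sketch of that shape — the hunk elides the actual sleep, so the 0.25 s interval here is an assumption chosen so that six polls give the stated 1.5 s, and channel_has_reader is an illustrative stub:

#include <stdio.h>
#include <time.h>

static int channel_has_reader(void)
{
        return 0;               /* stub: replace with a real readiness check */
}

static void wait_for_reader(void)
{
        struct timespec delay = { 0, 250 * 1000 * 1000 };  /* 0.25 s */
        int i;

        for (i = 0; i < 6; i++) {        /* at most ~1.5 seconds in total */
                if (channel_has_reader())
                        return;
                nanosleep(&delay, NULL);
        }
        fprintf(stderr, "upcall channel not opened, continuing anyway\n");
}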
diff --git a/lustre/ptlrpc/gss/sec_gss.c b/lustre/ptlrpc/gss/sec_gss.c
index 2982fd9..11d2478 100644
--- a/lustre/ptlrpc/gss/sec_gss.c
+++ b/lustre/ptlrpc/gss/sec_gss.c
@@ -157,8 +157,7 @@ int gss_estimate_payload(struct gss_ctx *mechctx, int msgsize, int privacy)
 {
         if (privacy) {
                 /* we suppose the max cipher block size is 16 bytes. here we
-                 * add 16 for confounder and 16 for padding.
-                 */
+                 * add 16 for the confounder and 16 for padding. */
                 return GSS_KRB5_INTEG_MAX_PAYLOAD + msgsize + 16 + 16 + 16;
         } else {
                 return GSS_KRB5_INTEG_MAX_PAYLOAD;
         }
@@ -362,8 +361,7 @@ int cli_ctx_check_death(struct ptlrpc_cli_ctx *ctx)
                 return 1;

         /* expire == 0 means never expire. a newly created gss context
-         * which during upcall may has 0 expiration
-         */
+         * during upcall may have 0 expiration */
         if (ctx->cc_expire == 0)
                 return 0;
@@ -392,8 +390,7 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
         /* At this point this ctx might have been marked as dead by
          * someone else, in which case nobody will make further use
          * of it. we don't care, and marking it UPTODATE will help
-         * destroying server side context when it be destroied.
-         */
+         * destroying the server side context when it is destroyed. */
         set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);

         if (sec_is_reverse(ctx->cc_sec))
@@ -407,10 +404,8 @@ void gss_cli_ctx_uptodate(struct gss_cli_ctx *gctx)
                sec2target_str(ctx->cc_sec), ctx->cc_expire,
                ctx->cc_expire - cfs_time_current_sec());

-        /*
-         * install reverse svc ctx, but only for forward connection
-         * and root context
-         */
+        /* install the reverse svc ctx, but only for a forward connection
+         * and the root context */
         if (!sec_is_reverse(ctx->cc_sec) && ctx->cc_vcred.vc_uid == 0) {
                 gss_sec_install_rctx(ctx->cc_sec->ps_import,
                                      ctx->cc_sec, ctx);
@@ -645,8 +640,7 @@ redo:
          * lead to the sequence number falling behind the window on the server
          * and being dropped. also applies to gss_cli_ctx_seal().
          *
-         * Note: null mode dosen't check sequence number.
-         */
+         * Note: null mode doesn't check the sequence number. */
         if (svc != SPTLRPC_SVC_NULL &&
             atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
                 int behind = atomic_read(&gctx->gc_seq) - seq;
@@ -684,8 +678,7 @@ int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
          * returned in this case.
          *
          * but in any case, don't resend the ctx destroying rpc, don't resend
-         * reverse rpc.
-         */
+         * the reverse rpc. */
         if (req->rq_ctx_fini) {
                 CWARN("server respond error (%08x/%08x) for ctx fini\n",
                       errhdr->gh_major, errhdr->gh_minor);
@@ -704,12 +697,11 @@ int gss_cli_ctx_handle_err_notify(struct ptlrpc_cli_ctx *ctx,
                       "NO_CONTEXT" : "BAD_SIG");

                 sptlrpc_cli_ctx_expire(ctx);
-                /*
-                 * we need replace the ctx right here, otherwise during
+
+                /* we need to replace the ctx right here, otherwise during
                  * resend we'll hit the logic in sptlrpc_req_refresh_ctx()
                  * which keeps the ctx with the RESEND flag, and we'll never
-                 * get rid of this ctx.
-                 */
+                 * get rid of this ctx. */
                 rc = sptlrpc_req_replace_dead_ctx(req);
                 if (rc == 0)
                         req->rq_resend = 1;
@@ -739,8 +731,7 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
         gctx = container_of(ctx, struct gss_cli_ctx, gc_base);

         /* special case for context negotiation, rq_repmsg/rq_replen actually
-         * are not used currently.
-         */
+         * are not used currently. */
         if (req->rq_ctx_init) {
                 req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
                 req->rq_replen = msg->lm_buflens[1];
@@ -1293,8 +1284,7 @@ int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
                 memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);

         /* if the pre-allocated buffer is big enough, we just pack
-         * both clear buf & request buf in it, to avoid more alloc.
-         */
+         * both clear buf & request buf in it, to avoid more alloc. */
         if (clearsize + wiresize <= req->rq_reqbuf_len) {
                 req->rq_clrbuf = (void *) (((char *) req->rq_reqbuf) + wiresize);
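The sizing comment in gss_estimate_payload() above is easy to check with concrete numbers: assuming a worst-case 16-byte cipher block, privacy mode reserves one block for the confounder and one for padding; the code adds a third 16 whose purpose the comment doesn't spell out, treated here as extra slack. As arithmetic (the constant value is a placeholder, not the real GSS_KRB5_INTEG_MAX_PAYLOAD):

enum {
        MAX_CIPHER_BLOCK  = 16,
        INTEG_MAX_PAYLOAD = 64,          /* placeholder value only */
};

static int estimate_payload_priv(int msgsize)
{
        return INTEG_MAX_PAYLOAD + msgsize +
               MAX_CIPHER_BLOCK +        /* confounder */
               MAX_CIPHER_BLOCK +        /* padding */
               MAX_CIPHER_BLOCK;         /* third block added by the code */
}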
@@ -1664,18 +1654,14 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
         buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
         newcipbuf_size = lustre_msg_size_v2(3, buflens);

-        /*
-         * handle the case that we put both clear buf and cipher buf into
-         * pre-allocated single buffer.
-         */
+        /* handle the case that we put both clear buf and cipher buf into
+         * a single pre-allocated buffer. */
         if (unlikely(req->rq_pool) &&
             req->rq_clrbuf >= req->rq_reqbuf &&
             (char *) req->rq_clrbuf <
             (char *) req->rq_reqbuf + req->rq_reqbuf_len) {
-                /*
-                 * it couldn't be better we still fit into the
-                 * pre-allocated buffer.
-                 */
+                /* it couldn't be better if we still fit into the
+                 * pre-allocated buffer. */
                 if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
                         void *src, *dst;

@@ -1689,9 +1675,7 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
                         req->rq_clrbuf_len = newclrbuf_size;
                         req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
                 } else {
-                        /*
-                         * sadly we have to split out the clear buffer
-                         */
+                        /* sadly we have to split out the clear buffer */
                         LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
                         LASSERT(req->rq_clrbuf_len < newclrbuf_size);
                 }
@@ -1907,8 +1891,7 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
         seclen -= 4;

         /* extract target uuid, note this code is somewhat fragile
-         * because touched internal structure of obd_uuid
-         */
+         * because it touches the internal structure of obd_uuid */
         if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
                 CERROR("failed to extract target uuid\n");
                 RETURN(SECSVC_DROP);
@@ -2151,10 +2134,8 @@ int gss_svc_handle_data(struct ptlrpc_request *req,
                gw->gw_svc, major, grctx->src_ctx, grctx->src_ctx->gsc_uid,
                libcfs_nid2str(req->rq_peer.nid));
error:
-        /*
-         * we only notify client in case of NO_CONTEXT/BAD_SIG, which
-         * might happen after server reboot, to allow recovery.
-         */
+        /* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
+         * might happen after a server reboot, to allow recovery. */
         if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
             gss_pack_err_notify(req, major, 0) == 0)
                 RETURN(SECSVC_COMPLETE);
@@ -2485,10 +2466,8 @@ int gss_svc_seal(struct ptlrpc_request *req,
         }
         LASSERT(cipher_obj.len <= cipher_buflen);

-        /*
-         * we are about to override data at rs->rs_repbuf, nullify pointers
-         * to which to catch further illegal usage.
-         */
+        /* we are about to overwrite data at rs->rs_repbuf; nullify pointers
+         * into it to catch further illegal usage. */
         grctx->src_repbsd = NULL;
         grctx->src_repbsd_size = 0;

@@ -2657,10 +2636,8 @@ int __init sptlrpc_gss_init(void)
         if (rc)
                 goto out_svc_upcall;

-        /*
-         * register policy after all other stuff be intialized, because it
-         * might be in used immediately after the registration.
-         */
+        /* register the policy after everything else is initialized, because
+         * it might be in use immediately after the registration. */
         rc = gss_init_keyring();
         if (rc)
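The ordering rule noted at the end of sptlrpc_gss_init() above — make the externally visible registration the very last step — pairs naturally with the goto-unwind pattern already used there (goto out_svc_upcall). Reduced to a skeleton with trivial stubs (all names are illustrative):

static int part_a_init(void)        { return 0; }
static void part_a_fini(void)       { }
static int register_publicly(void)  { return 0; }

static int module_init_sketch(void)
{
        int rc;

        rc = part_a_init();
        if (rc)
                return rc;

        /* registration comes last: once it succeeds, other code may
         * call into us immediately, so everything must be ready. */
        rc = register_publicly();
        if (rc)
                goto out_part_a;

        return 0;

out_part_a:
        part_a_fini();              /* unwind in reverse order */
        return rc;
}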
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index ac281c9..6620bd0 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -195,10 +195,8 @@ static void enc_insert_pool(cfs_page_t ***pools, int npools, int npages)
         /*
          * (1) fill all the free slots of current pools.
          */
-        /*
-         * free slots are those left by rent pages, and the extra ones with
-         * index >= eep_total_pages, locate at the tail of last pool.
-         */
+        /* free slots are those left by rent pages, and the extra ones with
+         * index >= eep_total_pages, located at the tail of the last pool. */
         freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
         if (freeslot != 0)
                 freeslot = PAGES_PER_POOL - freeslot;
@@ -394,14 +392,13 @@ again:
         if (++page_pools.epp_waitqlen > page_pools.epp_st_max_wqlen)
                 page_pools.epp_st_max_wqlen = page_pools.epp_waitqlen;

-        /*
-         * we just wait if someone else is adding more pages, or
+
+        /* we just wait if someone else is adding more pages, or
          * the wait queue length is not deep enough. otherwise try to
          * add more pages in the pools.
          *
          * FIXME the policy of detecting resource tightness & growing the pool
-         * need to be reconsidered.
-         */
+         * needs to be reconsidered. */
         if (page_pools.epp_adding ||
             page_pools.epp_waitqlen < 2 ||
             page_pools.epp_full) {
                 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -428,17 +425,15 @@ again:
                 goto again;
         }

-        /*
-         * record max wait time
-         */
+
+        /* record max wait time */
         if (unlikely(tick1 != 0)) {
                 tick2 = cfs_time_current();
                 if (tick2 - tick1 > page_pools.epp_st_max_wait)
                         page_pools.epp_st_max_wait = tick2 - tick1;
         }
-        /*
-         * proceed with rest of allocation
-         */
+
+        /* proceed with the rest of the allocation */
         page_pools.epp_free_pages -= desc->bd_max_iov;

         p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
@@ -929,8 +924,7 @@ int bulk_csum_cli_reply(struct ptlrpc_bulk_desc *desc, int read,
         }

         /* checksum mismatch: re-compute a new one and compare with the
-         * others, give out proper warnings.
-         */
+         * others, giving out proper warnings. */
         OBD_ALLOC(new, csum_size);
         if (new == NULL)
                 return -ENOMEM;

diff --git a/lustre/ptlrpc/sec_gc.c b/lustre/ptlrpc/sec_gc.c
index 296b8eb..96ce6ef 100644
--- a/lustre/ptlrpc/sec_gc.c
+++ b/lustre/ptlrpc/sec_gc.c
@@ -105,9 +105,9 @@ static void sec_process_ctx_list(void)
 {
         struct ptlrpc_cli_ctx *ctx;

-again:
         spin_lock(&sec_gc_ctx_list_lock);
-        if (!list_empty(&sec_gc_ctx_list)) {
+
+        while (!list_empty(&sec_gc_ctx_list)) {
                 ctx = list_entry(sec_gc_ctx_list.next,
                                  struct ptlrpc_cli_ctx, cc_gc_chain);
                 list_del_init(&ctx->cc_gc_chain);
@@ -119,8 +119,9 @@ again:
                       ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));

                 sptlrpc_cli_ctx_put(ctx, 1);
-                goto again;
+                spin_lock(&sec_gc_ctx_list_lock);
         }
+
         spin_unlock(&sec_gc_ctx_list_lock);
 }

@@ -168,10 +169,8 @@ static int sec_gc_main(void *arg)
again:
         mutex_down(&sec_gc_mutex);
         list_for_each_entry_safe(sec, next, &sec_gc_list, ps_gc_list) {
-                /*
-                 * if someone is waiting to be deleted, let it
-                 * proceed as soon as possible.
-                 */
+                /* if someone is waiting to be deleted, let it
+                 * proceed as soon as possible. */
                 if (atomic_read(&sec_gc_wait_del)) {
                         CWARN("deletion pending, retry\n");
                         mutex_up(&sec_gc_mutex);
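The sec_process_ctx_list() rewrite above turns a goto loop into the standard drain idiom: pop one item while holding the lock, drop the lock for the heavy work, then retake it before testing the list again. A user-space miniature, with a pthread mutex standing in for the spinlock (node and process_node are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct node {
        struct node *next;
};

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void process_node(struct node *n)
{
        free(n);                /* stand-in for the expensive work */
}

static void drain_list(void)
{
        pthread_mutex_lock(&list_lock);

        while (head != NULL) {
                struct node *n = head;

                head = n->next;
                pthread_mutex_unlock(&list_lock);  /* heavy work unlocked */

                process_node(n);

                pthread_mutex_lock(&list_lock);    /* retake before retest */
        }

        pthread_mutex_unlock(&list_lock);
}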
diff --git a/lustre/utils/gss/lgss_utils.c b/lustre/utils/gss/lgss_utils.c
index 2f5f273..5e2672c 100644
--- a/lustre/utils/gss/lgss_utils.c
+++ b/lustre/utils/gss/lgss_utils.c
@@ -118,52 +118,64 @@ static struct lgss_mutex_s {
         [LGSS_MUTEX_KRB5]       = { "keyring",  0x4292d473, 0 },
 };

-int lgss_mutex_lock(lgss_mutex_id_t mid)
+static int lgss_mutex_get(struct lgss_mutex_s *mutex)
 {
-        struct lgss_mutex_s *sem = &lgss_mutexes[mid];
-        struct sembuf        sembuf;
-
-        lassert(mid < LGSS_MUTEX_MAX);
+        mutex->sem_id = semget(mutex->sem_key, 1, IPC_CREAT | IPC_EXCL | 0700);
+        if (mutex->sem_id != -1) {
+                if (semctl(mutex->sem_id, 0, SETVAL, 1) == -1) {
+                        logmsg(LL_ERR, "initialize sem %x: %s\n",
+                               mutex->sem_key, strerror(errno));
+                        return -1;
+                }

-        logmsg(LL_TRACE, "locking mutex %x for %s\n",
-               sem->sem_key, sem->sem_name);
-again:
-        sem->sem_id = semget(sem->sem_key, 1, IPC_CREAT | IPC_EXCL | 0700);
-        if (sem->sem_id == -1) {
+                logmsg(LL_DEBUG, "created & initialized sem %x id %d for %s\n",
+                       mutex->sem_key, mutex->sem_id, mutex->sem_name);
+        } else {
                 if (errno != EEXIST) {
                         logmsg(LL_ERR, "create sem %x: %s\n",
-                               sem->sem_key, strerror(errno));
+                               mutex->sem_key, strerror(errno));
                         return -1;
                 }

-                /* already exist. Note there's still a small window of racing
-                 * with other processes, due to the stupid semaphore semantics.
-                 */
-                sem->sem_id = semget(sem->sem_key, 0, 0700);
-                if (sem->sem_id == -1) {
+                /* already created by someone else, simply get it.
+                 * Note there's still a small window of racing between create
+                 * and initialize, a flaw in semaphore semantics */
+                mutex->sem_id = semget(mutex->sem_key, 0, 0700);
+                if (mutex->sem_id == -1) {
                         if (errno == ENOENT) {
                                 logmsg(LL_WARN, "sem %x just disappeared "
-                                       "under us, try again\n", sem->sem_key);
-                                goto again;
+                                       "under us, try again\n", mutex->sem_key);
+                                return 1;
                         }

-                        logmsg(LL_ERR, "get sem %x: %s\n", sem->sem_key,
+                        logmsg(LL_ERR, "get sem %x: %s\n", mutex->sem_key,
                                strerror(errno));
                         return -1;
                 }
-        } else {
-                int val = 1;

-                logmsg(LL_DEBUG, "created sem %x for %s, initialize to 1\n",
-                       sem->sem_key, sem->sem_name);
-                if (semctl(sem->sem_id, 0, SETVAL, val) == -1) {
-                        logmsg(LL_ERR, "initialize sem %x: %s\n",
-                               sem->sem_key, strerror(errno));
-                        return -1;
-                }
+                logmsg(LL_TRACE, "got sem %x id %d for %s\n",
+                       mutex->sem_key, mutex->sem_id, mutex->sem_name);
         }

-        logmsg(LL_TRACE, "got sem %x id %d for %s\n",
-               sem->sem_key, sem->sem_id, sem->sem_name);
+
+        return 0;
+}
+
+int lgss_mutex_lock(lgss_mutex_id_t mid)
+{
+        struct lgss_mutex_s *sem = &lgss_mutexes[mid];
+        struct sembuf        sembuf;
+        int                  rc;
+
+        lassert(mid < LGSS_MUTEX_MAX);
+
+        logmsg(LL_TRACE, "locking mutex %x for %s\n",
+               sem->sem_key, sem->sem_name);
+
+        do {
+                rc = lgss_mutex_get(sem);
+                if (rc < 0)
+                        return rc;
+        } while (rc);

         sembuf.sem_num = 0;
         sembuf.sem_op = -1;
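The retry protocol that lgss_mutex_get() implements above is the classic answer to a real gap in SysV semaphores: semget() creates the set and semctl(SETVAL) initializes it as two separate, non-atomic steps, and a set can also vanish between an EEXIST result and the follow-up semget(). A self-contained user-space version of the same protocol (the key value is reused from the table above for illustration; error reporting is simplified):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/sem.h>

/* try once: 0 = got an initialized set, 1 = transient race (retry),
 * -1 = hard failure. this mirrors the lgss_mutex_get() contract.
 * note: strictly, semctl's fourth argument is a union semun; passing
 * the int directly matches the original code and works on Linux. */
static int sem_get_once(key_t key, int *sem_id)
{
        int id = semget(key, 1, IPC_CREAT | IPC_EXCL | 0700);

        if (id != -1) {
                /* we created it, so we must initialize it */
                if (semctl(id, 0, SETVAL, 1) == -1)
                        return -1;
                *sem_id = id;
                return 0;
        }
        if (errno != EEXIST)
                return -1;

        /* someone else created it; it may still disappear under us */
        id = semget(key, 0, 0700);
        if (id == -1)
                return errno == ENOENT ? 1 : -1;
        *sem_id = id;
        return 0;
}

int main(void)
{
        int id, rc;

        do {
                rc = sem_get_once(0x4292d473, &id);
        } while (rc == 1);

        if (rc < 0) {
                fprintf(stderr, "sem setup failed: %s\n", strerror(errno));
                return 1;
        }
        printf("semaphore id %d ready\n", id);
        return 0;
}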