* Recommended e2fsprogs version: 1.40.2-cfs1
* Note that reiserfs quotas are disabled on SLES 10 in this kernel.
+Severity : enhancement
+Bugzilla : 13641
+Description: light-weight GSS support
+Details    : Support the krb5n and krb5a modes, which keep Kerberos 5
+             authentication while reducing performance overhead.
+
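For background: krb5n and krb5a keep the Kerberos 5 handshake, so peers are
still authenticated, but they apply little or no per-message cryptography,
which is where krb5i (integrity) and krb5p (privacy) spend most of their CPU
time. A minimal standalone C sketch of that flavor-to-service mapping
follows; it is illustrative only, not code from this patch, and the svc
levels merely mirror the SPTLRPC_SVC_* idea used elsewhere in this code:

    #include <stdio.h>

    /* per-message protection levels, echoing the SPTLRPC_SVC_* idea */
    enum svc_level { SVC_NULL, SVC_AUTH, SVC_INTG, SVC_PRIV };

    static const struct {
            const char *name;
            enum svc_level svc;
    } flavors[] = {
            { "krb5n", SVC_NULL }, /* authentication only, no per-msg protection */
            { "krb5a", SVC_AUTH }, /* authentication plus header-only protection */
            { "krb5i", SVC_INTG }, /* adds an integrity checksum per message */
            { "krb5p", SVC_PRIV }, /* adds encryption per message */
    };

    int main(void)
    {
            unsigned int i;

            for (i = 0; i < sizeof(flavors) / sizeof(flavors[0]); i++)
                    printf("%s -> svc level %d\n",
                           flavors[i].name, flavors[i].svc);
            return 0;
    }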
+Severity : enhancement
+Bugzilla : 11832
+Description: Linux keyring support
+Details    : Use the Linux keyring service for the Lustre GSS internal
+             context refresh/cache mechanism.
+
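The keyring support caches each established GSS context in a kernel key, so
context lookup and refresh ride on the kernel's request_key() upcall
machinery. Below is a minimal sketch of that lookup pattern, simplified from
the gss_keyring code in this patch; construct_key_desc(), gss_key_type and
the ctx refcounting are the patch's own helpers, and error handling is
trimmed:

    static struct ptlrpc_cli_ctx *lookup_ctx_sketch(struct ptlrpc_sec *sec,
                                                    struct vfs_cred *vcred)
    {
            struct ptlrpc_cli_ctx *ctx = NULL;
            struct key *key;
            char desc[24];

            /* the key description encodes the sec instance and the uid */
            construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);

            /* request_key() returns a matching cached key, or upcalls to
             * userspace to negotiate a context and instantiate a new key */
            key = request_key(&gss_key_type, desc, NULL);
            if (IS_ERR(key))
                    return NULL;

            /* the key <-> ctx binding via payload.data is serialized by
             * key->sem, as in the real lookup path later in this patch */
            down_write(&key->sem);
            if (key->payload.data != NULL) {
                    ctx = key->payload.data;
                    atomic_inc(&ctx->cc_refcount);
            }
            up_write(&key->sem);

            key_put(key);
            return ctx;
    }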
Severity : normal
Bugzilla : 12186
Description: Fix errors in lfs documentation
LBUG();
/* 3. reverse context handle. actually only needed by root user,
- * but we send it anyway.
- */
+ * but we send it anyway. */
gsec = container_of(imp->imp_sec, struct gss_sec, gs_base);
obj.len = sizeof(gsec->gs_rvs_hdl);
obj.data = (__u8 *) &gsec->gs_rvs_hdl;
* leave recovery decisions to general ptlrpc layer.
*
* FIXME maybe some other error code shouldn't be treated
- * as timeout.
- */
+ * as timeout. */
param.status = rc;
if (rc != -EACCES)
param.status = -ETIMEDOUT;
/* fix the user desc */
if (SEC_FLAVOR_HAS_USER(req->rq_sec_flavor)) {
/* we rely on the fact that this request is in AUTH mode,
- * and user_desc at offset 2.
- */
+ * and user_desc at offset 2. */
pud = lustre_msg_buf(req->rq_reqbuf, 2, sizeof(*pud));
LASSERT(pud);
pud->pud_uid = pud->pud_fsuid = ctx->cc_vcred.vc_uid;
struct ptlrpc_sec *sec = ctx->cc_sec;
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- /*
- * if hashed bit has gone, leave the job to somebody who is doing it
- */
+ /* if the hashed bit has gone, leave the job to whoever is doing it */
if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
return 0;
- /*
- * drop ref inside spin lock to prevent race with other operations
- */
+ /* drop ref inside spin lock to prevent race with other operations */
spin_lock_if(&sec->ps_lock, !locked);
if (gsec_kr->gsk_root_ctx == ctx)
LASSERT(atomic_read(&key->usage) > 0);
LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
LASSERT(key->payload.data == NULL);
- /*
- * at this time context may or may not in list.
- */
+
+ /* at this time the context may or may not be in the list. */
key_get(key);
atomic_inc(&ctx->cc_refcount);
ctx2gctx_keyring(ctx)->gck_key = key;
hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
hlist_del_init(&ctx->cc_cache);
- /*
- * we need to wakeup waiting reqs here. the context might
+ /* we need to wake up waiting reqs here. the context might
* be force-released before the upcall finishes, then the
- * late-arrived downcall can't find the ctx even.
- */
+ * late-arrived downcall can't even find the ctx. */
sptlrpc_cli_ctx_wakeup(ctx);
unbind_ctx_kr(ctx);
if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
struct hlist_node *node;
struct ptlrpc_cli_ctx *tmp;
- /*
- * reverse ctx, search root ctx in list, choose the one
+
+ /* reverse ctx, search root ctx in list, choose the one
* with the shortest expire time, which most likely has
- * an established peer ctx at client side.
- */
+ * an established peer ctx at client side. */
hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, cc_cache) {
if (ctx == NULL || ctx->cc_expire == 0 ||
ctx->cc_expire > tmp->cc_expire) {
/* if there's a root_ctx there, instead of obsoleting the current
* one immediately, we leave it operating for a little while.
* hopefully when the first backward rpc with the newest ctx is sent,
- * the client side already have the peer ctx well established.
- */
+ * the client side already has the peer ctx well established. */
ctx_enlist_kr(new_ctx, gsec_kr->gsk_root_ctx ? 0 : 1, 1);
if (key)
/* FIXME
* more precisely deal with setuid. maybe add more information
- * into vfs_cred ??
- */
+ * into vfs_cred ?? */
return (vcred->vc_uid == 0);
}
is_root = user_is_root(sec, vcred);
- /*
- * a little bit optimization for root context
- */
+ /* a little optimization for the root context */
if (is_root) {
ctx = sec_lookup_root_ctx_kr(sec);
/*
LASSERT(create != 0);
- /*
- * for root context, obtain lock and check again, this time hold
+ /* for root context, obtain lock and check again, this time hold
* the root upcall lock, make sure nobody else populated new root
- * context after last check.
- */
+ * context after last check. */
if (is_root) {
mutex_lock(&gsec_kr->gsk_root_uc_lock);
construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);
- /*
- * callout info: mech:flags:svc_type:peer_nid:target_uuid
- */
+ /* callout info: mech:flags:svc_type:peer_nid:target_uuid */
OBD_ALLOC(coinfo, coinfo_size);
if (coinfo == NULL)
goto out;
goto out;
}
- /*
- * once payload.data was pointed to a ctx, it never changes until
+ /* once payload.data was pointed to a ctx, it never changes until
* we de-associate them; but parallel request_key() may return
* a key with payload.data == NULL at the same time. so we still
- * need wirtelock of key->sem to serialize them.
- */
+ * need the write lock of key->sem to serialize them. */
down_write(&key->sem);
if (likely(key->payload.data != NULL)) {
LASSERT(atomic_read(&key->usage) >= 2);
/* simply take a ref and return. it's upper layer's
- * responsibility to detect & replace dead ctx.
- */
+ * responsibility to detect & replace dead ctx. */
atomic_inc(&ctx->cc_refcount);
} else {
/* pre initialization with a cli_ctx. this can't be done in
- * key_instantiate() because we'v no enough information there.
- */
+ * key_instantiate() because we don't have enough information
+ * there. */
ctx = ctx_create_kr(sec, vcred);
if (ctx != NULL) {
ctx_enlist_kr(ctx, is_root, 0);
CDEBUG(D_SEC, "installed key %p <-> ctx %p (sec %p)\n",
key, ctx, sec);
} else {
- /*
- * we'd prefer to call key_revoke(), but we more like
- * to revoke it within this key->sem locked period.
- */
+ /* we'd prefer to call key_revoke(), but we'd rather
+ * revoke it within this key->sem locked period. */
key_revoke_locked(key);
}
{
LASSERT(atomic_read(&ctx->cc_refcount) == 0);
- if (sync)
+ if (sync) {
ctx_destroy_kr(ctx);
- else {
+ } else {
atomic_inc(&ctx->cc_refcount);
sptlrpc_gc_add_ctx(ctx);
}
construct_key_desc(desc, sizeof(desc), sec, uid);
/* there should be only one valid key, but we put it in the
- * loop in case of any weird cases
- */
+ * loop in case of any weird cases */
for (;;) {
key = request_key(&gss_key_type, desc, NULL);
if (IS_ERR(key)) {
/* kill_key_locked() should usually revoke the key, but we
* revoke it again to make sure, e.g. in some cases the key may
- * not well coupled with a context.
- */
+ * not be well coupled with a context. */
key_revoke_locked(key);
up_write(&key->sem);
continue;
/* at this moment there are at least 2 base references:
- * key association and in-list.
- */
+ * key association and in-list. */
if (atomic_read(&ctx->cc_refcount) > 2) {
if (!force)
continue;
RETURN(-EINVAL);
}
- /*
- * there's a race between userspace parent - child processes. if
+ /* there's a race between userspace parent and child processes. if
* the child finishes negotiation too fast and calls kt_update(), the
* ctx might still be NULL. but the key will finally be associated
* with a context, or be revoked. if the key status is fine, return
- * -EAGAIN to allow userspace sleep a while and call again.
- */
+ * -EAGAIN to allow userspace to sleep a while and call again. */
if (ctx == NULL) {
CWARN("race in userspace. key %p(%x) flags %lx\n",
key, key->serial, key->flags);
sptlrpc_cli_ctx_get(ctx);
gctx = ctx2gctx(ctx);
- rc = -EFAULT;
- if (buffer_extract_bytes(&data, &datalen,
- &gctx->gc_win, sizeof(gctx->gc_win))) {
+ rc = buffer_extract_bytes(&data, &datalen, &gctx->gc_win,
+ sizeof(gctx->gc_win));
+ if (rc) {
CERROR("failed extract seq_win\n");
goto out;
}
if (gctx->gc_win == 0) {
__u32 nego_rpc_err, nego_gss_err;
- if (buffer_extract_bytes(&data, &datalen,
- &nego_rpc_err, sizeof(nego_rpc_err))) {
+ rc = buffer_extract_bytes(&data, &datalen, &nego_rpc_err,
+ sizeof(nego_rpc_err));
+ if (rc) {
CERROR("failed to extrace rpc rc\n");
goto out;
}
- if (buffer_extract_bytes(&data, &datalen,
- &nego_gss_err, sizeof(nego_gss_err))) {
+ rc = buffer_extract_bytes(&data, &datalen, &nego_gss_err,
+ sizeof(nego_gss_err));
+ if (rc) {
CERROR("failed to extrace gss rc\n");
goto out;
}
CERROR("negotiation: rpc err %d, gss err %x\n",
nego_rpc_err, nego_gss_err);
- if (nego_rpc_err)
- rc = nego_rpc_err;
+ rc = nego_rpc_err ? nego_rpc_err : -EACCES;
} else {
- if (rawobj_extract_local_alloc(&gctx->gc_handle,
- (__u32 **)&data, &datalen)) {
+ rc = rawobj_extract_local_alloc(&gctx->gc_handle,
+ (__u32 **) &data, &datalen);
+ if (rc) {
CERROR("failed extract handle\n");
goto out;
}
- if (rawobj_extract_local(&tmpobj, (__u32 **)&data, &datalen)) {
+ rc = rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen);
+ if (rc) {
CERROR("failed extract mech\n");
goto out;
}
- if (lgss_import_sec_context(&tmpobj,
- sec2gsec(ctx->cc_sec)->gs_mech,
- &gctx->gc_mechctx) !=
- GSS_S_COMPLETE) {
+ rc = lgss_import_sec_context(&tmpobj,
+ sec2gsec(ctx->cc_sec)->gs_mech,
+ &gctx->gc_mechctx);
+ if (rc != GSS_S_COMPLETE)
CERROR("failed import context\n");
- goto out;
- }
-
- rc = 0;
+ else
+ rc = 0;
}
out:
/* we don't care what current status of this ctx, even someone else
* is operating on the ctx at the same time. we just add up our own
- * opinions here.
- */
+ * opinions here. */
if (rc == 0) {
gss_cli_ctx_uptodate(gctx);
} else {
- /*
- * this will also revoke the key. has to be done before
- * wakeup waiters otherwise they can find the stale key
- */
+ /* this will also revoke the key. has to be done before
+ * waking up waiters, otherwise they can find the stale key */
kill_key_locked(key);
cli_ctx_expire(ctx);
get_random_bytes(conf, ke->ke_conf_size);
/* get encryption blocksize. note kc_keye might not be associated with
- * a tfm, currently only for arcfour-hmac
- */
+ * a tfm, currently only for arcfour-hmac */
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
LASSERT(kctx->kc_keye.kb_tfm == NULL);
blocksize = 1;
struct vfs_cred *vcred)
{
struct gss_cli_ctx *gctx;
+ int rc;
OBD_ALLOC_PTR(gctx);
if (gctx == NULL)
return NULL;
- if (gss_cli_ctx_init_common(sec, &gctx->gc_base, &gss_pipefs_ctxops,
- vcred)) {
+ rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
+ &gss_pipefs_ctxops, vcred);
+ if (rc) {
OBD_FREE_PTR(gctx);
return NULL;
}
if (atomic_dec_and_test(&ctx->cc_refcount)) {
__hlist_del(&ctx->cc_cache);
hlist_add_head(&ctx->cc_cache, freelist);
- } else
+ } else {
hlist_del_init(&ctx->cc_cache);
+ }
}
/*
LASSERT(hlist_unhashed(&ctx->cc_cache));
/* if async is required, we must clear the UPTODATE bit to prevent extra
- * rpcs during destroy procedure.
- */
+ * rpcs during destroy procedure. */
if (!sync)
clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
OBD_FREE(buf, mlen);
/* FIXME
* hack pipefs: always return the asked length, otherwise all following
- * downcalls might be messed up.
- */
+ * downcalls might be messed up. */
rc = mlen;
RETURN(rc);
}
sizeof(gmsg->gum_data.gum_obd));
/* This could only happen when the sysadmin set it dead/expired
- * using lctl by force.
- */
+ * using lctl by force. */
if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
{
/* if we are refreshing for root, also update the reverse
- * handle index, do not confuse reverse contexts.
- */
+ * handle index, do not confuse reverse contexts. */
if (ctx->cc_vcred.vc_uid == 0) {
struct gss_sec *gsec;
CERROR("Failed to create gss pipe dir: %ld\n", PTR_ERR(de));
return PTR_ERR(de);
}
- /* FIXME
- * hack pipefs: dput will sometimes cause oops during module unload
- * and lgssd close the pipe fds.
- */
- //dput(de);
+
+ /* FIXME hack pipefs: dput will sometimes cause oops during module
+ * unload and when lgssd closes the pipe fds. */
/* krb5 mechanism */
de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,
for (i = 0; i < MECH_MAX; i++) {
LASSERT(list_empty(&upcall_lists[i]));
- /*
- * dput pipe dentry here might cause lgssd oops.
- */
- //dput(de_pipes[i]);
+
+ /* dput of the pipe dentry here might cause an lgssd oops. */
de_pipes[i] = NULL;
}
}
/* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech.
- */
+ * is invalid, here we retrieve it from the mech. */
if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
lgss_mech_put(gm);
cache_check:
/* Note each time cache_check() will drop a reference if return
* non-zero. We hold an extra reference on initial rsip, but must
- * take care of following calls.
- */
+ * take care of following calls. */
rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
switch (rc) {
case -EAGAIN: {
break;
case 0:
/* if not the first check, we have to release the extra
- * reference we just added on it.
- */
+ * reference we just added on it. */
if (!first_check)
cache_put(&rsip->h, &rsi_cache);
CDEBUG(D_SEC, "cache_check is good\n");
out:
/* it looks like here we should put rsip also, but this messes up
- * with NFS cache mgmt code... FIXME
- */
+ * with NFS cache mgmt code... FIXME */
#if 0
if (rsip)
rsi_put(&rsip->h, &rsi_cache);
* the init upcall channel, otherwise there's a big chance that the first
* upcall is issued before the channel is opened, thus the nfsv4 cache code
* will drop the request directly, leading to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds.
- */
+ * here we wait at most 1.5 seconds. */
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
CWARN("Init channel is not opened by lsvcgssd, following "
"request might be dropped until lsvcgssd is active\n");
- /*
- * this helps reducing context index confliction. after server reboot,
+ /* this helps reduce context index conflicts. after server reboot,
* conflicting requests from clients might be filtered out by initial
* sequence number checking, thus no chance to send error notification
- * back to clients.
- */
+ * back to clients. */
get_random_bytes(&__ctx_index, sizeof(__ctx_index));
return 0;
{
if (privacy) {
/* we suppose max cipher block size is 16 bytes. here we
- * add 16 for confounder and 16 for padding.
- */
+ * add 16 for confounder and 16 for padding. */
return GSS_KRB5_INTEG_MAX_PAYLOAD + msgsize + 16 + 16 + 16;
} else {
return GSS_KRB5_INTEG_MAX_PAYLOAD;
return 1;
/* expire is 0 means never expire. a newly created gss context
- * which during upcall may has 0 expiration
- */
+ * which during upcall may have 0 expiration */
if (ctx->cc_expire == 0)
return 0;
/* At this point this ctx might have been marked as dead by
* someone else, in which case nobody will make further use
* of it. we don't care, and marking it UPTODATE will help
- * destroying server side context when it be destroied.
- */
+ * destroy the server side context when it is destroyed. */
set_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
if (sec_is_reverse(ctx->cc_sec))
sec2target_str(ctx->cc_sec), ctx->cc_expire,
ctx->cc_expire - cfs_time_current_sec());
- /*
- * install reverse svc ctx, but only for forward connection
- * and root context
- */
+ /* install reverse svc ctx, but only for forward connection
+ * and root context */
if (!sec_is_reverse(ctx->cc_sec) && ctx->cc_vcred.vc_uid == 0) {
gss_sec_install_rctx(ctx->cc_sec->ps_import,
ctx->cc_sec, ctx);
* lead to the sequence number falling behind the window on server and
* be dropped. also applies to gss_cli_ctx_seal().
*
- * Note: null mode dosen't check sequence number.
- */
+ * Note: null mode doesn't check sequence number. */
if (svc != SPTLRPC_SVC_NULL &&
atomic_read(&gctx->gc_seq) - seq > GSS_SEQ_REPACK_THRESHOLD) {
int behind = atomic_read(&gctx->gc_seq) - seq;
* returned in this case.
*
* but in any cases, don't resend ctx destroying rpc, don't resend
- * reverse rpc.
- */
+ * reverse rpc. */
if (req->rq_ctx_fini) {
CWARN("server respond error (%08x/%08x) for ctx fini\n",
errhdr->gh_major, errhdr->gh_minor);
"NO_CONTEXT" : "BAD_SIG");
sptlrpc_cli_ctx_expire(ctx);
- /*
- * we need replace the ctx right here, otherwise during
+
+ /* we need to replace the ctx right here, otherwise during
* resend we'll hit the logic in sptlrpc_req_refresh_ctx()
* which keeps the ctx with the RESEND flag, thus we'll never
- * get rid of this ctx.
- */
+ * get rid of this ctx. */
rc = sptlrpc_req_replace_dead_ctx(req);
if (rc == 0)
req->rq_resend = 1;
gctx = container_of(ctx, struct gss_cli_ctx, gc_base);
/* special case for context negotiation, rq_repmsg/rq_replen actually
- * are not used currently.
- */
+ * are not used currently. */
if (req->rq_ctx_init) {
req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
req->rq_replen = msg->lm_buflens[1];
memset(req->rq_reqbuf, 0, req->rq_reqbuf_len);
/* if the pre-allocated buffer is big enough, we just pack
- * both clear buf & request buf in it, to avoid more alloc.
- */
+ * both clear buf & request buf in it, to avoid more alloc. */
if (clearsize + wiresize <= req->rq_reqbuf_len) {
req->rq_clrbuf =
(void *) (((char *) req->rq_reqbuf) + wiresize);
buflens[2] = gss_cli_payload(req->rq_cli_ctx, newclrbuf_size, 1);
newcipbuf_size = lustre_msg_size_v2(3, buflens);
- /*
- * handle the case that we put both clear buf and cipher buf into
- * pre-allocated single buffer.
- */
+ /* handle the case that we put both clear buf and cipher buf into
+ * a single pre-allocated buffer. */
if (unlikely(req->rq_pool) &&
req->rq_clrbuf >= req->rq_reqbuf &&
(char *) req->rq_clrbuf <
(char *) req->rq_reqbuf + req->rq_reqbuf_len) {
- /*
- * it couldn't be better we still fit into the
- * pre-allocated buffer.
- */
+ /* it couldn't be better if we still fit into the
+ * pre-allocated buffer. */
if (newclrbuf_size + newcipbuf_size <= req->rq_reqbuf_len) {
void *src, *dst;
req->rq_clrbuf_len = newclrbuf_size;
req->rq_reqmsg = lustre_msg_buf(req->rq_clrbuf, 0, 0);
} else {
- /*
- * sadly we have to split out the clear buffer
- */
+ /* sadly we have to split out the clear buffer */
LASSERT(req->rq_reqbuf_len >= newcipbuf_size);
LASSERT(req->rq_clrbuf_len < newclrbuf_size);
}
seclen -= 4;
/* extract target uuid, note this code is somewhat fragile
- * because touched internal structure of obd_uuid
- */
+ * because it touches the internal structure of obd_uuid */
if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
CERROR("failed to extract target uuid\n");
RETURN(SECSVC_DROP);
gw->gw_svc, major, grctx->src_ctx, grctx->src_ctx->gsc_uid,
libcfs_nid2str(req->rq_peer.nid));
error:
- /*
- * we only notify client in case of NO_CONTEXT/BAD_SIG, which
- * might happen after server reboot, to allow recovery.
- */
+ /* we only notify the client in case of NO_CONTEXT/BAD_SIG, which
+ * might happen after server reboot, to allow recovery. */
if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
gss_pack_err_notify(req, major, 0) == 0)
RETURN(SECSVC_COMPLETE);
}
LASSERT(cipher_obj.len <= cipher_buflen);
- /*
- * we are about to override data at rs->rs_repbuf, nullify pointers
- * to which to catch further illegal usage.
- */
+ /* we are about to overwrite data at rs->rs_repbuf, nullify pointers
+ * to it to catch further illegal usage. */
grctx->src_repbsd = NULL;
grctx->src_repbsd_size = 0;
if (rc)
goto out_svc_upcall;
- /*
- * register policy after all other stuff be intialized, because it
- * might be in used immediately after the registration.
- */
+ /* register policy after all other stuff is initialized, because it
+ * might be in use immediately after the registration. */
rc = gss_init_keyring();
if (rc)
/*
* (1) fill all the free slots of current pools.
*/
- /*
- * free slots are those left by rent pages, and the extra ones with
- * index >= eep_total_pages, locate at the tail of last pool.
- */
+ /* free slots are those left by rent pages, and the extra ones with
+ * index >= epp_total_pages, located at the tail of the last pool. */
freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
if (freeslot != 0)
freeslot = PAGES_PER_POOL - freeslot;
if (++page_pools.epp_waitqlen > page_pools.epp_st_max_wqlen)
page_pools.epp_st_max_wqlen = page_pools.epp_waitqlen;
- /*
- * we just wait if someone else is adding more pages, or
+
+ /* we just wait if someone else is adding more pages, or
* wait queue length is not deep enough. otherwise try to
* add more pages in the pools.
*
* FIXME the policy of detecting resource tightness & growing the pool
- * need to be reconsidered.
- */
+ * needs to be reconsidered. */
if (page_pools.epp_adding || page_pools.epp_waitqlen < 2 ||
page_pools.epp_full) {
set_current_state(TASK_UNINTERRUPTIBLE);
goto again;
}
- /*
- * record max wait time
- */
+
+ /* record max wait time */
if (unlikely(tick1 != 0)) {
tick2 = cfs_time_current();
if (tick2 - tick1 > page_pools.epp_st_max_wait)
page_pools.epp_st_max_wait = tick2 - tick1;
}
- /*
- * proceed with rest of allocation
- */
+
+ /* proceed with rest of allocation */
page_pools.epp_free_pages -= desc->bd_max_iov;
p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
}
/* checksum mismatch, re-compute a new one and compare with
- * others, give out proper warnings.
- */
+ * others, give out proper warnings. */
OBD_ALLOC(new, csum_size);
if (new == NULL)
return -ENOMEM;
{
struct ptlrpc_cli_ctx *ctx;
-again:
spin_lock(&sec_gc_ctx_list_lock);
- if (!list_empty(&sec_gc_ctx_list)) {
+
+ while (!list_empty(&sec_gc_ctx_list)) {
ctx = list_entry(sec_gc_ctx_list.next,
struct ptlrpc_cli_ctx, cc_gc_chain);
list_del_init(&ctx->cc_gc_chain);
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
sptlrpc_cli_ctx_put(ctx, 1);
- goto again;
+ spin_lock(&sec_gc_ctx_list_lock);
}
+
spin_unlock(&sec_gc_ctx_list_lock);
}
again:
mutex_down(&sec_gc_mutex);
list_for_each_entry_safe(sec, next, &sec_gc_list, ps_gc_list) {
- /*
- * if someone is waiting to be deleted, let it
- * proceed as soon as possible.
- */
+ /* if someone is waiting to be deleted, let it
+ * proceed as soon as possible. */
if (atomic_read(&sec_gc_wait_del)) {
CWARN("deletion pending, retry\n");
mutex_up(&sec_gc_mutex);
[LGSS_MUTEX_KRB5] = { "keyring", 0x4292d473, 0 },
};
-int lgss_mutex_lock(lgss_mutex_id_t mid)
+static int lgss_mutex_get(struct lgss_mutex_s *mutex)
{
- struct lgss_mutex_s *sem = &lgss_mutexes[mid];
- struct sembuf sembuf;
-
- lassert(mid < LGSS_MUTEX_MAX);
+ mutex->sem_id = semget(mutex->sem_key, 1, IPC_CREAT | IPC_EXCL | 0700);
+ if (mutex->sem_id != -1) {
+ if (semctl(mutex->sem_id, 0, SETVAL, 1) == -1) {
+ logmsg(LL_ERR, "initialize sem %x: %s\n",
+ mutex->sem_key, strerror(errno));
+ return -1;
+ }
- logmsg(LL_TRACE, "locking mutex %x for %s\n",
- sem->sem_key, sem->sem_name);
-again:
- sem->sem_id = semget(sem->sem_key, 1, IPC_CREAT | IPC_EXCL | 0700);
- if (sem->sem_id == -1) {
+ logmsg(LL_DEBUG, "created & initialized sem %x id %d for %s\n",
+ mutex->sem_key, mutex->sem_id, mutex->sem_name);
+ } else {
if (errno != EEXIST) {
logmsg(LL_ERR, "create sem %x: %s\n",
- sem->sem_key, strerror(errno));
+ mutex->sem_key, strerror(errno));
return -1;
}
- /* already exist. Note there's still a small window of racing
- * with other processes, due to the stupid semaphore semantics.
- */
- sem->sem_id = semget(sem->sem_key, 0, 0700);
- if (sem->sem_id == -1) {
+ /* already created by someone else, simply get it.
+ * Note there's still a small window of racing between create
+ * and initialize, a flaw in semaphore semantics */
+ mutex->sem_id = semget(mutex->sem_key, 0, 0700);
+ if (mutex->sem_id == -1) {
if (errno == ENOENT) {
logmsg(LL_WARN, "sem %x just disappeared "
- "under us, try again\n", sem->sem_key);
- goto again;
+ "under us, try again\n", mutex->sem_key);
+ return 1;
}
- logmsg(LL_ERR, "get sem %x: %s\n", sem->sem_key,
+ logmsg(LL_ERR, "get sem %x: %s\n", mutex->sem_key,
strerror(errno));
return -1;
}
- } else {
- int val = 1;
- logmsg(LL_DEBUG, "created sem %x for %s, initialize to 1\n",
- sem->sem_key, sem->sem_name);
- if (semctl(sem->sem_id, 0, SETVAL, val) == -1) {
- logmsg(LL_ERR, "initialize sem %x: %s\n",
- sem->sem_key, strerror(errno));
- return -1;
- }
+ logmsg(LL_TRACE, "got sem %x id %d for %s\n",
+ mutex->sem_key, mutex->sem_id, mutex->sem_name);
}
- logmsg(LL_TRACE, "got sem %x id %d for %s\n",
- sem->sem_key, sem->sem_id, sem->sem_name);
+
+ return 0;
+}
+
+int lgss_mutex_lock(lgss_mutex_id_t mid)
+{
+ struct lgss_mutex_s *sem = &lgss_mutexes[mid];
+ struct sembuf sembuf;
+ int rc;
+
+ lassert(mid < LGSS_MUTEX_MAX);
+
+ logmsg(LL_TRACE, "locking mutex %x for %s\n",
+ sem->sem_key, sem->sem_name);
+
+ do {
+ rc = lgss_mutex_get(sem);
+ if (rc < 0)
+ return rc;
+ } while (rc);
sembuf.sem_num = 0;
sembuf.sem_op = -1;