* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Modifications for Lustre
- * Copyright 2004 - 2006, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ *
* Author: Eric Mei <ericm@clusterfs.com>
*/
struct vfs_cred *vcred)
{
struct gss_cli_ctx *gctx;
+ int rc;
OBD_ALLOC_PTR(gctx);
if (gctx == NULL)
return NULL;
- if (gss_cli_ctx_init_common(sec, &gctx->gc_base, &gss_pipefs_ctxops,
- vcred)) {
+ rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
+ &gss_pipefs_ctxops, vcred);
+ if (rc) {
OBD_FREE_PTR(gctx);
return NULL;
}
void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
{
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- int rc;
- rc = gss_cli_ctx_fini_common(sec, ctx);
+ if (gss_cli_ctx_fini_common(sec, ctx))
+ return;
+
OBD_FREE_PTR(gctx);
- if (rc) {
- CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
- sec->ps_policy->sp_name, sec);
- sptlrpc_sec_destroy(sec);
- }
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
}
static
{
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
atomic_inc(&ctx->cc_refcount);
- hlist_add_head(&ctx->cc_hash, hash);
+ hlist_add_head(&ctx->cc_cache, hash);
}
/*
LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!hlist_unhashed(&ctx->cc_hash));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
if (atomic_dec_and_test(&ctx->cc_refcount)) {
- __hlist_del(&ctx->cc_hash);
- hlist_add_head(&ctx->cc_hash, freelist);
- } else
- hlist_del_init(&ctx->cc_hash);
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, freelist);
+ } else {
+ hlist_del_init(&ctx->cc_cache);
+ }
}
/*
struct ptlrpc_cli_ctx *ctx;
while (!hlist_empty(head)) {
- ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_hash);
+ ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_cache);
LASSERT(atomic_read(&ctx->cc_refcount) == 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- hlist_del_init(&ctx->cc_hash);
+ hlist_del_init(&ctx->cc_cache);
ctx_destroy_pf(ctx->cc_sec, ctx);
}
}
{
if (ctx_check_death_pf(ctx, NULL))
return 1;
- if (cli_ctx_is_uptodate(ctx))
+ if (cli_ctx_is_ready(ctx))
return 0;
return 1;
}
spin_lock(&ctx->cc_sec->ps_lock);
if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!hlist_unhashed(&ctx->cc_hash));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
LASSERT(atomic_read(&ctx->cc_refcount) > 1);
- hlist_del_init(&ctx->cc_hash);
+ hlist_del_init(&ctx->cc_cache);
if (atomic_dec_and_test(&ctx->cc_refcount))
LBUG();
}
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx;
struct hlist_node *pos, *next;
- HLIST_HEAD(freelist);
+ CFS_HLIST_HEAD(freelist);
unsigned int hash;
ENTRY;
spin_lock(&gsec->gs_base.ps_lock);
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[hash], cc_hash) {
+ &gsec_pf->gsp_chash[hash], cc_cache) {
if (!ctx_match_pf(ctx, &new->cc_vcred))
continue;
}
ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
- atomic_inc(&gsec->gs_base.ps_busy);
spin_unlock(&gsec->gs_base.ps_lock);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_hash)
+ &gsec_pf->gsp_chash[i], cc_cache)
ctx_check_death_locked_pf(ctx, freelist);
}
static
struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx,
- __u32 flavor,
- unsigned long flags)
+ struct sptlrpc_flavor *sf)
{
struct gss_sec_pipefs *gsec_pf;
int alloc_size, hash_size, i;
#define GSS_SEC_PIPEFS_CTX_HASH_SIZE (32)
- if (ctx || flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
+ if (ctx ||
+ sf->sf_flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
hash_size = 1;
else
hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
gsec_pf->gsp_chash_size = hash_size;
for (i = 0; i < hash_size; i++)
- INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
+ CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
- imp, ctx, flavor, flags))
+ imp, ctx, sf))
goto err_free;
if (ctx == NULL) {
struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
struct hlist_head *hash_head;
struct hlist_node *pos, *next;
- HLIST_HEAD(freelist);
+ CFS_HLIST_HEAD(freelist);
unsigned int hash, gc = 0, found = 0;
ENTRY;
gc = 1;
}
- hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_hash) {
+ hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
if (gc == 0 &&
ctx_check_death_locked_pf(ctx,
remove_dead ? &freelist : NULL))
if (found) {
if (new && new != ctx) {
/* lost the race, just free it */
- hlist_add_head(&new->cc_hash, &freelist);
+ hlist_add_head(&new->cc_cache, &freelist);
new = NULL;
}
/* hot node, move to head */
- if (hash_head->first != &ctx->cc_hash) {
- __hlist_del(&ctx->cc_hash);
- hlist_add_head(&ctx->cc_hash, hash_head);
+ if (hash_head->first != &ctx->cc_cache) {
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, hash_head);
}
} else {
/* don't allocate for reverse sec */
- if (sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
+ if (sec_is_reverse(sec)) {
spin_unlock(&sec->ps_lock);
RETURN(NULL);
}
int sync)
{
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- LASSERT(hlist_unhashed(&ctx->cc_hash));
+ LASSERT(hlist_unhashed(&ctx->cc_cache));
/* if required async, we must clear the UPTODATE bit to prevent extra
- * rpcs during destroy procedure.
- */
+ * rpcs during destroy procedure. */
if (!sync)
clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx;
struct hlist_node *pos, *next;
- HLIST_HEAD(freelist);
+ CFS_HLIST_HEAD(freelist);
int i, busy = 0;
ENTRY;
spin_lock(&sec->ps_lock);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_hash) {
+ &gsec_pf->gsp_chash[i], cc_cache) {
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
int gss_svc_install_rctx_pf(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx)
{
- struct gss_sec *gsec;
+ struct ptlrpc_sec *sec;
+ int rc;
- LASSERT(imp->imp_sec);
- LASSERT(ctx);
+ sec = sptlrpc_import_sec_ref(imp);
+ LASSERT(sec);
+ rc = gss_install_rvs_cli_ctx_pf(sec2gsec(sec), ctx);
- gsec = container_of(imp->imp_sec, struct gss_sec, gs_base);
- return gss_install_rvs_cli_ctx_pf(gsec, ctx);
+ sptlrpc_sec_put(sec);
+ return rc;
}
/****************************************
OBD_FREE(buf, mlen);
/* FIXME
* hack pipefs: always return asked length unless all following
- * downcalls might be messed up.
- */
+ * downcalls might be messed up. */
rc = mlen;
RETURN(rc);
}
RETURN(-ENOMEM);
/* initialize pipefs base msg */
- INIT_LIST_HEAD(&gmsg->gum_base.list);
+ CFS_INIT_LIST_HEAD(&gmsg->gum_base.list);
gmsg->gum_base.data = &gmsg->gum_data;
gmsg->gum_base.len = sizeof(gmsg->gum_data);
gmsg->gum_base.copied = 0;
sizeof(gmsg->gum_data.gum_obd));
/* This only could happen when sysadmin set it dead/expired
- * using lctl by force.
- */
+ * using lctl by force. */
if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
{
/* if we are refreshing for root, also update the reverse
- * handle index, do not confuse reverse contexts.
- */
+ * handle index, do not confuse reverse contexts. */
if (ctx->cc_vcred.vc_uid == 0) {
struct gss_sec *gsec;
.refresh = gss_cli_ctx_refresh_pf,
.validate = gss_cli_ctx_validate_pf,
.die = gss_cli_ctx_die_pf,
- .display = gss_cli_ctx_display,
.sign = gss_cli_ctx_sign,
.verify = gss_cli_ctx_verify,
.seal = gss_cli_ctx_seal,
static struct ptlrpc_sec_cops gss_sec_pipefs_cops = {
.create_sec = gss_sec_create_pf,
.destroy_sec = gss_sec_destroy_pf,
+ .kill_sec = gss_sec_kill,
.lookup_ctx = gss_sec_lookup_ctx_pf,
.release_ctx = gss_sec_release_ctx_pf,
.flush_ctx_cache = gss_sec_flush_ctx_cache_pf,
CERROR("Failed to create gss pipe dir: %ld\n", PTR_ERR(de));
return PTR_ERR(de);
}
- /* FIXME
- * hack pipefs: dput will sometimes cause oops during module unload
- * and lgssd close the pipe fds.
- */
- //dput(de);
+
+ /* FIXME hack pipefs: dput will sometimes cause an oops during module
+ * unload and when lgssd closes the pipe fds. */
/* krb5 mechanism */
de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,
}
de_pipes[MECH_KRB5] = de;
- INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
- upcall_locks[MECH_KRB5] = SPIN_LOCK_UNLOCKED;
+ CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
+ spin_lock_init(&upcall_locks[MECH_KRB5]);
return 0;
}
for (i = 0; i < MECH_MAX; i++) {
LASSERT(list_empty(&upcall_lists[i]));
- /*
- * dput pipe dentry here might cause lgssd oops.
- */
- //dput(de_pipes[i]);
+
+ /* dput pipe dentry here might cause lgssd oops. */
de_pipes[i] = NULL;
}