- * Copyright 2004 - 2006, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, Intel Corporation.
+ *
- if (gss_cli_ctx_init_common(sec, &gctx->gc_base, &gss_pipefs_ctxops,
- vcred)) {
+ rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
+ &gss_pipefs_ctxops, vcred);
+ if (rc) {
void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
{
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- if (rc) {
- CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
- sec->ps_policy->sp_name, sec);
- sptlrpc_sec_destroy(sec);
- }
+ cfs_atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
- set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- atomic_inc(&ctx->cc_refcount);
- hlist_add_head(&ctx->cc_hash, hash);
+ set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ cfs_atomic_inc(&ctx->cc_refcount);
+ cfs_hlist_add_head(&ctx->cc_cache, hash);
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!hlist_unhashed(&ctx->cc_hash));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
- if (atomic_dec_and_test(&ctx->cc_refcount)) {
- __hlist_del(&ctx->cc_hash);
- hlist_add_head(&ctx->cc_hash, freelist);
- } else
- hlist_del_init(&ctx->cc_hash);
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
+ __cfs_hlist_del(&ctx->cc_cache);
+ cfs_hlist_add_head(&ctx->cc_cache, freelist);
+ } else {
+ cfs_hlist_del_init(&ctx->cc_cache);
+ }
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- while (!hlist_empty(head)) {
- ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_hash);
+ while (!cfs_hlist_empty(head)) {
+ ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
+ cc_cache);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
+ &ctx->cc_flags) == 0);
- if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!hlist_unhashed(&ctx->cc_hash));
- LASSERT(atomic_read(&ctx->cc_refcount) > 1);
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
+ LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[hash], cc_hash) {
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[hash], cc_cache) {
CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_hash)
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[i], cc_cache)
static
struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx,
{
struct gss_sec_pipefs *gsec_pf;
int alloc_size, hash_size, i;
gss_sec_destroy_common(gsec);
OBD_FREE(gsec, sizeof(*gsec_pf) +
struct gss_sec *gsec;
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
- struct hlist_head *hash_head;
- struct hlist_node *pos, *next;
- HLIST_HEAD(freelist);
+ cfs_hlist_head_t *hash_head;
+ cfs_hlist_node_t *pos, *next;
+ CFS_HLIST_HEAD(freelist);
gsec = container_of(sec, struct gss_sec, gs_base);
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
- hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_hash) {
+ cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
if (gc == 0 &&
ctx_check_death_locked_pf(ctx,
remove_dead ? &freelist : NULL))
- if (hash_head->first != &ctx->cc_hash) {
- __hlist_del(&ctx->cc_hash);
- hlist_add_head(&ctx->cc_hash, hash_head);
+ if (hash_head->first != &ctx->cc_cache) {
+ __cfs_hlist_del(&ctx->cc_cache);
+ cfs_hlist_add_head(&ctx->cc_cache, hash_head);
- if (sec->ps_flags & PTLRPC_SEC_FL_REVERSE) {
- spin_unlock(&sec->ps_lock);
- RETURN(NULL);
- }
-
- if (new) {
- ctx_enhash_pf(new, hash_head);
- ctx = new;
- } else if (create) {
- spin_unlock(&sec->ps_lock);
- new = ctx_create_pf(sec, vcred);
- if (new) {
- clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
- goto retry;
- }
- } else
- ctx = NULL;
- }
-
- /* hold a ref */
- if (ctx)
- atomic_inc(&ctx->cc_refcount);
-
- spin_unlock(&sec->ps_lock);
+ if (sec_is_reverse(sec)) {
+ spin_unlock(&sec->ps_lock);
+ RETURN(NULL);
+ }
+
+ if (new) {
+ ctx_enhash_pf(new, hash_head);
+ ctx = new;
+ } else if (create) {
+ spin_unlock(&sec->ps_lock);
+ new = ctx_create_pf(sec, vcred);
+ if (new) {
+ clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
+ goto retry;
+ }
+ } else {
+ ctx = NULL;
+ }
+ }
+
+ /* hold a ref */
+ if (ctx)
+ cfs_atomic_inc(&ctx->cc_refcount);
+
+ spin_unlock(&sec->ps_lock);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- LASSERT(hlist_unhashed(&ctx->cc_hash));
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
/* destroy this context */
ctx_destroy_pf(sec, ctx);
gsec = container_of(sec, struct gss_sec, gs_base);
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_hash) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[i],
+ cc_cache) {
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
ctx, ctx->cc_vcred.vc_uid,
sec2target_str(ctx->cc_sec), grace);
}
ctx_unhash_pf(ctx, &freelist);
- set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
- if (!grace)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT,
- &ctx->cc_flags);
- }
- }
- spin_unlock(&sec->ps_lock);
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT,
+ &ctx->cc_flags);
+ }
+ }
+ spin_unlock(&sec->ps_lock);
int gss_svc_install_rctx_pf(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx)
{
__u32 gum_mechidx;
struct gss_sec *gum_gsec;
struct gss_cli_ctx *gum_gctx;
struct gss_upcall_msg_data gum_data;
};
/* pipefs dentries for each mechanism */
static struct dentry *de_pipes[MECH_MAX] = { NULL, };
/* all upcall messages linked here */
/* and protected by this */
static spinlock_t upcall_locks[MECH_MAX];
static inline
void upcall_list_lock(int idx)
{
- list_del_init(&gmsg->gum_list);
- LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
- atomic_dec(&gmsg->gum_refcount);
+ cfs_list_del_init(&gmsg->gum_list);
+ LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
+ cfs_atomic_dec(&gmsg->gum_refcount);
CERROR("failed copy user space data\n");
GOTO(out_free, rc = -EFAULT);
}
CERROR("failed copy user space data\n");
GOTO(out_free, rc = -EFAULT);
}
/* timeout is not in use for now */
if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
"errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd, msg->errno);
CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
"errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd, msg->errno);
- gmsg = list_entry(upcall_lists[idx].next,
- struct gss_upcall_msg, gum_list);
+ gmsg = cfs_list_entry(upcall_lists[idx].next,
+ struct gss_upcall_msg, gum_list);
CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
"nid "LPX64", obd %.*s\n", gmsg,
CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
"nid "LPX64", obd %.*s\n", gmsg,
gmsg->gum_base.data = &gmsg->gum_data;
gmsg->gum_base.len = sizeof(gmsg->gum_data);
gmsg->gum_base.copied = 0;
gmsg->gum_base.errno = 0;
/* init upcall msg */
gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
gmsg->gum_gsec = gsec;
gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
{
/* if we are refreshing for root, also update the reverse
.refresh = gss_cli_ctx_refresh_pf,
.validate = gss_cli_ctx_validate_pf,
.die = gss_cli_ctx_die_pf,
.sign = gss_cli_ctx_sign,
.verify = gss_cli_ctx_verify,
.seal = gss_cli_ctx_seal,
static struct ptlrpc_sec_cops gss_sec_pipefs_cops = {
.create_sec = gss_sec_create_pf,
.destroy_sec = gss_sec_destroy_pf,
.lookup_ctx = gss_sec_lookup_ctx_pf,
.release_ctx = gss_sec_release_ctx_pf,
.flush_ctx_cache = gss_sec_flush_ctx_cache_pf,
- /* FIXME
- * hack pipefs: dput will sometimes cause oops during module unload
- * and lgssd close the pipe fds.
- */
- //dput(de);
+
+ /* FIXME hack pipefs: dput will sometimes cause an oops during module
+ * unload and when lgssd closes the pipe fds. */
/* krb5 mechanism */
de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,