*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
+ * Copyright (c) 2012, Intel Corporation.
+ *
* Author: Eric Mei <ericm@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
static
void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
{
- cfs_set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
cfs_atomic_inc(&ctx->cc_refcount);
cfs_hlist_add_head(&ctx->cc_cache, hash);
}
static
void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
{
- LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
-
- cfs_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
-
- if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
- __cfs_hlist_del(&ctx->cc_cache);
- cfs_hlist_add_head(&ctx->cc_cache, freelist);
- } else {
- cfs_hlist_del_init(&ctx->cc_cache);
- }
+ LASSERT(spin_is_locked(&ctx->cc_sec->ps_lock));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+
+ clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount)) {
+ __cfs_hlist_del(&ctx->cc_cache);
+ cfs_hlist_add_head(&ctx->cc_cache, freelist);
+ } else {
+ cfs_hlist_del_init(&ctx->cc_cache);
+ }
}
/*
{
LASSERT(ctx->cc_sec);
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
return ctx_check_death_pf(ctx, freelist);
}
cc_cache);
LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT,
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
&ctx->cc_flags) == 0);
cfs_hlist_del_init(&ctx->cc_cache);
static
void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
- LASSERT(ctx->cc_sec);
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- cli_ctx_expire(ctx);
+ cli_ctx_expire(ctx);
- cfs_spin_lock(&ctx->cc_sec->ps_lock);
+ spin_lock(&ctx->cc_sec->ps_lock);
- if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
- LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
+ LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 1);
- cfs_hlist_del_init(&ctx->cc_cache);
- if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
- LBUG();
- }
+ cfs_hlist_del_init(&ctx->cc_cache);
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
+ LBUG();
+ }
- cfs_spin_unlock(&ctx->cc_sec->ps_lock);
+ spin_unlock(&ctx->cc_sec->ps_lock);
}
/****************************************
(__u64) new->cc_vcred.vc_uid);
LASSERT(hash < gsec_pf->gsp_chash_size);
- cfs_spin_lock(&gsec->gs_base.ps_lock);
+ spin_lock(&gsec->gs_base.ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_pf->gsp_chash[hash], cc_cache) {
ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
- cfs_spin_unlock(&gsec->gs_base.ps_lock);
+ spin_unlock(&gsec->gs_base.ps_lock);
ctx_list_destroy_pf(&freelist);
EXIT;
struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
cfs_hlist_head_t *hash_head;
cfs_hlist_node_t *pos, *next;
- CFS_HLIST_HEAD(freelist);
- unsigned int hash, gc = 0, found = 0;
- ENTRY;
+ CFS_HLIST_HEAD(freelist);
+ unsigned int hash, gc = 0, found = 0;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- gsec = container_of(sec, struct gss_sec, gs_base);
- gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
hash = ctx_hash_index(gsec_pf->gsp_chash_size,
(__u64) vcred->vc_uid);
LASSERT(hash < gsec_pf->gsp_chash_size);
retry:
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
/* gc_next == 0 means never do gc */
if (remove_dead && sec->ps_gc_next &&
} else {
/* don't allocate for reverse sec */
if (sec_is_reverse(sec)) {
- cfs_spin_unlock(&sec->ps_lock);
- RETURN(NULL);
- }
-
- if (new) {
- ctx_enhash_pf(new, hash_head);
- ctx = new;
- } else if (create) {
- cfs_spin_unlock(&sec->ps_lock);
- new = ctx_create_pf(sec, vcred);
- if (new) {
- cfs_clear_bit(PTLRPC_CTX_NEW_BIT,
- &new->cc_flags);
- goto retry;
- }
- } else
- ctx = NULL;
- }
-
- /* hold a ref */
- if (ctx)
- cfs_atomic_inc(&ctx->cc_refcount);
-
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
+ RETURN(NULL);
+ }
+
+ if (new) {
+ ctx_enhash_pf(new, hash_head);
+ ctx = new;
+ } else if (create) {
+ spin_unlock(&sec->ps_lock);
+ new = ctx_create_pf(sec, vcred);
+ if (new) {
+ clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
+ goto retry;
+ }
+ } else {
+ ctx = NULL;
+ }
+ }
+
+ /* hold a ref */
+ if (ctx)
+ cfs_atomic_inc(&ctx->cc_refcount);
+
+ spin_unlock(&sec->ps_lock);
/* the allocator of the context must give the first push to refresh */
if (new) {
struct ptlrpc_cli_ctx *ctx,
int sync)
{
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
/* if required async, we must clear the UPTODATE bit to prevent extra
* rpcs during destroy procedure. */
if (!sync)
- cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
/* destroy this context */
ctx_destroy_pf(sec, ctx);
struct gss_sec *gsec;
struct gss_sec_pipefs *gsec_pf;
struct ptlrpc_cli_ctx *ctx;
- cfs_hlist_node_t *pos, *next;
- CFS_HLIST_HEAD(freelist);
- int i, busy = 0;
- ENTRY;
+ cfs_hlist_node_t *pos, *next;
+ CFS_HLIST_HEAD(freelist);
+ int i, busy = 0;
+ ENTRY;
- might_sleep_if(grace);
+ might_sleep_if(grace);
- gsec = container_of(sec, struct gss_sec, gs_base);
- gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_pf->gsp_chash[i],
}
ctx_unhash_pf(ctx, &freelist);
- cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
- if (!grace)
- cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT,
- &ctx->cc_flags);
- }
- }
- cfs_spin_unlock(&sec->ps_lock);
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT,
+ &ctx->cc_flags);
+ }
+ }
+ spin_unlock(&sec->ps_lock);
- ctx_list_destroy_pf(&freelist);
- RETURN(busy);
+ ctx_list_destroy_pf(&freelist);
+ RETURN(busy);
}
/****************************************
/* all upcall messages linked here */
static cfs_list_t upcall_lists[MECH_MAX];
/* and protected by this */
-static cfs_spinlock_t upcall_locks[MECH_MAX];
+static spinlock_t upcall_locks[MECH_MAX];
static inline
void upcall_list_lock(int idx)
{
- cfs_spin_lock(&upcall_locks[idx]);
+ spin_lock(&upcall_locks[idx]);
}
static inline
void upcall_list_unlock(int idx)
{
- cfs_spin_unlock(&upcall_locks[idx]);
+ spin_unlock(&upcall_locks[idx]);
}
static
static
void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
{
- __u32 idx = gmsg->gum_mechidx;
+ __u32 idx = gmsg->gum_mechidx;
- LASSERT(idx < MECH_MAX);
- LASSERT_SPIN_LOCKED(&upcall_locks[idx]);
+ LASSERT(idx < MECH_MAX);
+ LASSERT(spin_is_locked(&upcall_locks[idx]));
- if (cfs_list_empty(&gmsg->gum_list))
- return;
+ if (cfs_list_empty(&gmsg->gum_list))
+ return;
- cfs_list_del_init(&gmsg->gum_list);
- LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
- cfs_atomic_dec(&gmsg->gum_refcount);
+ cfs_list_del_init(&gmsg->gum_list);
+ LASSERT(cfs_atomic_read(&gmsg->gum_refcount) > 1);
+ cfs_atomic_dec(&gmsg->gum_refcount);
}
static
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
sptlrpc_cli_ctx_expire(ctx);
- cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
}
}
if (mlen > buflen)
mlen = buflen;
- left = cfs_copy_to_user(dst, data, mlen);
+ left = copy_to_user(dst, data, mlen);
if (left < 0) {
msg->errno = left;
RETURN(left);
if (!buf)
RETURN(-ENOMEM);
- if (cfs_copy_from_user(buf, src, mlen)) {
+ if (copy_from_user(buf, src, mlen)) {
CERROR("failed copy user space data\n");
GOTO(out_free, rc = -EFAULT);
}
ctx = &gctx->gc_base;
sptlrpc_cli_ctx_expire(ctx);
if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
- cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
- cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
+ test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
"fatal error" : "non-fatal");
}
static
int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
{
- struct obd_import *imp;
- struct gss_sec *gsec;
- struct gss_upcall_msg *gmsg;
- int rc = 0;
- ENTRY;
+ struct obd_import *imp;
+ struct gss_sec *gsec;
+ struct gss_upcall_msg *gmsg;
+ int rc = 0;
+ ENTRY;
- cfs_might_sleep();
+ might_sleep();
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_import);
- LASSERT(ctx->cc_sec->ps_import->imp_obd);
+ LASSERT(ctx->cc_sec);
+ LASSERT(ctx->cc_sec->ps_import);
+ LASSERT(ctx->cc_sec->ps_import->imp_obd);
imp = ctx->cc_sec->ps_import;
if (!imp->imp_connection) {
de_pipes[MECH_KRB5] = de;
CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
- cfs_spin_lock_init(&upcall_locks[MECH_KRB5]);
+ spin_lock_init(&upcall_locks[MECH_KRB5]);
- return 0;
+ return 0;
}
static