-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* Modifications for Lustre
- * Copyright 2004 - 2006, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
+ *
* Author: Eric Mei <ericm@clusterfs.com>
  */
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/random.h>
#include <linux/mutex.h>
#include <linux/crypto.h>
#include <asm/atomic.h>
struct rpc_clnt; /* for rpc_pipefs */
#include <linux/sunrpc/rpc_pipe_fs.h>
-#else
-#include <liblustre.h>
-#endif
+#include <libcfs/linux/linux-list.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
}
/****************************************
- * internel context helpers *
+ * internal context helpers *
****************************************/
static
struct vfs_cred *vcred)
{
struct gss_cli_ctx *gctx;
+ int rc;
OBD_ALLOC_PTR(gctx);
if (gctx == NULL)
return NULL;
- if (gss_cli_ctx_init_common(sec, &gctx->gc_base, &gss_pipefs_ctxops,
- vcred)) {
+ rc = gss_cli_ctx_init_common(sec, &gctx->gc_base,
+ &gss_pipefs_ctxops, vcred);
+ if (rc) {
OBD_FREE_PTR(gctx);
return NULL;
}
static
void ctx_destroy_pf(struct ptlrpc_sec *sec, struct ptlrpc_cli_ctx *ctx)
{
- struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- int rc;
+ struct gss_cli_ctx *gctx = ctx2gctx(ctx);
- rc = gss_cli_ctx_fini_common(sec, ctx);
- OBD_FREE_PTR(gctx);
+ if (gss_cli_ctx_fini_common(sec, ctx))
+ return;
- if (rc) {
- CWARN("released the last ctx, proceed to destroy sec %s@%p\n",
- sec->ps_policy->sp_name, sec);
- sptlrpc_sec_destroy(sec);
- }
+ OBD_FREE_PTR(gctx);
+
+ atomic_dec(&sec->ps_nctx);
+ sptlrpc_sec_put(sec);
}
static
void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
{
- set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- atomic_inc(&ctx->cc_refcount);
- hlist_add_head(&ctx->cc_cache, hash);
+ set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ atomic_inc(&ctx->cc_refcount);
+ hlist_add_head(&ctx->cc_cache, hash);
}
/*
static
void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
{
- LASSERT_SPIN_LOCKED(&ctx->cc_sec->ps_lock);
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!hlist_unhashed(&ctx->cc_cache));
-
- clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
-
- if (atomic_dec_and_test(&ctx->cc_refcount)) {
- __hlist_del(&ctx->cc_cache);
- hlist_add_head(&ctx->cc_cache, freelist);
- } else
- hlist_del_init(&ctx->cc_cache);
+ assert_spin_locked(&ctx->cc_sec->ps_lock);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
+
+ clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+
+ if (atomic_dec_and_test(&ctx->cc_refcount)) {
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, freelist);
+ } else {
+ hlist_del_init(&ctx->cc_cache);
+ }
}
/*
* return 1 if the context is dead.
*/
static
-int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
+int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
+ struct hlist_head *freelist)
{
if (cli_ctx_check_death(ctx)) {
if (freelist)
static inline
int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
- struct hlist_head *freelist)
+ struct hlist_head *freelist)
{
- LASSERT(ctx->cc_sec);
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- return ctx_check_death_pf(ctx, freelist);
+ return ctx_check_death_pf(ctx, freelist);
}
static inline
static
void ctx_list_destroy_pf(struct hlist_head *head)
{
- struct ptlrpc_cli_ctx *ctx;
+ struct ptlrpc_cli_ctx *ctx;
- while (!hlist_empty(head)) {
- ctx = hlist_entry(head->first, struct ptlrpc_cli_ctx, cc_cache);
+ while (!hlist_empty(head)) {
+ ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
+ cc_cache);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
+ &ctx->cc_flags) == 0);
- hlist_del_init(&ctx->cc_cache);
- ctx_destroy_pf(ctx->cc_sec, ctx);
- }
+ hlist_del_init(&ctx->cc_cache);
+ ctx_destroy_pf(ctx->cc_sec, ctx);
+ }
}
/****************************************
static
void gss_cli_ctx_die_pf(struct ptlrpc_cli_ctx *ctx, int grace)
{
- LASSERT(ctx->cc_sec);
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(ctx->cc_sec);
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- cli_ctx_expire(ctx);
+ cli_ctx_expire(ctx);
- spin_lock(&ctx->cc_sec->ps_lock);
+ spin_lock(&ctx->cc_sec->ps_lock);
- if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!hlist_unhashed(&ctx->cc_cache));
- LASSERT(atomic_read(&ctx->cc_refcount) > 1);
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
+ LASSERT(atomic_read(&ctx->cc_refcount) > 1);
- hlist_del_init(&ctx->cc_cache);
- if (atomic_dec_and_test(&ctx->cc_refcount))
- LBUG();
- }
+ hlist_del_init(&ctx->cc_cache);
+ if (atomic_dec_and_test(&ctx->cc_refcount))
+ LBUG();
+ }
- spin_unlock(&ctx->cc_sec->ps_lock);
+ spin_unlock(&ctx->cc_sec->ps_lock);
}
/****************************************
void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
struct ptlrpc_cli_ctx *new)
{
- struct gss_sec_pipefs *gsec_pf;
- struct ptlrpc_cli_ctx *ctx;
- struct hlist_node *pos, *next;
- CFS_HLIST_HEAD(freelist);
- unsigned int hash;
- ENTRY;
+ struct hlist_node __maybe_unused *pos, *next;
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx;
+ HLIST_HEAD(freelist);
+ unsigned int hash;
+ ENTRY;
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
(__u64) new->cc_vcred.vc_uid);
LASSERT(hash < gsec_pf->gsp_chash_size);
- spin_lock(&gsec->gs_base.ps_lock);
+ spin_lock(&gsec->gs_base.ps_lock);
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[hash], cc_cache) {
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[hash], cc_cache) {
if (!ctx_match_pf(ctx, &new->cc_vcred))
continue;
}
ctx_enhash_pf(new, &gsec_pf->gsp_chash[hash]);
- atomic_inc(&gsec->gs_base.ps_busy);
- spin_unlock(&gsec->gs_base.ps_lock);
+ spin_unlock(&gsec->gs_base.ps_lock);
ctx_list_destroy_pf(&freelist);
EXIT;
static
void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
- struct hlist_head *freelist)
+ struct hlist_head *freelist)
{
- struct ptlrpc_sec *sec;
- struct ptlrpc_cli_ctx *ctx;
- struct hlist_node *pos, *next;
- int i;
- ENTRY;
+ struct ptlrpc_sec *sec;
+ struct ptlrpc_cli_ctx *ctx;
+ struct hlist_node __maybe_unused *pos;
+ struct hlist_node *next;
+ int i;
+ ENTRY;
sec = &gsec_pf->gsp_base.gs_base;
CDEBUG(D_SEC, "do gc on sec %s@%p\n", sec->ps_policy->sp_name, sec);
for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_cache)
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[i], cc_cache)
ctx_check_death_locked_pf(ctx, freelist);
}
static
struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx,
- __u32 flavor,
- unsigned long flags)
+ struct sptlrpc_flavor *sf)
{
struct gss_sec_pipefs *gsec_pf;
int alloc_size, hash_size, i;
#define GSS_SEC_PIPEFS_CTX_HASH_SIZE (32)
- if (ctx || flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
+ if (ctx ||
+ sf->sf_flags & (PTLRPC_SEC_FL_ROOTONLY | PTLRPC_SEC_FL_REVERSE))
hash_size = 1;
else
hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
alloc_size = sizeof(*gsec_pf) +
- sizeof(struct hlist_head) * hash_size;
+ sizeof(struct hlist_head) * hash_size;
OBD_ALLOC(gsec_pf, alloc_size);
if (!gsec_pf)
gsec_pf->gsp_chash_size = hash_size;
for (i = 0; i < hash_size; i++)
- CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
+ INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
- imp, ctx, flavor, flags))
+ imp, ctx, sf))
goto err_free;
if (ctx == NULL) {
gss_sec_destroy_common(gsec);
OBD_FREE(gsec, sizeof(*gsec_pf) +
- sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
+ sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
}
static
struct vfs_cred *vcred,
int create, int remove_dead)
{
- struct gss_sec *gsec;
- struct gss_sec_pipefs *gsec_pf;
- struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
- struct hlist_head *hash_head;
- struct hlist_node *pos, *next;
- CFS_HLIST_HEAD(freelist);
- unsigned int hash, gc = 0, found = 0;
- ENTRY;
+ struct gss_sec *gsec;
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
+ struct hlist_head *hash_head;
+ struct hlist_node __maybe_unused *pos, *next;
+ unsigned int hash, gc = 0, found = 0;
+ HLIST_HEAD(freelist);
+ ENTRY;
- might_sleep();
+ might_sleep();
- gsec = container_of(sec, struct gss_sec, gs_base);
- gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
hash = ctx_hash_index(gsec_pf->gsp_chash_size,
(__u64) vcred->vc_uid);
LASSERT(hash < gsec_pf->gsp_chash_size);
retry:
- spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
/* gc_next == 0 means never do gc */
if (remove_dead && sec->ps_gc_next &&
gc = 1;
}
- hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
+ cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
if (gc == 0 &&
ctx_check_death_locked_pf(ctx,
remove_dead ? &freelist : NULL))
if (found) {
if (new && new != ctx) {
/* lost the race, just free it */
- hlist_add_head(&new->cc_cache, &freelist);
+ hlist_add_head(&new->cc_cache, &freelist);
new = NULL;
}
/* hot node, move to head */
if (hash_head->first != &ctx->cc_cache) {
- __hlist_del(&ctx->cc_cache);
- hlist_add_head(&ctx->cc_cache, hash_head);
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, hash_head);
}
} else {
/* don't allocate for reverse sec */
if (sec_is_reverse(sec)) {
- spin_unlock(&sec->ps_lock);
- RETURN(NULL);
- }
-
- if (new) {
- ctx_enhash_pf(new, hash_head);
- ctx = new;
- } else if (create) {
- spin_unlock(&sec->ps_lock);
- new = ctx_create_pf(sec, vcred);
- if (new) {
- clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
- goto retry;
- }
- } else
- ctx = NULL;
- }
-
- /* hold a ref */
- if (ctx)
- atomic_inc(&ctx->cc_refcount);
-
- spin_unlock(&sec->ps_lock);
-
- /* the allocator of the context must give the first push to refresh */
- if (new) {
- LASSERT(new == ctx);
- gss_cli_ctx_refresh_pf(new);
- }
-
- ctx_list_destroy_pf(&freelist);
- RETURN(ctx);
+ spin_unlock(&sec->ps_lock);
+ RETURN(NULL);
+ }
+
+ if (new) {
+ ctx_enhash_pf(new, hash_head);
+ ctx = new;
+ } else if (create) {
+ spin_unlock(&sec->ps_lock);
+ new = ctx_create_pf(sec, vcred);
+ if (new) {
+ clear_bit(PTLRPC_CTX_NEW_BIT, &new->cc_flags);
+ goto retry;
+ }
+ } else {
+ ctx = NULL;
+ }
+ }
+
+ /* hold a ref */
+ if (ctx)
+ atomic_inc(&ctx->cc_refcount);
+
+ spin_unlock(&sec->ps_lock);
+
+ /* the allocator of the context must give the first push to refresh */
+ if (new) {
+ LASSERT(new == ctx);
+ gss_cli_ctx_refresh_pf(new);
+ }
+
+ ctx_list_destroy_pf(&freelist);
+ RETURN(ctx);
}
static
struct ptlrpc_cli_ctx *ctx,
int sync)
{
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- LASSERT(hlist_unhashed(&ctx->cc_cache));
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(hlist_unhashed(&ctx->cc_cache));
/* if required async, we must clear the UPTODATE bit to prevent extra
- * rpcs during destroy procedure.
- */
+ * rpcs during destroy procedure. */
if (!sync)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
/* destroy this context */
ctx_destroy_pf(sec, ctx);
uid_t uid,
int grace, int force)
{
- struct gss_sec *gsec;
- struct gss_sec_pipefs *gsec_pf;
- struct ptlrpc_cli_ctx *ctx;
- struct hlist_node *pos, *next;
- CFS_HLIST_HEAD(freelist);
- int i, busy = 0;
- ENTRY;
-
- might_sleep_if(grace);
-
- gsec = container_of(sec, struct gss_sec, gs_base);
- gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
-
- spin_lock(&sec->ps_lock);
- for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_pf->gsp_chash[i], cc_cache) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
-
- if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
- continue;
-
- if (atomic_read(&ctx->cc_refcount) > 1) {
- busy++;
- if (!force)
- continue;
-
- CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
- "grace %d\n",
- atomic_read(&ctx->cc_refcount),
- ctx, ctx->cc_vcred.vc_uid,
- sec2target_str(ctx->cc_sec), grace);
- }
- ctx_unhash_pf(ctx, &freelist);
-
- set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
- if (!grace)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT,
- &ctx->cc_flags);
- }
- }
- spin_unlock(&sec->ps_lock);
-
- ctx_list_destroy_pf(&freelist);
- RETURN(busy);
+ struct gss_sec *gsec;
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx;
+ struct hlist_node __maybe_unused *pos, *next;
+ HLIST_HEAD(freelist);
+ int i, busy = 0;
+ ENTRY;
+
+ might_sleep_if(grace);
+
+ gsec = container_of(sec, struct gss_sec, gs_base);
+ gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
+
+ spin_lock(&sec->ps_lock);
+ for (i = 0; i < gsec_pf->gsp_chash_size; i++) {
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_pf->gsp_chash[i],
+ cc_cache) {
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+
+ if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
+ continue;
+
+ if (atomic_read(&ctx->cc_refcount) > 1) {
+ busy++;
+ if (!force)
+ continue;
+
+ CWARN("flush busy(%d) ctx %p(%u->%s) by force, "
+ "grace %d\n",
+ atomic_read(&ctx->cc_refcount),
+ ctx, ctx->cc_vcred.vc_uid,
+ sec2target_str(ctx->cc_sec), grace);
+ }
+ ctx_unhash_pf(ctx, &freelist);
+
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT,
+ &ctx->cc_flags);
+ }
+ }
+ spin_unlock(&sec->ps_lock);
+
+ ctx_list_destroy_pf(&freelist);
+ RETURN(busy);
}
/****************************************
int gss_svc_install_rctx_pf(struct obd_import *imp,
struct ptlrpc_svc_ctx *ctx)
{
- struct gss_sec *gsec;
+ struct ptlrpc_sec *sec;
+ int rc;
- LASSERT(imp->imp_sec);
- LASSERT(ctx);
+ sec = sptlrpc_import_sec_ref(imp);
+ LASSERT(sec);
+ rc = gss_install_rvs_cli_ctx_pf(sec2gsec(sec), ctx);
- gsec = container_of(imp->imp_sec, struct gss_sec, gs_base);
- return gss_install_rvs_cli_ctx_pf(gsec, ctx);
+ sptlrpc_sec_put(sec);
+ return rc;
}
/****************************************
};
struct gss_upcall_msg {
- struct rpc_pipe_msg gum_base;
- atomic_t gum_refcount;
- struct list_head gum_list;
- __u32 gum_mechidx;
- struct gss_sec *gum_gsec;
- struct gss_cli_ctx *gum_gctx;
- struct gss_upcall_msg_data gum_data;
+ struct rpc_pipe_msg gum_base;
+ atomic_t gum_refcount;
+ struct list_head gum_list;
+ __u32 gum_mechidx;
+ struct gss_sec *gum_gsec;
+ struct gss_cli_ctx *gum_gctx;
+ struct gss_upcall_msg_data gum_data;
};
static atomic_t upcall_seq = ATOMIC_INIT(0);
static inline
__u32 upcall_get_sequence(void)
{
- return (__u32) atomic_inc_return(&upcall_seq);
+ return (__u32) atomic_inc_return(&upcall_seq);
}
enum mech_idx_t {
static inline
void upcall_list_lock(int idx)
{
- spin_lock(&upcall_locks[idx]);
+ spin_lock(&upcall_locks[idx]);
}
static inline
void upcall_list_unlock(int idx)
{
- spin_unlock(&upcall_locks[idx]);
+ spin_unlock(&upcall_locks[idx]);
}
static
void upcall_msg_enlist(struct gss_upcall_msg *msg)
{
- __u32 idx = msg->gum_mechidx;
+ __u32 idx = msg->gum_mechidx;
- upcall_list_lock(idx);
- list_add(&msg->gum_list, &upcall_lists[idx]);
- upcall_list_unlock(idx);
+ upcall_list_lock(idx);
+ list_add(&msg->gum_list, &upcall_lists[idx]);
+ upcall_list_unlock(idx);
}
static
void upcall_msg_delist(struct gss_upcall_msg *msg)
{
- __u32 idx = msg->gum_mechidx;
+ __u32 idx = msg->gum_mechidx;
- upcall_list_lock(idx);
- list_del_init(&msg->gum_list);
- upcall_list_unlock(idx);
+ upcall_list_lock(idx);
+ list_del_init(&msg->gum_list);
+ upcall_list_unlock(idx);
}
/****************************************
static
void gss_release_msg(struct gss_upcall_msg *gmsg)
{
- ENTRY;
- LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+ ENTRY;
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
- if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
- EXIT;
- return;
- }
+ if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
+ EXIT;
+ return;
+ }
if (gmsg->gum_gctx) {
sptlrpc_cli_ctx_wakeup(&gmsg->gum_gctx->gc_base);
gmsg->gum_gctx = NULL;
}
- LASSERT(list_empty(&gmsg->gum_list));
- LASSERT(list_empty(&gmsg->gum_base.list));
- OBD_FREE_PTR(gmsg);
- EXIT;
+ LASSERT(list_empty(&gmsg->gum_list));
+ LASSERT(list_empty(&gmsg->gum_base.list));
+ OBD_FREE_PTR(gmsg);
+ EXIT;
}
static
void gss_unhash_msg_nolock(struct gss_upcall_msg *gmsg)
{
- __u32 idx = gmsg->gum_mechidx;
+ __u32 idx = gmsg->gum_mechidx;
- LASSERT(idx < MECH_MAX);
- LASSERT_SPIN_LOCKED(&upcall_locks[idx]);
+ LASSERT(idx < MECH_MAX);
+ assert_spin_locked(&upcall_locks[idx]);
- if (list_empty(&gmsg->gum_list))
- return;
+ if (list_empty(&gmsg->gum_list))
+ return;
- list_del_init(&gmsg->gum_list);
- LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
- atomic_dec(&gmsg->gum_refcount);
+ list_del_init(&gmsg->gum_list);
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
+ atomic_dec(&gmsg->gum_refcount);
}
static
static
void gss_msg_fail_ctx(struct gss_upcall_msg *gmsg)
{
- if (gmsg->gum_gctx) {
- struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
+ if (gmsg->gum_gctx) {
+ struct ptlrpc_cli_ctx *ctx = &gmsg->gum_gctx->gc_base;
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- sptlrpc_cli_ctx_expire(ctx);
- set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
- }
+ LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ sptlrpc_cli_ctx_expire(ctx);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ }
}
static
struct gss_upcall_msg * gss_find_upcall(__u32 mechidx, __u32 seq)
{
- struct gss_upcall_msg *gmsg;
-
- upcall_list_lock(mechidx);
- list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
- if (gmsg->gum_data.gum_seq != seq)
- continue;
-
- LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
- LASSERT(gmsg->gum_mechidx == mechidx);
-
- atomic_inc(&gmsg->gum_refcount);
- upcall_list_unlock(mechidx);
- return gmsg;
- }
- upcall_list_unlock(mechidx);
- return NULL;
+ struct gss_upcall_msg *gmsg;
+
+ upcall_list_lock(mechidx);
+ list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
+ if (gmsg->gum_data.gum_seq != seq)
+ continue;
+
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+ LASSERT(gmsg->gum_mechidx == mechidx);
+
+ atomic_inc(&gmsg->gum_refcount);
+ upcall_list_unlock(mechidx);
+ return gmsg;
+ }
+ upcall_list_unlock(mechidx);
+ return NULL;
}
static
if (mlen > buflen)
mlen = buflen;
- left = copy_to_user(dst, data, mlen);
+ left = copy_to_user(dst, data, mlen);
if (left < 0) {
msg->errno = left;
RETURN(left);
static
ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
{
- struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct gss_upcall_msg *gss_msg;
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx = NULL;
if (!buf)
RETURN(-ENOMEM);
- if (copy_from_user(buf, src, mlen)) {
+ if (copy_from_user(buf, src, mlen)) {
CERROR("failed copy user space data\n");
GOTO(out_free, rc = -EFAULT);
}
GOTO(out_free, rc = -EINVAL);
}
- gss_unhash_msg(gss_msg);
- gctx = gss_msg->gum_gctx;
- LASSERT(gctx);
- LASSERT(atomic_read(&gctx->gc_base.cc_refcount) > 0);
+ gss_unhash_msg(gss_msg);
+ gctx = gss_msg->gum_gctx;
+ LASSERT(gctx);
+ LASSERT(atomic_read(&gctx->gc_base.cc_refcount) > 0);
/* timeout is not in use for now */
if (simple_get_bytes(&data, &datalen, &timeout, sizeof(timeout)))
ctx = &gctx->gc_base;
sptlrpc_cli_ctx_expire(ctx);
if (rc != -ERESTART || gss_err != GSS_S_COMPLETE)
- set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
CERROR("refresh ctx %p(uid %d) failed: %d/0x%08x: %s\n",
ctx, ctx->cc_vcred.vc_uid, rc, gss_err,
- test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
+ test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags) ?
"fatal error" : "non-fatal");
}
OBD_FREE(buf, mlen);
/* FIXME
* hack pipefs: always return asked length unless all following
- * downcalls might be messed up.
- */
+ * downcalls might be messed up. */
rc = mlen;
RETURN(rc);
}
static cfs_time_t ratelimit = 0;
ENTRY;
- LASSERT(list_empty(&msg->list));
+ LASSERT(list_empty(&msg->list));
/* normally errno is >= 0 */
if (msg->errno >= 0) {
return;
}
- gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
- gumd = &gmsg->gum_data;
- LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
-
- CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
- "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
- gumd->gum_nid, (int) sizeof(gumd->gum_obd),
- gumd->gum_obd, msg->errno);
-
- atomic_inc(&gmsg->gum_refcount);
- gss_unhash_msg(gmsg);
- if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
- cfs_time_t now = cfs_time_current_sec();
-
- if (cfs_time_after(now, ratelimit)) {
- CWARN("upcall timed out, is lgssd running?\n");
- ratelimit = now + 15;
- }
- }
- gss_msg_fail_ctx(gmsg);
- gss_release_msg(gmsg);
- EXIT;
+ gmsg = container_of(msg, struct gss_upcall_msg, gum_base);
+ gumd = &gmsg->gum_data;
+ LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
+
+ CERROR("failed msg %p (seq %u, uid %u, svc %u, nid %#llx, obd %.*s): "
+ "errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
+ gumd->gum_nid, (int) sizeof(gumd->gum_obd),
+ gumd->gum_obd, msg->errno);
+
+ atomic_inc(&gmsg->gum_refcount);
+ gss_unhash_msg(gmsg);
+ if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
+ cfs_time_t now = cfs_time_current_sec();
+
+ if (cfs_time_after(now, ratelimit)) {
+ CWARN("upcall timed out, is lgssd running?\n");
+ ratelimit = now + 15;
+ }
+ }
+ gss_msg_fail_ctx(gmsg);
+ gss_release_msg(gmsg);
+ EXIT;
}
static
void gss_pipe_release(struct inode *inode)
{
- struct rpc_inode *rpci = RPC_I(inode);
- __u32 idx;
- ENTRY;
+ struct rpc_inode *rpci = RPC_I(inode);
+ __u32 idx;
+ ENTRY;
- idx = (__u32) (long) rpci->private;
- LASSERT(idx < MECH_MAX);
+ idx = (__u32) (long) rpci->private;
+ LASSERT(idx < MECH_MAX);
- upcall_list_lock(idx);
- while (!list_empty(&upcall_lists[idx])) {
- struct gss_upcall_msg *gmsg;
- struct gss_upcall_msg_data *gumd;
+ upcall_list_lock(idx);
+ while (!list_empty(&upcall_lists[idx])) {
+ struct gss_upcall_msg *gmsg;
+ struct gss_upcall_msg_data *gumd;
- gmsg = list_entry(upcall_lists[idx].next,
- struct gss_upcall_msg, gum_list);
- gumd = &gmsg->gum_data;
- LASSERT(list_empty(&gmsg->gum_base.list));
+ gmsg = list_entry(upcall_lists[idx].next,
+ struct gss_upcall_msg, gum_list);
+ gumd = &gmsg->gum_data;
+ LASSERT(list_empty(&gmsg->gum_base.list));
CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
- "nid "LPX64", obd %.*s\n", gmsg,
+ "nid %#llx, obd %.*s\n", gmsg,
gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd);
- gmsg->gum_base.errno = -EPIPE;
- atomic_inc(&gmsg->gum_refcount);
- gss_unhash_msg_nolock(gmsg);
+ gmsg->gum_base.errno = -EPIPE;
+ atomic_inc(&gmsg->gum_refcount);
+ gss_unhash_msg_nolock(gmsg);
- gss_msg_fail_ctx(gmsg);
+ gss_msg_fail_ctx(gmsg);
- upcall_list_unlock(idx);
- gss_release_msg(gmsg);
- upcall_list_lock(idx);
- }
- upcall_list_unlock(idx);
- EXIT;
+ upcall_list_unlock(idx);
+ gss_release_msg(gmsg);
+ upcall_list_lock(idx);
+ }
+ upcall_list_unlock(idx);
+ EXIT;
}
static struct rpc_pipe_ops gss_upcall_ops = {
static
int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
{
- struct obd_import *imp;
- struct gss_sec *gsec;
- struct gss_upcall_msg *gmsg;
- int rc = 0;
- ENTRY;
+ struct obd_import *imp;
+ struct gss_sec *gsec;
+ struct gss_upcall_msg *gmsg;
+ int rc = 0;
+ ENTRY;
- might_sleep();
+ might_sleep();
- LASSERT(ctx->cc_sec);
- LASSERT(ctx->cc_sec->ps_import);
- LASSERT(ctx->cc_sec->ps_import->imp_obd);
+ LASSERT(ctx->cc_sec);
+ LASSERT(ctx->cc_sec->ps_import);
+ LASSERT(ctx->cc_sec->ps_import->imp_obd);
imp = ctx->cc_sec->ps_import;
if (!imp->imp_connection) {
RETURN(-ENOMEM);
/* initialize pipefs base msg */
- CFS_INIT_LIST_HEAD(&gmsg->gum_base.list);
+ INIT_LIST_HEAD(&gmsg->gum_base.list);
gmsg->gum_base.data = &gmsg->gum_data;
gmsg->gum_base.len = sizeof(gmsg->gum_data);
gmsg->gum_base.copied = 0;
gmsg->gum_base.errno = 0;
- /* init upcall msg */
- atomic_set(&gmsg->gum_refcount, 1);
- gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
- gmsg->gum_gsec = gsec;
- gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
- struct gss_cli_ctx, gc_base);
- gmsg->gum_data.gum_seq = upcall_get_sequence();
- gmsg->gum_data.gum_uid = ctx->cc_vcred.vc_uid;
- gmsg->gum_data.gum_gid = 0; /* not used for now */
- gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
- gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
- strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
- sizeof(gmsg->gum_data.gum_obd));
+ /* init upcall msg */
+ atomic_set(&gmsg->gum_refcount, 1);
+ gmsg->gum_mechidx = mech_name2idx(gsec->gs_mech->gm_name);
+ gmsg->gum_gsec = gsec;
+ gmsg->gum_gctx = container_of(sptlrpc_cli_ctx_get(ctx),
+ struct gss_cli_ctx, gc_base);
+ gmsg->gum_data.gum_seq = upcall_get_sequence();
+ gmsg->gum_data.gum_uid = ctx->cc_vcred.vc_uid;
+ gmsg->gum_data.gum_gid = 0; /* not used for now */
+ gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
+ gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
+ strlcpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
+ sizeof(gmsg->gum_data.gum_obd));
/* This only could happen when sysadmin set it dead/expired
- * using lctl by force.
- */
+ * using lctl by force. */
if (ctx->cc_flags & PTLRPC_CTX_STATUS_MASK) {
CWARN("ctx %p(%u->%s) was set flags %lx unexpectedly\n",
ctx, ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec),
int gss_cli_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
{
/* if we are refreshing for root, also update the reverse
- * handle index, do not confuse reverse contexts.
- */
+ * handle index, do not confuse reverse contexts. */
if (ctx->cc_vcred.vc_uid == 0) {
struct gss_sec *gsec;
static struct ptlrpc_sec_cops gss_sec_pipefs_cops = {
.create_sec = gss_sec_create_pf,
.destroy_sec = gss_sec_destroy_pf,
+ .kill_sec = gss_sec_kill,
.lookup_ctx = gss_sec_lookup_ctx_pf,
.release_ctx = gss_sec_release_ctx_pf,
.flush_ctx_cache = gss_sec_flush_ctx_cache_pf,
CERROR("Failed to create gss pipe dir: %ld\n", PTR_ERR(de));
return PTR_ERR(de);
}
- /* FIXME
- * hack pipefs: dput will sometimes cause oops during module unload
- * and lgssd close the pipe fds.
- */
- //dput(de);
+
+ /* FIXME hack pipefs: dput will sometimes cause oops during module
+ * unload and lgssd close the pipe fds. */
/* krb5 mechanism */
de = rpc_mkpipe(LUSTRE_PIPE_KRB5, (void *) MECH_KRB5, &gss_upcall_ops,
}
de_pipes[MECH_KRB5] = de;
- CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
- upcall_locks[MECH_KRB5] = SPIN_LOCK_UNLOCKED;
+ INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
+ spin_lock_init(&upcall_locks[MECH_KRB5]);
- return 0;
+ return 0;
}
static
void __exit gss_exit_pipefs_upcall(void)
{
- __u32 i;
+ __u32 i;
- for (i = 0; i < MECH_MAX; i++) {
- LASSERT(list_empty(&upcall_lists[i]));
- /*
- * dput pipe dentry here might cause lgssd oops.
- */
- //dput(de_pipes[i]);
- de_pipes[i] = NULL;
- }
+ for (i = 0; i < MECH_MAX; i++) {
+ LASSERT(list_empty(&upcall_lists[i]));
+
+ /* dput pipe dentry here might cause lgssd oops. */
+ de_pipes[i] = NULL;
+ }
- rpc_unlink(LUSTRE_PIPE_KRB5);
- rpc_rmdir(LUSTRE_PIPE_ROOT);
+ rpc_unlink(LUSTRE_PIPE_KRB5);
+ rpc_rmdir(LUSTRE_PIPE_ROOT);
}
int __init gss_init_pipefs(void)
void __exit gss_exit_pipefs(void)
{
- gss_exit_pipefs_upcall();
- sptlrpc_unregister_policy(&gss_policy_pipefs);
+ gss_exit_pipefs_upcall();
+ sptlrpc_unregister_policy(&gss_policy_pipefs);
}