*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
*
* Author: Eric Mei <ericm@clusterfs.com>
*/
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/atomic.h>
struct rpc_clnt; /* for rpc_pipefs */
#include <linux/sunrpc/rpc_pipe_fs.h>
-#else
-#include <liblustre.h>
-#endif
+#include <libcfs/linux/linux-list.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
-#include <lustre/lustre_idl.h>
+#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>
}
/****************************************
- * internel context helpers *
+ * internal context helpers *
****************************************/
static
}
static
-void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *hash)
+void ctx_enhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *hash)
{
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
atomic_inc(&ctx->cc_refcount);
- cfs_hlist_add_head(&ctx->cc_cache, hash);
+ hlist_add_head(&ctx->cc_cache, hash);
}
/*
* caller must hold spinlock
*/
static
-void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, cfs_hlist_head_t *freelist)
+void ctx_unhash_pf(struct ptlrpc_cli_ctx *ctx, struct hlist_head *freelist)
{
assert_spin_locked(&ctx->cc_sec->ps_lock);
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
if (atomic_dec_and_test(&ctx->cc_refcount)) {
- __cfs_hlist_del(&ctx->cc_cache);
- cfs_hlist_add_head(&ctx->cc_cache, freelist);
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, freelist);
} else {
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
}
}
*/
static
int ctx_check_death_pf(struct ptlrpc_cli_ctx *ctx,
- cfs_hlist_head_t *freelist)
+ struct hlist_head *freelist)
{
if (cli_ctx_check_death(ctx)) {
if (freelist)
static inline
int ctx_check_death_locked_pf(struct ptlrpc_cli_ctx *ctx,
- cfs_hlist_head_t *freelist)
+ struct hlist_head *freelist)
{
LASSERT(ctx->cc_sec);
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
}
static
-void ctx_list_destroy_pf(cfs_hlist_head_t *head)
+void ctx_list_destroy_pf(struct hlist_head *head)
{
struct ptlrpc_cli_ctx *ctx;
- while (!cfs_hlist_empty(head)) {
+ while (!hlist_empty(head)) {
ctx = cfs_hlist_entry(head->first, struct ptlrpc_cli_ctx,
cc_cache);
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT,
&ctx->cc_flags) == 0);
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
ctx_destroy_pf(ctx->cc_sec, ctx);
}
}
spin_lock(&ctx->cc_sec->ps_lock);
if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags)) {
- LASSERT(!cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(!hlist_unhashed(&ctx->cc_cache));
LASSERT(atomic_read(&ctx->cc_refcount) > 1);
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
if (atomic_dec_and_test(&ctx->cc_refcount))
LBUG();
}
void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
struct ptlrpc_cli_ctx *new)
{
- struct gss_sec_pipefs *gsec_pf;
- struct ptlrpc_cli_ctx *ctx;
- cfs_hlist_node_t *pos, *next;
- CFS_HLIST_HEAD(freelist);
- unsigned int hash;
- ENTRY;
+ struct hlist_node __maybe_unused *pos, *next;
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx;
+ HLIST_HEAD(freelist);
+ unsigned int hash;
+ ENTRY;
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
static
void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
- cfs_hlist_head_t *freelist)
+ struct hlist_head *freelist)
{
- struct ptlrpc_sec *sec;
- struct ptlrpc_cli_ctx *ctx;
- cfs_hlist_node_t *pos, *next;
- int i;
- ENTRY;
+ struct ptlrpc_sec *sec;
+ struct ptlrpc_cli_ctx *ctx;
+ struct hlist_node __maybe_unused *pos;
+ struct hlist_node *next;
+ int i;
+ ENTRY;
sec = &gsec_pf->gsp_base.gs_base;
ctx_check_death_locked_pf(ctx, freelist);
}
- sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
- EXIT;
+ sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
+ EXIT;
}
static
hash_size = GSS_SEC_PIPEFS_CTX_HASH_SIZE;
alloc_size = sizeof(*gsec_pf) +
- sizeof(cfs_hlist_head_t) * hash_size;
+ sizeof(struct hlist_head) * hash_size;
OBD_ALLOC(gsec_pf, alloc_size);
if (!gsec_pf)
gsec_pf->gsp_chash_size = hash_size;
for (i = 0; i < hash_size; i++)
- CFS_INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
+ INIT_HLIST_HEAD(&gsec_pf->gsp_chash[i]);
if (gss_sec_create_common(&gsec_pf->gsp_base, &gss_policy_pipefs,
imp, ctx, sf))
gss_sec_destroy_common(gsec);
OBD_FREE(gsec, sizeof(*gsec_pf) +
- sizeof(cfs_hlist_head_t) * gsec_pf->gsp_chash_size);
+ sizeof(struct hlist_head) * gsec_pf->gsp_chash_size);
}
static
struct vfs_cred *vcred,
int create, int remove_dead)
{
- struct gss_sec *gsec;
- struct gss_sec_pipefs *gsec_pf;
- struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
- cfs_hlist_head_t *hash_head;
- cfs_hlist_node_t *pos, *next;
- CFS_HLIST_HEAD(freelist);
- unsigned int hash, gc = 0, found = 0;
+ struct gss_sec *gsec;
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx = NULL, *new = NULL;
+ struct hlist_head *hash_head;
+ struct hlist_node __maybe_unused *pos, *next;
+ unsigned int hash, gc = 0, found = 0;
+ HLIST_HEAD(freelist);
ENTRY;
might_sleep();
retry:
spin_lock(&sec->ps_lock);
- /* gc_next == 0 means never do gc */
- if (remove_dead && sec->ps_gc_next &&
- cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
- gss_ctx_cache_gc_pf(gsec_pf, &freelist);
- gc = 1;
- }
+ /* gc_next == 0 means never do gc */
+ if (remove_dead && sec->ps_gc_next &&
+ (ktime_get_real_seconds() > sec->ps_gc_next)) {
+ gss_ctx_cache_gc_pf(gsec_pf, &freelist);
+ gc = 1;
+ }
cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
if (gc == 0 &&
if (found) {
if (new && new != ctx) {
/* lost the race, just free it */
- cfs_hlist_add_head(&new->cc_cache, &freelist);
+ hlist_add_head(&new->cc_cache, &freelist);
new = NULL;
}
/* hot node, move to head */
if (hash_head->first != &ctx->cc_cache) {
- __cfs_hlist_del(&ctx->cc_cache);
- cfs_hlist_add_head(&ctx->cc_cache, hash_head);
+ __hlist_del(&ctx->cc_cache);
+ hlist_add_head(&ctx->cc_cache, hash_head);
}
} else {
/* don't allocate for reverse sec */
int sync)
{
LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
- LASSERT(cfs_hlist_unhashed(&ctx->cc_cache));
+ LASSERT(hlist_unhashed(&ctx->cc_cache));
/* if async is required, we must clear the UPTODATE bit to prevent
 * extra rpcs during the destroy procedure. */
uid_t uid,
int grace, int force)
{
- struct gss_sec *gsec;
- struct gss_sec_pipefs *gsec_pf;
- struct ptlrpc_cli_ctx *ctx;
- cfs_hlist_node_t *pos, *next;
- CFS_HLIST_HEAD(freelist);
+ struct gss_sec *gsec;
+ struct gss_sec_pipefs *gsec_pf;
+ struct ptlrpc_cli_ctx *ctx;
+ struct hlist_node __maybe_unused *pos, *next;
+ HLIST_HEAD(freelist);
int i, busy = 0;
ENTRY;
struct gss_upcall_msg {
struct rpc_pipe_msg gum_base;
atomic_t gum_refcount;
- cfs_list_t gum_list;
+ struct list_head gum_list;
__u32 gum_mechidx;
struct gss_sec *gum_gsec;
struct gss_cli_ctx *gum_gctx;
/* pipefs dentries for each mechanism */
static struct dentry *de_pipes[MECH_MAX] = { NULL, };
/* all upcall messages linked here */
-static cfs_list_t upcall_lists[MECH_MAX];
+static struct list_head upcall_lists[MECH_MAX];
/* and protected by this */
static spinlock_t upcall_locks[MECH_MAX];
static
void upcall_msg_enlist(struct gss_upcall_msg *msg)
{
- __u32 idx = msg->gum_mechidx;
+ __u32 idx = msg->gum_mechidx;
- upcall_list_lock(idx);
- cfs_list_add(&msg->gum_list, &upcall_lists[idx]);
- upcall_list_unlock(idx);
+ upcall_list_lock(idx);
+ list_add(&msg->gum_list, &upcall_lists[idx]);
+ upcall_list_unlock(idx);
}
static
void upcall_msg_delist(struct gss_upcall_msg *msg)
{
- __u32 idx = msg->gum_mechidx;
+ __u32 idx = msg->gum_mechidx;
- upcall_list_lock(idx);
- cfs_list_del_init(&msg->gum_list);
- upcall_list_unlock(idx);
+ upcall_list_lock(idx);
+ list_del_init(&msg->gum_list);
+ upcall_list_unlock(idx);
}
/****************************************
gmsg->gum_gctx = NULL;
}
- LASSERT(cfs_list_empty(&gmsg->gum_list));
- LASSERT(cfs_list_empty(&gmsg->gum_base.list));
- OBD_FREE_PTR(gmsg);
- EXIT;
+ LASSERT(list_empty(&gmsg->gum_list));
+ LASSERT(list_empty(&gmsg->gum_base.list));
+ OBD_FREE_PTR(gmsg);
+ EXIT;
}
static
LASSERT(idx < MECH_MAX);
assert_spin_locked(&upcall_locks[idx]);
- if (cfs_list_empty(&gmsg->gum_list))
+ if (list_empty(&gmsg->gum_list))
return;
- cfs_list_del_init(&gmsg->gum_list);
+ list_del_init(&gmsg->gum_list);
LASSERT(atomic_read(&gmsg->gum_refcount) > 1);
atomic_dec(&gmsg->gum_refcount);
}
struct gss_upcall_msg *gmsg;
upcall_list_lock(mechidx);
- cfs_list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
+ list_for_each_entry(gmsg, &upcall_lists[mechidx], gum_list) {
if (gmsg->gum_data.gum_seq != seq)
continue;
static
int simple_get_bytes(char **buf, __u32 *buflen, void *res, __u32 reslen)
{
- if (*buflen < reslen) {
- CERROR("buflen %u < %u\n", *buflen, reslen);
- return -EINVAL;
- }
+ if (*buflen < reslen) {
+ CERROR("shorter buflen than needed: %u < %u\n",
+ *buflen, reslen);
+ return -EINVAL;
+ }
- memcpy(res, *buf, reslen);
- *buf += reslen;
- *buflen -= reslen;
- return 0;
+ memcpy(res, *buf, reslen);
+ *buf += reslen;
+ *buflen -= reslen;
+ return 0;
}
/****************************************
static
ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
{
- struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct gss_upcall_msg *gss_msg;
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx = NULL;
{
struct gss_upcall_msg *gmsg;
struct gss_upcall_msg_data *gumd;
- static cfs_time_t ratelimit = 0;
+ static time64_t ratelimit;
ENTRY;
- LASSERT(cfs_list_empty(&msg->list));
+ LASSERT(list_empty(&msg->list));
/* normally errno is >= 0 */
if (msg->errno >= 0) {
gumd = &gmsg->gum_data;
LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
- CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
+ CERROR("failed msg %p (seq %u, uid %u, svc %u, nid %#llx, obd %.*s): "
"errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd, msg->errno);
atomic_inc(&gmsg->gum_refcount);
gss_unhash_msg(gmsg);
if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
- cfs_time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
- if (cfs_time_after(now, ratelimit)) {
+ if (now > ratelimit) {
CWARN("upcall timed out, is lgssd running?\n");
ratelimit = now + 15;
}
static
void gss_pipe_release(struct inode *inode)
{
- struct rpc_inode *rpci = RPC_I(inode);
- __u32 idx;
- ENTRY;
+ struct rpc_inode *rpci = RPC_I(inode);
+ __u32 idx;
+ ENTRY;
- idx = (__u32) (long) rpci->private;
- LASSERT(idx < MECH_MAX);
+ idx = (__u32) (long) rpci->private;
+ LASSERT(idx < MECH_MAX);
- upcall_list_lock(idx);
- while (!cfs_list_empty(&upcall_lists[idx])) {
- struct gss_upcall_msg *gmsg;
- struct gss_upcall_msg_data *gumd;
+ upcall_list_lock(idx);
+ while (!list_empty(&upcall_lists[idx])) {
+ struct gss_upcall_msg *gmsg;
+ struct gss_upcall_msg_data *gumd;
- gmsg = cfs_list_entry(upcall_lists[idx].next,
- struct gss_upcall_msg, gum_list);
- gumd = &gmsg->gum_data;
- LASSERT(cfs_list_empty(&gmsg->gum_base.list));
+ gmsg = list_entry(upcall_lists[idx].next,
+ struct gss_upcall_msg, gum_list);
+ gumd = &gmsg->gum_data;
+ LASSERT(list_empty(&gmsg->gum_base.list));
CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
- "nid "LPX64", obd %.*s\n", gmsg,
+ "nid %#llx, obd %.*s\n", gmsg,
gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd);
RETURN(-ENOMEM);
/* initialize pipefs base msg */
- CFS_INIT_LIST_HEAD(&gmsg->gum_base.list);
+ INIT_LIST_HEAD(&gmsg->gum_base.list);
gmsg->gum_base.data = &gmsg->gum_data;
gmsg->gum_base.len = sizeof(gmsg->gum_data);
gmsg->gum_base.copied = 0;
gmsg->gum_data.gum_gid = 0; /* not used for now */
gmsg->gum_data.gum_svc = import_to_gss_svc(imp);
gmsg->gum_data.gum_nid = imp->imp_connection->c_peer.nid;
- strncpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
+ strlcpy(gmsg->gum_data.gum_obd, imp->imp_obd->obd_name,
sizeof(gmsg->gum_data.gum_obd));
/* This could only happen when the sysadmin set it dead/expired
}
de_pipes[MECH_KRB5] = de;
- CFS_INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
+ INIT_LIST_HEAD(&upcall_lists[MECH_KRB5]);
spin_lock_init(&upcall_locks[MECH_KRB5]);
return 0;
static
void __exit gss_exit_pipefs_upcall(void)
{
- __u32 i;
+ __u32 i;
- for (i = 0; i < MECH_MAX; i++) {
- LASSERT(cfs_list_empty(&upcall_lists[i]));
+ for (i = 0; i < MECH_MAX; i++) {
+ LASSERT(list_empty(&upcall_lists[i]));
- /* dput pipe dentry here might cause lgssd oops. */
- de_pipes[i] = NULL;
- }
+ /* dput pipe dentry here might cause lgssd oops. */
+ de_pipes[i] = NULL;
+ }
- rpc_unlink(LUSTRE_PIPE_KRB5);
- rpc_rmdir(LUSTRE_PIPE_ROOT);
+ rpc_unlink(LUSTRE_PIPE_KRB5);
+ rpc_rmdir(LUSTRE_PIPE_ROOT);
}
int __init gss_init_pipefs(void)
void __exit gss_exit_pipefs(void)
{
- gss_exit_pipefs_upcall();
- sptlrpc_unregister_policy(&gss_policy_pipefs);
+ gss_exit_pipefs_upcall();
+ sptlrpc_unregister_policy(&gss_policy_pipefs);
}