* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/key-type.h>
#include <linux/mutex.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
-#else
-#include <liblustre.h>
-#endif
#include <obd.h>
#include <obd_class.h>
LASSERT(timer);
CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
- timeout = timeout * HZ + cfs_time_current();
+ timeout = msecs_to_jiffies(timeout * MSEC_PER_SEC) +
+ cfs_time_current();
init_timer(timer);
timer->expires = timeout;
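/*
 * A minimal standalone sketch of the conversion introduced above: going
 * through msecs_to_jiffies() instead of open-coded "seconds * HZ" keeps
 * the scaling correct for any CONFIG_HZ and reuses the kernel's rounding.
 * The helper name is illustrative, not from the patch.
 */
#include <linux/jiffies.h>
#include <linux/time.h>		/* MSEC_PER_SEC */

static inline unsigned long demo_expiry_jiffies(long timeout_secs)
{
	/* absolute expiry in jiffies, measured from the current tick;
	 * cfs_time_current() above is Lustre's wrapper around jiffies */
	return msecs_to_jiffies(timeout_secs * MSEC_PER_SEC) + jiffies;
}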
atomic_inc(&ctx->cc_refcount);
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+ hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
if (is_root)
gsec_kr->gsk_root_ctx = ctx;
if (gsec_kr->gsk_root_ctx == ctx)
gsec_kr->gsk_root_ctx = NULL;
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
atomic_dec(&ctx->cc_refcount);
spin_unlock_if(&sec->ps_lock, !locked);
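/*
 * The cfs_hlist_* wrappers dropped above map one-to-one onto the native
 * kernel hlist API.  A minimal sketch of the enlist/unlist pattern, with
 * a hypothetical struct standing in for ptlrpc_cli_ctx:
 */
#include <linux/list.h>
#include <linux/atomic.h>

struct demo_ctx {
	struct hlist_node	cache;		/* plays the role of cc_cache */
	atomic_t		refcount;	/* plays the role of cc_refcount */
};

static void demo_enlist_unlist(void)
{
	HLIST_HEAD(head);			/* empty list head */
	struct demo_ctx ctx = { .refcount = ATOMIC_INIT(1) };

	INIT_HLIST_NODE(&ctx.cache);
	hlist_add_head(&ctx.cache, &head);	/* enlist */
	hlist_del_init(&ctx.cache);		/* unlist; node stays reinitialized */
	/* hlist_empty(&head) holds again at this point */
}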
/*
* caller should hold one ref on contexts in freelist.
*/
-static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
+static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
- cfs_hlist_node_t *pos, *next;
- struct ptlrpc_cli_ctx *ctx;
- struct gss_cli_ctx *gctx;
+ struct hlist_node __maybe_unused *pos, *next;
+ struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx *gctx;
- cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
- cfs_hlist_del_init(&ctx->cc_cache);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
+ hlist_del_init(&ctx->cc_cache);
-		/* reverse ctx: update current seq to buddy svcctx if exist.
+		/* reverse ctx: update current seq to buddy svcctx if it exists.
* ideally this should be done at gss_cli_ctx_finalize(), but
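/*
 * Why the iterator above keeps its cfs_ prefix while the other list calls
 * lose it: kernels before v3.9 pass an extra struct hlist_node cursor to
 * hlist_for_each_entry_safe(), while newer kernels take the typed entry
 * directly, leaving "pos" unreferenced on new kernels (hence the
 * __maybe_unused annotations).  A sketch of the compat macro this
 * implies; the configure guard name is an assumption:
 */
#ifdef HAVE_HLIST_FOR_EACH_4ARG		/* hypothetical guard, v3.9+ form */
#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member)	\
	hlist_for_each_entry_safe(tpos, n, head, member)
#else					/* older 5-argument kernel form */
#define cfs_hlist_for_each_entry_safe(tpos, pos, n, head, member)	\
	hlist_for_each_entry_safe(tpos, pos, n, head, member)
#endif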
ctx = gsec_kr->gsk_root_ctx;
if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
- cfs_hlist_node_t *node;
- struct ptlrpc_cli_ctx *tmp;
+ struct hlist_node __maybe_unused *node;
+ struct ptlrpc_cli_ctx *tmp;
/* reverse ctx, search root ctx in list, choose the one
- * with shortest expire time, which is most possibly have
+ * with the shortest expiry time, which most likely has
if (ctx) {
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
atomic_inc(&ctx->cc_refcount);
}
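/*
 * The lookup above follows the usual refcount discipline: the cache list
 * itself owns one reference, so a listed ctx must have a positive count,
 * and the caller takes its own reference before the lock drops.
 * Condensed sketch, reusing the hypothetical demo_ctx from earlier:
 */
#include <linux/spinlock.h>

static struct demo_ctx *demo_lookup_get(struct hlist_head *head,
					spinlock_t *lock)
{
	struct demo_ctx *ctx = NULL;

	spin_lock(lock);
	if (!hlist_empty(head)) {
		ctx = hlist_entry(head->first, struct demo_ctx, cache);
		atomic_inc(&ctx->refcount);	/* caller's reference */
	}
	spin_unlock(lock);
	return ctx;
}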
struct ptlrpc_cli_ctx *new_ctx,
struct key *key)
{
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- cfs_hlist_node_t *hnode;
- struct ptlrpc_cli_ctx *ctx;
- cfs_time_t now;
- ENTRY;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct hlist_node __maybe_unused *hnode;
+ struct ptlrpc_cli_ctx *ctx;
+ cfs_time_t now;
+ ENTRY;
LASSERT(sec_is_reverse(sec));
if (gsec_kr == NULL)
RETURN(NULL);
- CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
+ INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
- LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(hlist_empty(&gsec_kr->gsk_clist));
LASSERT(gsec_kr->gsk_root_ctx == NULL);
gss_sec_destroy_common(gsec);
- * flush context of root or all, we iterate through the list.
+ * To flush the root context or all contexts, we iterate through the list.
*/
static
-void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
- uid_t uid,
- int grace, int force)
+void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace,
+ int force)
{
- struct gss_sec_keyring *gsec_kr;
- cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT;
- cfs_hlist_node_t *pos, *next;
- struct ptlrpc_cli_ctx *ctx;
- ENTRY;
+ struct gss_sec_keyring *gsec_kr;
+ struct hlist_head freelist = HLIST_HEAD_INIT;
+ struct hlist_node __maybe_unused *pos, *next;
+ struct ptlrpc_cli_ctx *ctx;
+ ENTRY;
gsec_kr = sec2gsec_keyring(sec);
atomic_inc(&ctx->cc_refcount);
if (ctx_unlist_kr(ctx, 1)) {
- cfs_hlist_add_head(&ctx->cc_cache, &freelist);
+ hlist_add_head(&ctx->cc_cache, &freelist);
} else {
LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
atomic_dec(&ctx->cc_refcount);
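/*
 * The flush path above is the classic two-phase teardown: victims are
 * unlinked onto a private freelist while ps_lock is held, and the final
 * references are dropped only after the lock is released, where blocking
 * is allowed.  Condensed sketch (v3.9+ iterator form, hypothetical names):
 */
static void demo_flush(struct hlist_head *head, spinlock_t *lock)
{
	struct demo_ctx *ctx;
	struct hlist_node *next;
	HLIST_HEAD(freelist);

	spin_lock(lock);
	hlist_for_each_entry_safe(ctx, next, head, cache) {
		hlist_del_init(&ctx->cache);
		hlist_add_head(&ctx->cache, &freelist);
	}
	spin_unlock(lock);

	/* final puts may block; safe now that the lock is dropped */
	hlist_for_each_entry_safe(ctx, next, &freelist, cache) {
		hlist_del_init(&ctx->cache);
		atomic_dec(&ctx->refcount);	/* stand-in for the real ctx put */
	}
}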
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT;
- cfs_hlist_node_t *pos, *next;
+ struct hlist_head freelist = HLIST_HEAD_INIT;
+ struct hlist_node __maybe_unused *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
atomic_inc(&ctx->cc_refcount);
if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
- cfs_hlist_add_head(&ctx->cc_cache, &freelist);
+ hlist_add_head(&ctx->cc_cache, &freelist);
CWARN("unhashed ctx %p\n", ctx);
} else {
LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- cfs_hlist_node_t *pos, *next;
- struct ptlrpc_cli_ctx *ctx;
- struct gss_cli_ctx *gctx;
- time_t now = cfs_time_current_sec();
- ENTRY;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct hlist_node __maybe_unused *pos, *next;
+ struct ptlrpc_cli_ctx *ctx;
+ struct gss_cli_ctx *gctx;
+ time_t now = cfs_time_current_sec();
+ ENTRY;
spin_lock(&sec->ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_cache) {
+ &gsec_kr->gsk_clist, cc_cache) {
struct key *key;
char flags_str[40];
char mech[40];
RETURN(-EINVAL);
}
- if (key->payload.data != 0) {
+ if (key->payload.data != NULL) {
CERROR("key already have payload\n");
RETURN(-EINVAL);
}
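/*
 * The payload check above lives in the key-type instantiate hook.  A
 * rough sketch of that shape, using the older instantiate signature
 * (before key_preparsed_payload) that the surrounding code implies;
 * names are hypothetical:
 */
#include <linux/key.h>
#include <linux/errno.h>

static int demo_kt_instantiate(struct key *key, const void *data,
			       size_t datalen)
{
	/* this key type carries no payload data at instantiation time */
	if (data != NULL || datalen != 0)
		return -EINVAL;

	/* refuse a key that has already been instantiated */
	if (key->payload.data != NULL)
		return -EINVAL;

	return 0;
}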