-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * Copyright (C) 2007 Cluster File Systems, Inc.
- * Author: Eric Mei <ericm@clusterfs.com>
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * This file is part of Lustre, http://www.lustre.org.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
*
- * Lustre is free software; you can redistribute it and/or
- * modify it under the terms of version 2 of the GNU General Public
- * License as published by the Free Software Foundation.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
*
- * Lustre is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * lustre/ptlrpc/gss/gss_keyring.c
*
- * You should have received a copy of the GNU General Public License
- * along with Lustre; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * Author: Eric Mei <ericm@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/random.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
static int sec_install_rctx_kr(struct ptlrpc_sec *sec,
struct ptlrpc_svc_ctx *svc_ctx);
-#ifndef task_aux
-#define task_aux(tsk) (tsk)
-#endif
-
/*
 * the timeout is only for the case that the upcall child process dies abnormally.
- * in any other cases it should finally update kernel key. so we set this
- * timeout value excessive long.
+ * in all other cases it should eventually update the kernel key.
+ *
+ * FIXME we had better incorporate the client & server side upcall timeouts
+ * into the framework of Adaptive Timeouts, but we need to figure out how to
+ * make sure the kernel knows whether the upcall process is still in progress
+ * or has died unexpectedly.
*/
#define KEYRING_UPCALL_TIMEOUT (obd_timeout + obd_timeout)
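/*
 * a minimal sketch of how such an upcall timeout is consumed (the example_*
 * name is hypothetical, not part of this file; compare the real timer setup
 * further below): arm a kernel timer when the upcall starts, so that a child
 * process that died abnormally still lets the context expire.
 */
static void example_arm_upcall_timer(struct timer_list *timer,
                                     unsigned long data,
                                     void (*fn)(unsigned long))
{
        init_timer(timer);
        /* KEYRING_UPCALL_TIMEOUT is in seconds; timer expiry is in jiffies */
        timer->expires = cfs_time_current() + KEYRING_UPCALL_TIMEOUT * CFS_HZ;
        timer->data = data;
        timer->function = fn;
        add_timer(timer);
}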
tsk->comm, tsk->pid, tsk->uid, tsk->fsuid, \
tsk->parent->comm, tsk->parent->pid, \
tsk->parent->uid, tsk->parent->fsuid, \
- task_aux(tsk)->request_key_auth ? \
- task_aux(tsk)->request_key_auth->serial : 0, \
- task_aux(tsk)->thread_keyring ? \
- task_aux(tsk)->thread_keyring->serial : 0, \
+ tsk->request_key_auth ? \
+ tsk->request_key_auth->serial : 0, \
+ tsk->thread_keyring ? \
+ tsk->thread_keyring->serial : 0, \
tsk->signal->process_keyring ? \
tsk->signal->process_keyring->serial : 0, \
tsk->signal->session_keyring ? \
tsk->user->uid_keyring->serial : 0, \
tsk->user->session_keyring ? \
tsk->user->session_keyring->serial : 0, \
- task_aux(tsk)->jit_keyring \
+ tsk->jit_keyring \
); \
}
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- mutex_lock(&gsec_kr->gsk_uc_lock);
+ mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}
static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- mutex_unlock(&gsec_kr->gsk_uc_lock);
+ mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}
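/*
 * a minimal sketch of the intended bracketing (the real use appears around
 * request_key() further below); when HAVE_KEYRING_UPCALL_SERIALIZED is not
 * defined both helpers compile to nothing and upcalls run in parallel:
 *
 *      keyring_upcall_lock(gsec_kr);
 *      key = request_key(&gss_key_type, desc, coinfo);
 *      keyring_upcall_unlock(gsec_kr);
 */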
cli_ctx_expire(ctx);
key_revoke_locked(key);
- sptlrpc_cli_ctx_wakeup(ctx);
}
static
LASSERT(timer);
CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
- timeout = timeout * HZ + cfs_time_current();
+ timeout = timeout * CFS_HZ + cfs_time_current();
init_timer(timer);
timer->expires = timeout;
}
ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
- clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
- atomic_inc(&ctx->cc_refcount); /* for the caller */
+ clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
+ cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */
return ctx;
}
/* at this time the association with key has been broken. */
LASSERT(sec);
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- LASSERT(atomic_read(&sec->ps_nctx) > 0);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
LASSERT(gctx_kr->gck_key == NULL);
ctx_clear_timer_kr(ctx);
OBD_FREE_PTR(gctx_kr);
- atomic_dec(&sec->ps_nctx);
+ cfs_atomic_dec(&sec->ps_nctx);
sptlrpc_sec_put(sec);
}
if (sync) {
ctx_destroy_kr(ctx);
} else {
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
sptlrpc_gc_add_ctx(ctx);
}
}
static void ctx_put_kr(struct ptlrpc_cli_ctx *ctx, int sync)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- if (atomic_dec_and_test(&ctx->cc_refcount))
+ if (cfs_atomic_dec_and_test(&ctx->cc_refcount))
ctx_release_kr(ctx, sync);
}
static inline void spin_lock_if(spinlock_t *lock, int condition)
{
- if (condition)
- spin_lock(lock);
+ if (condition)
+ spin_lock(lock);
}
static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
- if (condition)
- spin_unlock(lock);
+ if (condition)
+ spin_unlock(lock);
}
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
- struct ptlrpc_sec *sec = ctx->cc_sec;
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_sec *sec = ctx->cc_sec;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- spin_lock_if(&sec->ps_lock, !locked);
+ spin_lock_if(&sec->ps_lock, !locked);
- atomic_inc(&ctx->cc_refcount);
- set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
- if (is_root)
- gsec_kr->gsk_root_ctx = ctx;
+ cfs_atomic_inc(&ctx->cc_refcount);
+ set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
+ cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+ if (is_root)
+ gsec_kr->gsk_root_ctx = ctx;
- spin_unlock_if(&sec->ps_lock, !locked);
+ spin_unlock_if(&sec->ps_lock, !locked);
}
/*
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
/* if the cached bit has gone, leave the job to somebody who is doing it */
- if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
return 0;
/* drop ref inside spin lock to prevent race with other operations */
if (gsec_kr->gsk_root_ctx == ctx)
gsec_kr->gsk_root_ctx = NULL;
- hlist_del_init(&ctx->cc_cache);
- atomic_dec(&ctx->cc_refcount);
+ cfs_hlist_del_init(&ctx->cc_cache);
+ cfs_atomic_dec(&ctx->cc_refcount);
spin_unlock_if(&sec->ps_lock, !locked);
*/
static void bind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(atomic_read(&key->usage) > 0);
LASSERT(ctx2gctx_keyring(ctx)->gck_key == NULL);
LASSERT(key->payload.data == NULL);
/* at this time the context may or may not be in the list. */
key_get(key);
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
ctx2gctx_keyring(ctx)->gck_key = key;
key->payload.data = ctx;
}
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
LASSERT(key->payload.data == ctx);
- LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
/* must revoke the key, or others may treat it as newly created */
key_revoke_locked(key);
/*
* caller should hold one ref on contexts in freelist.
*/
-static void dispose_ctx_list_kr(struct hlist_head *freelist)
+static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
{
- struct hlist_node *pos, *next;
+ cfs_hlist_node_t *pos, *next;
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx;
- hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
- hlist_del_init(&ctx->cc_cache);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next, freelist, cc_cache) {
+ cfs_hlist_del_init(&ctx->cc_cache);
/* reverse ctx: update current seq to buddy svcctx if it exists.
* ideally this should be done at gss_cli_ctx_finalize(), but
if (!rawobj_empty(&gctx->gc_svc_handle) &&
sec_is_reverse(gctx->gc_base.cc_sec)) {
gss_svc_upcall_update_sequence(&gctx->gc_svc_handle,
- (__u32) atomic_read(&gctx->gc_seq));
+ (__u32) cfs_atomic_read(&gctx->gc_seq));
}
/* we need to wake up waiting reqs here. the context might
static
struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct ptlrpc_cli_ctx *ctx = NULL;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_cli_ctx *ctx = NULL;
- spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
ctx = gsec_kr->gsk_root_ctx;
if (ctx == NULL && unlikely(sec_is_reverse(sec))) {
- struct hlist_node *node;
+ cfs_hlist_node_t *node;
struct ptlrpc_cli_ctx *tmp;
/* reverse ctx, search root ctx in list, choose the one
* with the shortest expire time, which most likely has
* an established peer ctx at the client side. */
- hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist, cc_cache) {
+ cfs_hlist_for_each_entry(tmp, node, &gsec_kr->gsk_clist,
+ cc_cache) {
if (ctx == NULL || ctx->cc_expire == 0 ||
ctx->cc_expire > tmp->cc_expire) {
ctx = tmp;
}
if (ctx) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
- atomic_inc(&ctx->cc_refcount);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
+ cfs_atomic_inc(&ctx->cc_refcount);
}
- spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- return ctx;
+ return ctx;
}
#define RVS_CTX_EXPIRE_NICE (10)
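/*
 * a sketch of how RVS_CTX_EXPIRE_NICE is applied below when a new reverse
 * ctx is installed: every existing ctx is clamped to at most this many more
 * seconds to live, e.g.
 *
 *      ctx->cc_early_expire = 1;
 *      ctx->cc_expire = cfs_time_current_sec() + RVS_CTX_EXPIRE_NICE;
 */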
struct key *key)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_node *hnode;
+ cfs_hlist_node_t *hnode;
struct ptlrpc_cli_ctx *ctx;
cfs_time_t now;
ENTRY;
LASSERT(sec_is_reverse(sec));
- spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
now = cfs_time_current_sec();
/* set all existing ctxs short expiry */
- hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
+ cfs_hlist_for_each_entry(ctx, hnode, &gsec_kr->gsk_clist, cc_cache) {
if (ctx->cc_expire > now + RVS_CTX_EXPIRE_NICE) {
ctx->cc_early_expire = 1;
ctx->cc_expire = now + RVS_CTX_EXPIRE_NICE;
if (key)
bind_key_ctx(key, new_ctx);
- spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
}
static void construct_key_desc(void *buf, int bufsize,
CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
- mutex_init(&gsec_kr->gsk_root_uc_lock);
+ mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- mutex_init(&gsec_kr->gsk_uc_lock);
+ mutex_init(&gsec_kr->gsk_uc_lock);
#endif
if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
- LASSERT(hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist));
LASSERT(gsec_kr->gsk_root_ctx == NULL);
gss_sec_destroy_common(gsec);
struct task_struct *tsk = current;
struct key *ring;
- switch (task_aux(tsk)->jit_keyring) {
+ switch (tsk->jit_keyring) {
case KEY_REQKEY_DEFL_DEFAULT:
case KEY_REQKEY_DEFL_THREAD_KEYRING:
- ring = key_get(task_aux(tsk)->thread_keyring);
+ ring = key_get(tsk->thread_keyring);
if (ring)
break;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
* the root upcall lock, make sure nobody else has populated a new
* root context since the last check. */
if (is_root) {
- mutex_lock(&gsec_kr->gsk_root_uc_lock);
+ mutex_lock(&gsec_kr->gsk_root_uc_lock);
ctx = sec_lookup_root_ctx_kr(sec);
if (ctx)
/* update reverse handle for root user */
sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();
- co_flags = "r";
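+               /* map the part this sec acts for to the flag string that
+                * goes into the key description: MDT "m", OST "o", MGC
+                * all of "rmo", regular client "r"; MGS never creates a
+                * root ctx here (LBUG). */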
+ switch (sec->ps_part) {
+ case LUSTRE_SP_MDT:
+ co_flags = "m";
+ break;
+ case LUSTRE_SP_OST:
+ co_flags = "o";
+ break;
+ case LUSTRE_SP_MGC:
+ co_flags = "rmo";
+ break;
+ case LUSTRE_SP_CLI:
+ co_flags = "r";
+ break;
+ case LUSTRE_SP_MGS:
+ default:
+ LBUG();
+ }
}
/* in case of setuid, key will be constructed as owner of fsuid/fsgid,
co_flags, import_to_gss_svc(imp),
imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name);
- CDEBUG(D_SEC, "requesting key for %s", desc);
+ CDEBUG(D_SEC, "requesting key for %s\n", desc);
keyring_upcall_lock(gsec_kr);
key = request_key(&gss_key_type, desc, coinfo);
CERROR("failed request key: %ld\n", PTR_ERR(key));
goto out;
}
- CDEBUG(D_SEC, "obtained key %08x for %s", key->serial, desc);
+ CDEBUG(D_SEC, "obtained key %08x for %s\n", key->serial, desc);
/* once payload.data points to a ctx, it never changes until
* we de-associate them; but a parallel request_key() may return
if (likely(key->payload.data != NULL)) {
ctx = key->payload.data;
- LASSERT(atomic_read(&ctx->cc_refcount) >= 1);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 1);
LASSERT(ctx2gctx_keyring(ctx)->gck_key == key);
LASSERT(atomic_read(&key->usage) >= 2);
/* simply take a ref and return. it's upper layer's
* responsibility to detect & replace dead ctx. */
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
} else {
/* pre-initialization with a cli_ctx. this can't be done in
* key_instantiate() because we don't have enough information
key_put(key);
out:
if (is_root)
- mutex_unlock(&gsec_kr->gsk_root_uc_lock);
+ mutex_unlock(&gsec_kr->gsk_root_uc_lock);
RETURN(ctx);
}
struct ptlrpc_cli_ctx *ctx,
int sync)
{
- LASSERT(atomic_read(&sec->ps_refcount) > 0);
- LASSERT(atomic_read(&ctx->cc_refcount) == 0);
+ LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) == 0);
ctx_release_kr(ctx, sync);
}
for (;;) {
key = request_key(&gss_key_type, desc, NULL);
if (IS_ERR(key)) {
- CWARN("No more key found for current user\n");
+ CDEBUG(D_SEC, "No more key found for current user\n");
break;
}
int grace, int force)
{
struct gss_sec_keyring *gsec_kr;
- struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
- struct hlist_node *pos, *next;
+ cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT;
+ cfs_hlist_node_t *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
gsec_kr = sec2gsec_keyring(sec);
- spin_lock(&sec->ps_lock);
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_cache) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ spin_lock(&sec->ps_lock);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
if (uid != -1 && uid != ctx->cc_vcred.vc_uid)
continue;
/* at this moment there are at least 2 base references:
* key association and in-list. */
- if (atomic_read(&ctx->cc_refcount) > 2) {
+ if (cfs_atomic_read(&ctx->cc_refcount) > 2) {
if (!force)
continue;
CWARN("flush busy ctx %p(%u->%s, extra ref %d)\n",
ctx, ctx->cc_vcred.vc_uid,
sec2target_str(ctx->cc_sec),
- atomic_read(&ctx->cc_refcount) - 2);
+ cfs_atomic_read(&ctx->cc_refcount) - 2);
}
- set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
- if (!grace)
- clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
if (ctx_unlist_kr(ctx, 1)) {
- hlist_add_head(&ctx->cc_cache, &freelist);
+ cfs_hlist_add_head(&ctx->cc_cache, &freelist);
} else {
- LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
- atomic_dec(&ctx->cc_refcount);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
+ cfs_atomic_dec(&ctx->cc_refcount);
}
}
- spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- dispose_ctx_list_kr(&freelist);
- EXIT;
+ dispose_ctx_list_kr(&freelist);
+ EXIT;
}
static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
- uid_t uid,
- int grace, int force)
+ uid_t uid, int grace, int force)
{
ENTRY;
CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
- sec, atomic_read(&sec->ps_refcount), atomic_read(&sec->ps_nctx),
+ sec, cfs_atomic_read(&sec->ps_refcount),
+ cfs_atomic_read(&sec->ps_nctx),
uid, grace, force);
if (uid != -1 && uid != 0)
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
- struct hlist_node *pos, *next;
+ cfs_hlist_head_t freelist = CFS_HLIST_HEAD_INIT;
+ cfs_hlist_node_t *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
CWARN("running gc\n");
- spin_lock(&sec->ps_lock);
- hlist_for_each_entry_safe(ctx, pos, next,
- &gsec_kr->gsk_clist, cc_cache) {
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ spin_lock(&sec->ps_lock);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
+ &gsec_kr->gsk_clist, cc_cache) {
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
- atomic_inc(&ctx->cc_refcount);
+ cfs_atomic_inc(&ctx->cc_refcount);
if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
- hlist_add_head(&ctx->cc_cache, &freelist);
+ cfs_hlist_add_head(&ctx->cc_cache, &freelist);
CWARN("unhashed ctx %p\n", ctx);
} else {
- LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
- atomic_dec(&ctx->cc_refcount);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) >= 2);
+ cfs_atomic_dec(&ctx->cc_refcount);
}
}
- spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- dispose_ctx_list_kr(&freelist);
- EXIT;
- return;
+ dispose_ctx_list_kr(&freelist);
+ EXIT;
+ return;
}
static
int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_node *pos, *next;
+ cfs_hlist_node_t *pos, *next;
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx;
time_t now = cfs_time_current_sec();
ENTRY;
- spin_lock(&sec->ps_lock);
- hlist_for_each_entry_safe(ctx, pos, next,
+ spin_lock(&sec->ps_lock);
+ cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
struct key *key;
char flags_str[40];
"seq %d, win %u, key %08x(ref %d), "
"hdl "LPX64":"LPX64", mech: %s\n",
ctx, ctx->cc_vcred.vc_uid,
- atomic_read(&ctx->cc_refcount),
+ cfs_atomic_read(&ctx->cc_refcount),
ctx->cc_expire,
ctx->cc_expire ? ctx->cc_expire - now : 0,
flags_str,
- atomic_read(&gctx->gc_seq),
+ cfs_atomic_read(&gctx->gc_seq),
gctx->gc_win,
key ? key->serial : 0,
key ? atomic_read(&key->usage) : 0,
gss_handle_to_u64(&gctx->gc_svc_handle),
mech);
}
- spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- RETURN(0);
+ RETURN(0);
}
/****************************************
static
int gss_cli_ctx_validate_kr(struct ptlrpc_cli_ctx *ctx)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(ctx->cc_sec);
if (cli_ctx_check_death(ctx)) {
static
void gss_cli_ctx_die_kr(struct ptlrpc_cli_ctx *ctx, int grace)
{
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(ctx->cc_sec);
cli_ctx_expire(ctx);
ENTRY;
if (data != NULL || datalen != 0) {
- CERROR("invalid: data %p, len %d\n", data, datalen);
+ CERROR("invalid: data %p, len %lu\n", data, (long)datalen);
RETURN(-EINVAL);
}
*/
LASSERT(cfs_current()->signal->session_keyring);
+ lockdep_off();
rc = key_link(cfs_current()->signal->session_keyring, key);
+ lockdep_on();
if (unlikely(rc)) {
CERROR("failed to link key %08x to keyring %08x: %d\n",
key->serial,
struct ptlrpc_cli_ctx *ctx = key->payload.data;
struct gss_cli_ctx *gctx;
rawobj_t tmpobj = RAWOBJ_EMPTY;
+ __u32 datalen32 = (__u32) datalen;
int rc;
ENTRY;
if (data == NULL || datalen == 0) {
- CWARN("invalid: data %p, len %d\n", data, datalen);
+ CWARN("invalid: data %p, len %lu\n", data, (long)datalen);
RETURN(-EINVAL);
}
- /* there's a race between userspace parent - child processes. if
- * child finish negotiation too fast and call kt_update(), the ctx
+ /* if the upcall finished negotiation too fast (most likely because
+ * a local error happened) and called kt_update(), the ctx
* might still be NULL. but the key will finally be associated
* with a context, or be revoked. if the key status is fine, return
* -EAGAIN to allow userspace to sleep a while and call again. */
if (ctx == NULL) {
- CWARN("race in userspace. key %p(%x) flags %lx\n",
+ CDEBUG(D_SEC, "update too soon: key %p(%x) flags %lx\n",
key, key->serial, key->flags);
rc = key_validate(key);
RETURN(rc);
}
- LASSERT(atomic_read(&ctx->cc_refcount) > 0);
+ LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
LASSERT(ctx->cc_sec);
ctx_clear_timer_kr(ctx);
/* don't proceed if already refreshed */
if (cli_ctx_is_refreshed(ctx)) {
CWARN("ctx already done refresh\n");
- sptlrpc_cli_ctx_wakeup(ctx);
RETURN(0);
}
sptlrpc_cli_ctx_get(ctx);
gctx = ctx2gctx(ctx);
- rc = buffer_extract_bytes(&data, &datalen, &gctx->gc_win,
+ rc = buffer_extract_bytes(&data, &datalen32, &gctx->gc_win,
sizeof(gctx->gc_win));
if (rc) {
CERROR("failed extract seq_win\n");
if (gctx->gc_win == 0) {
__u32 nego_rpc_err, nego_gss_err;
- rc = buffer_extract_bytes(&data, &datalen, &nego_rpc_err,
+ rc = buffer_extract_bytes(&data, &datalen32, &nego_rpc_err,
sizeof(nego_rpc_err));
if (rc) {
CERROR("failed to extrace rpc rc\n");
goto out;
}
- rc = buffer_extract_bytes(&data, &datalen, &nego_gss_err,
+ rc = buffer_extract_bytes(&data, &datalen32, &nego_gss_err,
sizeof(nego_gss_err));
if (rc) {
CERROR("failed to extrace gss rc\n");
rc = nego_rpc_err ? nego_rpc_err : -EACCES;
} else {
rc = rawobj_extract_local_alloc(&gctx->gc_handle,
- (__u32 **) &data, &datalen);
+ (__u32 **) &data, &datalen32);
if (rc) {
CERROR("failed extract handle\n");
goto out;
}
- rc = rawobj_extract_local(&tmpobj, (__u32 **) &data, &datalen);
+ rc = rawobj_extract_local(&tmpobj, (__u32 **) &data,
+                           &datalen32);
if (rc) {
CERROR("failed extract mech\n");
goto out;
cli_ctx_expire(ctx);
if (rc != -ERESTART)
- set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
}
- sptlrpc_cli_ctx_wakeup(ctx);
-
/* let user space think it's a success */
sptlrpc_cli_ctx_put(ctx, 1);
RETURN(0);
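/*
 * a minimal sketch (userspace side, illustration only: example_* is a
 * hypothetical name, not taken from lgssd; needs <keyutils.h>, <errno.h>,
 * <unistd.h>) of the retry behaviour the -EAGAIN above asks for: sleep a
 * while and call keyctl_update() again until the kernel has bound a
 * context to the key or revoked it.
 */
#if 0
static int example_update_key(key_serial_t key, const void *data, size_t len)
{
        int i;

        for (i = 0; i < 10; i++) {
                if (keyctl_update(key, data, len) == 0)
                        return 0;               /* ctx bound, done */
                if (errno != EAGAIN)
                        return -errno;          /* revoked or real error */
                sleep(1);                       /* kernel said: not yet */
        }
        return -ETIMEDOUT;
}
#endif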
.authorize = gss_svc_authorize,
.free_rs = gss_svc_free_rs,
.free_ctx = gss_svc_free_ctx,
+ .prep_bulk = gss_svc_prep_bulk,
.unwrap_bulk = gss_svc_unwrap_bulk,
.wrap_bulk = gss_svc_wrap_bulk,
.install_rctx = gss_svc_install_rctx_kr,