-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Eric Mei <ericm@clusterfs.com>
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_SEC
#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/dcache.h>
#include <linux/fs.h>
-#include <linux/random.h>
#include <linux/crypto.h>
#include <linux/key.h>
#include <linux/keyctl.h>
+#include <linux/key-type.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
#else
* internal helpers *
****************************************/
/*
 * Debug helper: dump, for task @tsk, the serial numbers of every keyring
 * that could receive a gss upcall key — the request_key auth key, the
 * thread/process/session keyrings, the per-user uid and session keyrings —
 * plus the task's default destination selector (jit_keyring).
 *
 * NOTE: @tsk is evaluated many times; pass only a plain task pointer with
 * no side effects.  Wrapped in do { } while (0) so the macro expands to a
 * single statement and composes safely with if/else.
 */
#define DUMP_PROCESS_KEYRINGS(tsk)                                      \
do {                                                                    \
        CWARN("DUMP PK: %s[%u,%u/%u](<-%s[%u,%u/%u]): "                 \
              "a %d, t %d, p %d, s %d, u %d, us %d, df %d\n",           \
              tsk->comm, tsk->pid, tsk->uid, tsk->fsuid,                \
              tsk->parent->comm, tsk->parent->pid,                      \
              tsk->parent->uid, tsk->parent->fsuid,                     \
              tsk->request_key_auth ?                                   \
              tsk->request_key_auth->serial : 0,                        \
              key_cred(tsk)->thread_keyring ?                           \
              key_cred(tsk)->thread_keyring->serial : 0,                \
              key_tgcred(tsk)->process_keyring ?                        \
              key_tgcred(tsk)->process_keyring->serial : 0,             \
              key_tgcred(tsk)->session_keyring ?                        \
              key_tgcred(tsk)->session_keyring->serial : 0,             \
              key_cred(tsk)->user->uid_keyring ?                        \
              key_cred(tsk)->user->uid_keyring->serial : 0,             \
              key_cred(tsk)->user->session_keyring ?                    \
              key_cred(tsk)->user->session_keyring->serial : 0,         \
              key_cred(tsk)->jit_keyring                                \
              );                                                        \
} while (0)
#define DUMP_KEY(key) \
); \
}
/* Accessors for the credential fields this file reads from a task:
 * key_cred() yields the task's struct cred, key_tgcred() its thread-group
 * credentials (the keyrings shared by the whole process).  Centralizing
 * them here keeps the keyring code independent of the exact cred layout. */
#define key_cred(tsk)   ((tsk)->cred)
#define key_tgcred(tsk) ((tsk)->cred->tgcred)
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- cfs_mutex_lock(&gsec_kr->gsk_uc_lock);
+ mutex_lock(&gsec_kr->gsk_uc_lock);
#endif
}
static inline void keyring_upcall_unlock(struct gss_sec_keyring *gsec_kr)
{
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- cfs_mutex_unlock(&gsec_kr->gsk_uc_lock);
+ mutex_unlock(&gsec_kr->gsk_uc_lock);
#endif
}
key_revoke_locked(key);
}
-static
-void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
+static void ctx_start_timer_kr(struct ptlrpc_cli_ctx *ctx, long timeout)
{
- struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
- struct timer_list *timer = gctx_kr->gck_timer;
+ struct gss_cli_ctx_keyring *gctx_kr = ctx2gctx_keyring(ctx);
+ struct timer_list *timer = gctx_kr->gck_timer;
- LASSERT(timer);
+ LASSERT(timer);
- CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
- timeout = timeout * CFS_HZ + cfs_time_current();
+ CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
+ timeout = timeout * HZ + cfs_time_current();
- init_timer(timer);
- timer->expires = timeout;
- timer->data = (unsigned long ) ctx;
- timer->function = ctx_upcall_timeout_kr;
+ init_timer(timer);
+ timer->expires = timeout;
+ timer->data = (unsigned long ) ctx;
+ timer->function = ctx_upcall_timeout_kr;
- add_timer(timer);
+ add_timer(timer);
}
/*
}
ctx->cc_expire = cfs_time_current_sec() + KEYRING_UPCALL_TIMEOUT;
- cfs_clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
+ clear_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags);
cfs_atomic_inc(&ctx->cc_refcount); /* for the caller */
return ctx;
LASSERT(sec);
LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
LASSERT(cfs_atomic_read(&sec->ps_nctx) > 0);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
LASSERT(gctx_kr->gck_key == NULL);
ctx_clear_timer_kr(ctx);
* - lock ctx -> unlist -> unlock ctx -> lock key -> unbind -> unlock key
*/
-static inline void spin_lock_if(cfs_spinlock_t *lock, int condition)
+static inline void spin_lock_if(spinlock_t *lock, int condition)
{
- if (condition)
- cfs_spin_lock(lock);
+ if (condition)
+ spin_lock(lock);
}
-static inline void spin_unlock_if(cfs_spinlock_t *lock, int condition)
+static inline void spin_unlock_if(spinlock_t *lock, int condition)
{
- if (condition)
- cfs_spin_unlock(lock);
+ if (condition)
+ spin_unlock(lock);
}
/*
 * Add @ctx to the sec's cached-context list, taking a list reference and
 * setting the CACHED bit; when @is_root it also becomes the cached root
 * context.  @locked says whether the caller already holds sec->ps_lock.
 *
 * NOTE(review): the order — refcount first, then CACHED bit, then list
 * insert, all under ps_lock — looks deliberate; preserve it.
 */
static void ctx_enlist_kr(struct ptlrpc_cli_ctx *ctx, int is_root, int locked)
{
        struct ptlrpc_sec *sec = ctx->cc_sec;
        struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);

        /* must not already be on the list */
        LASSERT(!test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags));
        LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);

        spin_lock_if(&sec->ps_lock, !locked);

        cfs_atomic_inc(&ctx->cc_refcount); /* reference held by the list */
        set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
        cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
        if (is_root)
                gsec_kr->gsk_root_ctx = ctx;

        spin_unlock_if(&sec->ps_lock, !locked);
}
/*
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
/* if hashed bit has gone, leave the job to somebody who is doing it */
- if (cfs_test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
+ if (test_and_clear_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0)
return 0;
/* drop ref inside spin lock to prevent race with other operations */
static void unbind_key_ctx(struct key *key, struct ptlrpc_cli_ctx *ctx)
{
LASSERT(key->payload.data == ctx);
- LASSERT(cfs_test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
+ LASSERT(test_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags) == 0);
/* must revoke the key, or others may treat it as newly created */
key_revoke_locked(key);
static
struct ptlrpc_cli_ctx * sec_lookup_root_ctx_kr(struct ptlrpc_sec *sec)
{
- struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct ptlrpc_cli_ctx *ctx = NULL;
+ struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
+ struct ptlrpc_cli_ctx *ctx = NULL;
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
ctx = gsec_kr->gsk_root_ctx;
cfs_atomic_inc(&ctx->cc_refcount);
}
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- return ctx;
+ return ctx;
}
#define RVS_CTX_EXPIRE_NICE (10)
LASSERT(sec_is_reverse(sec));
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
now = cfs_time_current_sec();
if (key)
bind_key_ctx(key, new_ctx);
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
}
static void construct_key_desc(void *buf, int bufsize,
CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
- cfs_mutex_init(&gsec_kr->gsk_root_uc_lock);
+ mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
- cfs_mutex_init(&gsec_kr->gsk_uc_lock);
+ mutex_init(&gsec_kr->gsk_uc_lock);
#endif
if (gss_sec_create_common(&gsec_kr->gsk_base, &gss_policy_keyring,
*/
static void request_key_unlink(struct key *key)
{
- struct task_struct *tsk = current;
- struct key *ring;
-
- switch (tsk->jit_keyring) {
- case KEY_REQKEY_DEFL_DEFAULT:
- case KEY_REQKEY_DEFL_THREAD_KEYRING:
- ring = key_get(tsk->thread_keyring);
- if (ring)
- break;
- case KEY_REQKEY_DEFL_PROCESS_KEYRING:
- ring = key_get(tsk->signal->process_keyring);
- if (ring)
- break;
- case KEY_REQKEY_DEFL_SESSION_KEYRING:
- rcu_read_lock();
- ring = key_get(rcu_dereference(tsk->signal->session_keyring));
- rcu_read_unlock();
- if (ring)
- break;
- case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
- ring = key_get(tsk->user->session_keyring);
- break;
- case KEY_REQKEY_DEFL_USER_KEYRING:
- ring = key_get(tsk->user->uid_keyring);
- break;
- case KEY_REQKEY_DEFL_GROUP_KEYRING:
- default:
- LBUG();
- }
-
- LASSERT(ring);
- key_unlink(ring, key);
- key_put(ring);
+ struct task_struct *tsk = current;
+ struct key *ring;
+
+ switch (key_cred(tsk)->jit_keyring) {
+ case KEY_REQKEY_DEFL_DEFAULT:
+ case KEY_REQKEY_DEFL_THREAD_KEYRING:
+ ring = key_get(key_cred(tsk)->thread_keyring);
+ if (ring)
+ break;
+ case KEY_REQKEY_DEFL_PROCESS_KEYRING:
+ ring = key_get(key_tgcred(tsk)->process_keyring);
+ if (ring)
+ break;
+ case KEY_REQKEY_DEFL_SESSION_KEYRING:
+ rcu_read_lock();
+ ring = key_get(rcu_dereference(key_tgcred(tsk)
+ ->session_keyring));
+ rcu_read_unlock();
+ if (ring)
+ break;
+ case KEY_REQKEY_DEFL_USER_SESSION_KEYRING:
+ ring = key_get(key_cred(tsk)->user->session_keyring);
+ break;
+ case KEY_REQKEY_DEFL_USER_KEYRING:
+ ring = key_get(key_cred(tsk)->user->uid_keyring);
+ break;
+ case KEY_REQKEY_DEFL_GROUP_KEYRING:
+ default:
+ LBUG();
+ }
+
+ LASSERT(ring);
+ key_unlink(ring, key);
+ key_put(ring);
}
static
* the root upcall lock, make sure nobody else populated new root
* context after last check. */
if (is_root) {
- cfs_mutex_lock(&gsec_kr->gsk_root_uc_lock);
+ mutex_lock(&gsec_kr->gsk_root_uc_lock);
ctx = sec_lookup_root_ctx_kr(sec);
if (ctx)
/* update reverse handle for root user */
sec2gsec(sec)->gs_rvs_hdl = gss_get_next_ctx_index();
- co_flags = "r";
+ switch (sec->ps_part) {
+ case LUSTRE_SP_MDT:
+ co_flags = "m";
+ break;
+ case LUSTRE_SP_OST:
+ co_flags = "o";
+ break;
+ case LUSTRE_SP_MGC:
+ co_flags = "rmo";
+ break;
+ case LUSTRE_SP_CLI:
+ co_flags = "r";
+ break;
+ case LUSTRE_SP_MGS:
+ default:
+ LBUG();
+ }
}
/* in case of setuid, key will be constructed as owner of fsuid/fsgid,
key_put(key);
out:
if (is_root)
- cfs_mutex_unlock(&gsec_kr->gsk_root_uc_lock);
+ mutex_unlock(&gsec_kr->gsk_root_uc_lock);
RETURN(ctx);
}
gsec_kr = sec2gsec_keyring(sec);
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
cfs_atomic_read(&ctx->cc_refcount) - 2);
}
- cfs_set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
- if (!grace)
- cfs_clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags);
+ if (!grace)
+ clear_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags);
cfs_atomic_inc(&ctx->cc_refcount);
cfs_atomic_dec(&ctx->cc_refcount);
}
}
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- dispose_ctx_list_kr(&freelist);
- EXIT;
+ dispose_ctx_list_kr(&freelist);
+ EXIT;
}
static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
- uid_t uid,
- int grace, int force)
+ uid_t uid, int grace, int force)
{
ENTRY;
CWARN("running gc\n");
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
cfs_atomic_dec(&ctx->cc_refcount);
}
}
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- dispose_ctx_list_kr(&freelist);
- EXIT;
- return;
+ dispose_ctx_list_kr(&freelist);
+ EXIT;
+ return;
}
static
time_t now = cfs_time_current_sec();
ENTRY;
- cfs_spin_lock(&sec->ps_lock);
+ spin_lock(&sec->ps_lock);
cfs_hlist_for_each_entry_safe(ctx, pos, next,
&gsec_kr->gsk_clist, cc_cache) {
struct key *key;
gss_handle_to_u64(&gctx->gc_svc_handle),
mech);
}
- cfs_spin_unlock(&sec->ps_lock);
+ spin_unlock(&sec->ps_lock);
- RETURN(0);
+ RETURN(0);
}
/****************************************
* the session keyring is created upon upcall, and don't change all
* the way until upcall finished, so rcu lock is not needed here.
*/
- LASSERT(cfs_current()->signal->session_keyring);
-
- cfs_lockdep_off();
- rc = key_link(cfs_current()->signal->session_keyring, key);
- cfs_lockdep_on();
- if (unlikely(rc)) {
- CERROR("failed to link key %08x to keyring %08x: %d\n",
- key->serial,
- cfs_current()->signal->session_keyring->serial, rc);
- RETURN(rc);
- }
-
- CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
- RETURN(0);
+ LASSERT(key_tgcred(current)->session_keyring);
+
+ lockdep_off();
+ rc = key_link(key_tgcred(current)->session_keyring, key);
+ lockdep_on();
+ if (unlikely(rc)) {
+ CERROR("failed to link key %08x to keyring %08x: %d\n",
+ key->serial,
+ key_tgcred(current)->session_keyring->serial, rc);
+ RETURN(rc);
+ }
+
+ CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
+ RETURN(0);
}
/*
cli_ctx_expire(ctx);
if (rc != -ERESTART)
- cfs_set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
+ set_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags);
}
/* let user space think it's a success */