* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/key-type.h>
#include <linux/mutex.h>
#include <asm/atomic.h>
-#else
-#include <liblustre.h>
-#endif
#include <obd.h>
#include <obd_class.h>
/*
* the timeout only covers the case where the upcall child process dies
* abnormally; in any other case the upcall eventually updates the kernel key.
- *
+ *
* FIXME we'd better incorporate the client & server side upcall timeouts
* into the framework of Adaptive Timeouts, but we need to figure out how to
* make sure the kernel knows whether an upcall process is in progress or has died
}
#define key_cred(tsk) ((tsk)->cred)
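+/*
+ * Compat shim: on kernels where struct cred no longer carries the
+ * thread-group credential pointer (tgcred was dropped upstream), fall
+ * back to the per-task cred itself.
+ */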
+#ifdef HAVE_CRED_TGCRED
#define key_tgcred(tsk) ((tsk)->cred->tgcred)
+#else
+#define key_tgcred(tsk) key_cred(tsk)
+#endif
static inline void keyring_upcall_lock(struct gss_sec_keyring *gsec_kr)
{
LASSERT(timer);
CDEBUG(D_SEC, "ctx %p: start timer %lds\n", ctx, timeout);
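+ /* timeout is in seconds; use the standard msecs_to_jiffies() helper
+ * for the conversion to jiffies rather than multiplying by HZ */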
- timeout = timeout * HZ + cfs_time_current();
+ timeout = msecs_to_jiffies(timeout * MSEC_PER_SEC) +
+ cfs_time_current();
init_timer(timer);
timer->expires = timeout;
atomic_inc(&ctx->cc_refcount);
set_bit(PTLRPC_CTX_CACHED_BIT, &ctx->cc_flags);
- cfs_hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
+ hlist_add_head(&ctx->cc_cache, &gsec_kr->gsk_clist);
if (is_root)
gsec_kr->gsk_root_ctx = ctx;
if (gsec_kr->gsk_root_ctx == ctx)
gsec_kr->gsk_root_ctx = NULL;
- cfs_hlist_del_init(&ctx->cc_cache);
+ hlist_del_init(&ctx->cc_cache);
atomic_dec(&ctx->cc_refcount);
spin_unlock_if(&sec->ps_lock, !locked);
/*
* the caller should hold one ref on each context in the freelist.
*/
-static void dispose_ctx_list_kr(cfs_hlist_head_t *freelist)
+static void dispose_ctx_list_kr(struct hlist_head *freelist)
{
struct hlist_node __maybe_unused *pos, *next;
struct ptlrpc_cli_ctx *ctx;
if (ctx) {
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
- LASSERT(!cfs_hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(!hlist_empty(&gsec_kr->gsk_clist));
atomic_inc(&ctx->cc_refcount);
}
if (gsec_kr == NULL)
RETURN(NULL);
- CFS_INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
+ INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
mutex_init(&gsec_kr->gsk_root_uc_lock);
#ifdef HAVE_KEYRING_UPCALL_SERIALIZED
CDEBUG(D_SEC, "destroy %s@%p\n", sec->ps_policy->sp_name, sec);
- LASSERT(cfs_hlist_empty(&gsec_kr->gsk_clist));
+ LASSERT(hlist_empty(&gsec_kr->gsk_clist));
LASSERT(gsec_kr->gsk_root_ctx == NULL);
gss_sec_destroy_common(gsec);
* encode real uid/gid into callout info.
*/
+ /* But first make sure the obd type is supported: only client-side
+ * connector devices do the keyring upcall */
+ if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
+ strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
+ strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MGC_NAME) &&
+ strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
+ strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSP_NAME)) {
+ CERROR("obd %s is not a supported device\n",
+ imp->imp_obd->obd_name);
+ GOTO(out, ctx = NULL);
+ }
+
construct_key_desc(desc, sizeof(desc), sec, vcred->vc_uid);
/* callout info format:
if (coinfo == NULL)
goto out;
- snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%d:"LPX64":%s",
- sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
- vcred->vc_uid, vcred->vc_gid,
- co_flags, import_to_gss_svc(imp),
- imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name);
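+ /* the trailing "LPX64" field is new: it carries the connection's own
+ * NID (c_self), presumably so the userspace upcall can tell which
+ * local interface this import uses */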
+ snprintf(coinfo, coinfo_size, "%d:%s:%u:%u:%s:%d:"LPX64":%s:"LPX64,
+ sec->ps_id, sec2gsec(sec)->gs_mech->gm_name,
+ vcred->vc_uid, vcred->vc_gid,
+ co_flags, import_to_gss_svc(imp),
+ imp->imp_connection->c_peer.nid, imp->imp_obd->obd_name,
+ imp->imp_connection->c_self);
CDEBUG(D_SEC, "requesting key for %s\n", desc);
* to flush the root's context, or all contexts, we iterate through the list.
*/
static
-void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
- uid_t uid,
- int grace, int force)
+void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec, uid_t uid, int grace,
+ int force)
{
struct gss_sec_keyring *gsec_kr;
- struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
+ struct hlist_head freelist = HLIST_HEAD_INIT;
struct hlist_node __maybe_unused *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
atomic_inc(&ctx->cc_refcount);
if (ctx_unlist_kr(ctx, 1)) {
- cfs_hlist_add_head(&ctx->cc_cache, &freelist);
+ hlist_add_head(&ctx->cc_cache, &freelist);
} else {
LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
atomic_dec(&ctx->cc_refcount);
void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
{
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
- struct hlist_head freelist = CFS_HLIST_HEAD_INIT;
+ struct hlist_head freelist = HLIST_HEAD_INIT;
struct hlist_node __maybe_unused *pos, *next;
struct ptlrpc_cli_ctx *ctx;
ENTRY;
atomic_inc(&ctx->cc_refcount);
if (cli_ctx_check_death(ctx) && ctx_unlist_kr(ctx, 1)) {
- cfs_hlist_add_head(&ctx->cc_cache, &freelist);
+ hlist_add_head(&ctx->cc_cache, &freelist);
CWARN("unhashed ctx %p\n", ctx);
} else {
LASSERT(atomic_read(&ctx->cc_refcount) >= 2);
****************************************/
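+/*
+ * On kernels with key preparsing (HAVE_KEY_TYPE_INSTANTIATE_2ARGS) the
+ * payload arrives wrapped in struct key_preparsed_payload; unpack data
+ * and datalen locally so the function body stays identical on both APIs.
+ */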
static
+#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
+int gss_kt_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+ const void *data = prep->data;
+ size_t datalen = prep->datalen;
+#else
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
+#endif
int rc;
ENTRY;
RETURN(-EINVAL);
}
- if (key->payload.data != 0) {
+ if (key->payload.data != NULL) {
CERROR("key already have payload\n");
RETURN(-EINVAL);
}
* on the context without fear of losing the refcount.
*/
static
+#ifdef HAVE_KEY_TYPE_INSTANTIATE_2ARGS
+int gss_kt_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ const void *data = prep->data;
+ __u32 datalen32 = (__u32) prep->datalen;
+#else
int gss_kt_update(struct key *key, const void *data, size_t datalen)
{
+ __u32 datalen32 = (__u32) datalen;
+#endif
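+ /* note: datalen is narrowed to __u32 above because rawobj lengths
+ * are 32-bit */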
struct ptlrpc_cli_ctx *ctx = key->payload.data;
struct gss_cli_ctx *gctx;
rawobj_t tmpobj = RAWOBJ_EMPTY;
- __u32 datalen32 = (__u32) datalen;
int rc;
ENTRY;
- if (data == NULL || datalen == 0) {
- CWARN("invalid: data %p, len %lu\n", data, (long)datalen);
- RETURN(-EINVAL);
- }
+ if (data == NULL || datalen32 == 0) {
+ CWARN("invalid: data %p, len %lu\n", data, (long)datalen32);
+ RETURN(-EINVAL);
+ }
/* if the upcall finished negotiation too fast (most likely because
* a local error happened) and called kt_update(), the ctx
RETURN(0);
}
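+/*
+ * Newer kernels dropped the key_type->match() hook in favour of
+ * ->match_preparse(), which installs a comparison callback through
+ * struct key_match_data; both variants below simply compare the key
+ * description string.
+ */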
-static
-int gss_kt_match(const struct key *key, const void *desc)
+#ifndef HAVE_KEY_MATCH_DATA
+static int
+gss_kt_match(const struct key *key, const void *desc)
+{
+ return (strcmp(key->description, (const char *) desc) == 0);
+}
+#else /* HAVE_KEY_MATCH_DATA */
+static bool
+gss_kt_match(const struct key *key, const struct key_match_data *match_data)
+{
+ const char *desc = match_data->raw_data;
+
+ return (strcmp(key->description, desc) == 0);
+}
+
+/*
+ * Preparse the match criterion.
+ */
+static int gss_kt_match_preparse(struct key_match_data *match_data)
{
- return (strcmp(key->description, (const char *) desc) == 0);
+ match_data->lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT;
+ match_data->cmp = gss_kt_match;
+ return 0;
}
+#endif /* HAVE_KEY_MATCH_DATA */
static
void gss_kt_destroy(struct key *key)
static struct key_type gss_key_type =
{
- .name = "lgssc",
- .def_datalen = 0,
- .instantiate = gss_kt_instantiate,
- .update = gss_kt_update,
- .match = gss_kt_match,
- .destroy = gss_kt_destroy,
- .describe = gss_kt_describe,
+ .name = "lgssc",
+ .def_datalen = 0,
+ .instantiate = gss_kt_instantiate,
+ .update = gss_kt_update,
+#ifdef HAVE_KEY_MATCH_DATA
+ .match_preparse = gss_kt_match_preparse,
+#else
+ .match = gss_kt_match,
+#endif
+ .destroy = gss_kt_destroy,
+ .describe = gss_kt_describe,
};
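+/*
+ * Sketch of how this key type is wired up at module load, assuming the
+ * standard kernel keyring API (error handling elided):
+ *
+ *	rc = register_key_type(&gss_key_type);
+ *	...
+ *	unregister_key_type(&gss_key_type);
+ */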
/****************************************