-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* Modifications for Lustre
*
- * Copyright 2008, Sun Microsystems, Inc.
- * Author: Eric Mei <eric.mei@sun.com>
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
*
- * Copyright 2004 - 2006, Cluster File Systems, Inc.
- * All rights reserved
* Author: Eric Mei <ericm@clusterfs.com>
*/
*/
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/mutex.h>
#include <linux/sunrpc/cache.h>
-#else
-#include <liblustre.h>
-#endif
+#include <net/sock.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
-#include <lustre_net.h>
#include <lustre_import.h>
+#include <lustre_net.h>
+#include <lustre_nodemap.h>
#include <lustre_sec.h>
#include "gss_err.h"
#define GSS_SVC_UPCALL_TIMEOUT (20)
-static spinlock_t __ctx_index_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t __ctx_index_lock;
static __u64 __ctx_index;
__u64 gss_get_next_ctx_index(void)
{
- __u64 idx;
+ __u64 idx;
- spin_lock(&__ctx_index_lock);
- idx = __ctx_index++;
- spin_unlock(&__ctx_index_lock);
+ spin_lock(&__ctx_index_lock);
+ idx = __ctx_index++;
+ spin_unlock(&__ctx_index_lock);
- return idx;
+ return idx;
}
static inline unsigned long hash_mem(char *buf, int length, int bits)
{
- unsigned long hash = 0;
- unsigned long l = 0;
- int len = 0;
- unsigned char c;
-
- do {
- if (len == length) {
- c = (char) len;
- len = -1;
- } else
- c = *buf++;
-
- l = (l << 8) | c;
- len++;
-
- if ((len & (BITS_PER_LONG/8-1)) == 0)
- hash = hash_long(hash^l, BITS_PER_LONG);
- } while (len);
-
- return hash >> (BITS_PER_LONG - bits);
+ unsigned long hash = 0;
+ unsigned long l = 0;
+ int len = 0;
+ unsigned char c;
+
+ do {
+ if (len == length) {
+ c = (char) len;
+ len = -1;
+ } else
+ c = *buf++;
+
+ l = (l << 8) | c;
+ len++;
+
+ if ((len & (BITS_PER_LONG/8-1)) == 0)
+ hash = hash_long(hash^l, BITS_PER_LONG);
+ } while (len);
+
+ return hash >> (BITS_PER_LONG - bits);
}
+/* This compatibility can be removed once kernel 3.3 is used,
+ * since cache_register_net/cache_unregister_net are exported.
+ * Note that since kernel 3.4 cache_register and cache_unregister
+ * are removed.
+*/
+static inline int _cache_register_net(struct cache_detail *cd, struct net *net)
+{
+#ifdef HAVE_CACHE_REGISTER
+ return cache_register(cd);
+#else
+ return cache_register_net(cd, net);
+#endif
+}
+static inline void _cache_unregister_net(struct cache_detail *cd,
+ struct net *net)
+{
+#ifdef HAVE_CACHE_REGISTER
+ cache_unregister(cd);
+#else
+ cache_unregister_net(cd, net);
+#endif
+}
/****************************************
- * rsi cache *
+ * rpc sec init (rsi) cache *
****************************************/
#define RSI_HASHBITS (6)
#define RSI_HASHMASK (RSI_HASHMAX - 1)
struct rsi {
- struct cache_head h;
- __u32 lustre_svc;
- __u64 nid;
- wait_queue_head_t waitq;
- rawobj_t in_handle, in_token;
- rawobj_t out_handle, out_token;
- int major_status, minor_status;
+ struct cache_head h;
+ __u32 lustre_svc;
+ __u64 nid;
+ char nm_name[LUSTRE_NODEMAP_NAME_LENGTH + 1];
+ wait_queue_head_t waitq;
+ rawobj_t in_handle, in_token;
+ rawobj_t out_handle, out_token;
+ int major_status, minor_status;
};
+#ifdef HAVE_CACHE_HEAD_HLIST
+static struct hlist_head rsi_table[RSI_HASHMAX];
+#else
static struct cache_head *rsi_table[RSI_HASHMAX];
+#endif
static struct cache_detail rsi_cache;
-#ifdef HAVE_SUNRPC_CACHE_V2
static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct rsi *item);
-#else
-static struct rsi *rsi_lookup(struct rsi *item, int set);
-#endif
static inline int rsi_hash(struct rsi *item)
{
rawobj_free(&rsi->out_token);
}
+/* See handle_channel_req() userspace for where the upcall data is read */
static void rsi_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
{
- struct rsi *rsi = container_of(h, struct rsi, h);
- __u64 index = 0;
-
- /* if in_handle is null, provide kernel suggestion */
- if (rsi->in_handle.len == 0)
- index = gss_get_next_ctx_index();
-
- qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
- sizeof(rsi->lustre_svc));
- qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
- qword_addhex(bpp, blen, (char *) &index, sizeof(index));
- qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
- qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
- (*bpp)[-1] = '\n';
+ struct rsi *rsi = container_of(h, struct rsi, h);
+ __u64 index = 0;
+
+ /* if in_handle is null, provide kernel suggestion */
+ if (rsi->in_handle.len == 0)
+ index = gss_get_next_ctx_index();
+
+ qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
+ sizeof(rsi->lustre_svc));
+ qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
+ qword_addhex(bpp, blen, (char *) &index, sizeof(index));
+ qword_addhex(bpp, blen, (char *) rsi->nm_name,
+ strlen(rsi->nm_name) + 1);
+ qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
+ qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
+ (*bpp)[-1] = '\n';
}
-static inline void __rsi_init(struct rsi *new, struct rsi *item)
+#ifdef HAVE_SUNRPC_UPCALL_HAS_3ARGS
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
{
- new->out_handle = RAWOBJ_EMPTY;
- new->out_token = RAWOBJ_EMPTY;
+ return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
+}
+#else
- new->in_handle = item->in_handle;
- item->in_handle = RAWOBJ_EMPTY;
- new->in_token = item->in_token;
- item->in_token = RAWOBJ_EMPTY;
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h);
+}
+#endif
- new->lustre_svc = item->lustre_svc;
- new->nid = item->nid;
- init_waitqueue_head(&new->waitq);
+static inline void __rsi_init(struct rsi *new, struct rsi *item)
+{
+ new->out_handle = RAWOBJ_EMPTY;
+ new->out_token = RAWOBJ_EMPTY;
+
+ new->in_handle = item->in_handle;
+ item->in_handle = RAWOBJ_EMPTY;
+ new->in_token = item->in_token;
+ item->in_token = RAWOBJ_EMPTY;
+
+ new->lustre_svc = item->lustre_svc;
+ new->nid = item->nid;
+ memcpy(new->nm_name, item->nm_name, sizeof(item->nm_name));
+ init_waitqueue_head(&new->waitq);
}
static inline void __rsi_update(struct rsi *new, struct rsi *item)
new->minor_status = item->minor_status;
}
-#ifdef HAVE_SUNRPC_CACHE_V2
-
static void rsi_put(struct kref *ref)
{
struct rsi *rsi = container_of(ref, struct rsi, h.ref);
- LASSERT(rsi->h.next == NULL);
+#ifdef HAVE_CACHE_HEAD_HLIST
+ LASSERT(rsi->h.cache_list.next == NULL);
+#else
+ LASSERT(rsi->h.next == NULL);
+#endif
rsi_free(rsi);
OBD_FREE_PTR(rsi);
}
rsip = rsi_update(&rsii, rsip);
status = 0;
out:
- rsi_free(&rsii);
- if (rsip) {
- wake_up_all(&rsip->waitq);
- cache_put(&rsip->h, &rsi_cache);
- } else {
- status = -ENOMEM;
- }
-
- if (status)
- CERROR("rsi parse error %d\n", status);
- RETURN(status);
-}
-
-#else /* !HAVE_SUNRPC_CACHE_V2 */
-
-static void rsi_put(struct cache_head *item, struct cache_detail *cd)
-{
- struct rsi *rsi = container_of(item, struct rsi, h);
-
- LASSERT(atomic_read(&item->refcnt) > 0);
-
- if (cache_put(item, cd)) {
- LASSERT(item->next == NULL);
- rsi_free(rsi);
- kfree(rsi); /* created by cache mgmt using kmalloc */
- }
-}
-
-static inline int rsi_match(struct rsi *item, struct rsi *tmp)
-{
- return __rsi_match(item, tmp);
-}
-
-static inline void rsi_init(struct rsi *new, struct rsi *item)
-{
- __rsi_init(new, item);
-}
-
-static inline void rsi_update(struct rsi *new, struct rsi *item)
-{
- __rsi_update(new, item);
-}
-
-static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
-{
- char *buf = mesg;
- char *ep;
- int len;
- struct rsi rsii, *rsip = NULL;
- time_t expiry;
- int status = -EINVAL;
- ENTRY;
-
-
- memset(&rsii, 0, sizeof(rsii));
-
- /* handle */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
- if (rawobj_alloc(&rsii.in_handle, buf, len)) {
- status = -ENOMEM;
- goto out;
- }
-
- /* token */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
- if (rawobj_alloc(&rsii.in_token, buf, len)) {
- status = -ENOMEM;
- goto out;
- }
-
- /* expiry */
- expiry = get_expiry(&mesg);
- if (expiry == 0)
- goto out;
-
- len = qword_get(&mesg, buf, mlen);
- if (len <= 0)
- goto out;
-
- /* major */
- rsii.major_status = simple_strtol(buf, &ep, 10);
- if (*ep)
- goto out;
-
- /* minor */
- len = qword_get(&mesg, buf, mlen);
- if (len <= 0)
- goto out;
- rsii.minor_status = simple_strtol(buf, &ep, 10);
- if (*ep)
- goto out;
-
- /* out_handle */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
- if (rawobj_alloc(&rsii.out_handle, buf, len)) {
- status = -ENOMEM;
- goto out;
- }
-
- /* out_token */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
- if (rawobj_alloc(&rsii.out_token, buf, len)) {
- status = -ENOMEM;
- goto out;
- }
-
- rsii.h.expiry_time = expiry;
- rsip = rsi_lookup(&rsii, 1);
- status = 0;
-out:
- rsi_free(&rsii);
- if (rsip) {
- wake_up_all(&rsip->waitq);
- rsi_put(&rsip->h, &rsi_cache);
- }
-
- if (status)
- CERROR("rsi parse error %d\n", status);
- RETURN(status);
+ rsi_free(&rsii);
+ if (rsip) {
+ wake_up_all(&rsip->waitq);
+ cache_put(&rsip->h, &rsi_cache);
+ } else {
+ status = -ENOMEM;
+ }
+
+ if (status)
+ CERROR("rsi parse error %d\n", status);
+ RETURN(status);
}
-#endif /* HAVE_SUNRPC_CACHE_V2 */
-
static struct cache_detail rsi_cache = {
- .hash_size = RSI_HASHMAX,
- .hash_table = rsi_table,
- .name = "auth.sptlrpc.init",
- .cache_put = rsi_put,
- .cache_request = rsi_request,
- .cache_parse = rsi_parse,
-#ifdef HAVE_SUNRPC_CACHE_V2
- .match = rsi_match,
- .init = rsi_init,
- .update = update_rsi,
- .alloc = rsi_alloc,
+ .hash_size = RSI_HASHMAX,
+ .hash_table = rsi_table,
+ .name = "auth.sptlrpc.init",
+ .cache_put = rsi_put,
+#ifndef HAVE_SUNRPC_UPCALL_HAS_3ARGS
+ .cache_request = rsi_request,
#endif
+ .cache_upcall = rsi_upcall,
+ .cache_parse = rsi_parse,
+ .match = rsi_match,
+ .init = rsi_init,
+ .update = update_rsi,
+ .alloc = rsi_alloc,
};
-#ifdef HAVE_SUNRPC_CACHE_V2
-
static struct rsi *rsi_lookup(struct rsi *item)
{
struct cache_head *ch;
return NULL;
}
-#else
-
-static DefineSimpleCacheLookup(rsi, 0)
-
-#endif
-
/****************************************
- * rsc cache *
+ * rpc sec context (rsc) cache *
****************************************/
#define RSC_HASHBITS (10)
struct gss_svc_ctx ctx;
};
+#ifdef HAVE_CACHE_HEAD_HLIST
+static struct hlist_head rsc_table[RSC_HASHMAX];
+#else
static struct cache_head *rsc_table[RSC_HASHMAX];
+#endif
static struct cache_detail rsc_cache;
-#ifdef HAVE_SUNRPC_CACHE_V2
static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct rsc *item);
-#else
-static struct rsc *rsc_lookup(struct rsc *item, int set);
-#endif
static void rsc_free(struct rsc *rsci)
{
tmp->ctx.gsc_mechctx = NULL;
memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
- spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+ spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
-#ifdef HAVE_SUNRPC_CACHE_V2
-
static void rsc_put(struct kref *ref)
{
struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+#ifdef HAVE_CACHE_HEAD_HLIST
+ LASSERT(rsci->h.cache_list.next == NULL);
+#else
LASSERT(rsci->h.next == NULL);
+#endif
rsc_free(rsci);
OBD_FREE_PTR(rsci);
}
}
rsci.ctx.gsc_remote = (tmp_int != 0);
- /* root user flag */
+ /* root user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get root user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_root = (tmp_int != 0);
+
+ /* mds user flag */
rv = get_int(&mesg, &tmp_int);
if (rv) {
- CERROR("fail to get oss user flag\n");
+ CERROR("fail to get mds user flag\n");
goto out;
}
- rsci.ctx.gsc_usr_root = (tmp_int != 0);
+ rsci.ctx.gsc_usr_mds = (tmp_int != 0);
- /* mds user flag */
+ /* oss user flag */
rv = get_int(&mesg, &tmp_int);
if (rv) {
- CERROR("fail to get mds user flag\n");
+ CERROR("fail to get oss user flag\n");
goto out;
}
- rsci.ctx.gsc_usr_mds = (tmp_int != 0);
+ rsci.ctx.gsc_usr_oss = (tmp_int != 0);
/* mapped uid */
rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
goto out;
if (rv == -ENOENT) {
CERROR("NOENT? set rsc entry negative\n");
- set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ set_bit(CACHE_NEGATIVE, &rsci.h.flags);
} else {
- rawobj_t tmp_buf;
- unsigned long ctx_expiry;
-
- /* gid */
- if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
- goto out;
-
- /* mech name */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
- gm = lgss_name_to_mech(buf);
- status = -EOPNOTSUPP;
- if (!gm)
- goto out;
-
- status = -EINVAL;
- /* mech-specific data: */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
-
- tmp_buf.len = len;
- tmp_buf.data = (unsigned char *)buf;
- if (lgss_import_sec_context(&tmp_buf, gm,
- &rsci.ctx.gsc_mechctx))
- goto out;
-
- /* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech. */
- if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
- CERROR("unable to get expire time, drop it\n");
- goto out;
- }
- expiry = (time_t) ctx_expiry;
+ rawobj_t tmp_buf;
+ time64_t ctx_expiry;
+
+ /* gid */
+ if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
+ goto out;
+
+ /* mech name */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ gm = lgss_name_to_mech(buf);
+ status = -EOPNOTSUPP;
+ if (!gm)
+ goto out;
+
+ status = -EINVAL;
+ /* mech-specific data: */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+
+ tmp_buf.len = len;
+ tmp_buf.data = (unsigned char *)buf;
+ if (lgss_import_sec_context(&tmp_buf, gm,
+ &rsci.ctx.gsc_mechctx))
+ goto out;
+
+ /* set to seconds since machine booted */
+ expiry = ktime_get_seconds();
+
+ /* currently the expiry time passed down from user-space
+	 * is invalid, here we retrieve it from mech.
+ */
+ if (lgss_inquire_context(rsci.ctx.gsc_mechctx,
+ (unsigned long *)&ctx_expiry)) {
+ CERROR("unable to get expire time, drop it\n");
+ goto out;
+ }
+
+ /* ctx_expiry is the number of seconds since Jan 1 1970.
+ * We want just the number of seconds into the future.
+ */
+ expiry += ctx_expiry - ktime_get_real_seconds();
}
rsci.h.expiry_time = expiry;
return status;
}
-#else /* !HAVE_SUNRPC_CACHE_V2 */
-
-static void rsc_put(struct cache_head *item, struct cache_detail *cd)
-{
- struct rsc *rsci = container_of(item, struct rsc, h);
-
- LASSERT(atomic_read(&item->refcnt) > 0);
-
- if (cache_put(item, cd)) {
- LASSERT(item->next == NULL);
- rsc_free(rsci);
- kfree(rsci); /* created by cache mgmt using kmalloc */
- }
-}
-
-static inline int rsc_match(struct rsc *new, struct rsc *tmp)
-{
- return __rsc_match(new, tmp);
-}
-
-static inline void rsc_init(struct rsc *new, struct rsc *tmp)
-{
- __rsc_init(new, tmp);
-}
-
-static inline void rsc_update(struct rsc *new, struct rsc *tmp)
-{
- __rsc_update(new, tmp);
-}
-
-static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
-{
- char *buf = mesg;
- int len, rv, tmp_int;
- struct rsc rsci, *rscp = NULL;
- time_t expiry;
- int status = -EINVAL;
-
- memset(&rsci, 0, sizeof(rsci));
-
- /* context handle */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0) goto out;
- status = -ENOMEM;
- if (rawobj_alloc(&rsci.handle, buf, len))
- goto out;
-
- rsci.h.flags = 0;
- /* expiry */
- expiry = get_expiry(&mesg);
- status = -EINVAL;
- if (expiry == 0)
- goto out;
-
- /* remote flag */
- rv = get_int(&mesg, &tmp_int);
- if (rv) {
- CERROR("fail to get remote flag\n");
- goto out;
- }
- rsci.ctx.gsc_remote = (tmp_int != 0);
-
- /* root user flag */
- rv = get_int(&mesg, &tmp_int);
- if (rv) {
- CERROR("fail to get oss user flag\n");
- goto out;
- }
- rsci.ctx.gsc_usr_root = (tmp_int != 0);
-
- /* mds user flag */
- rv = get_int(&mesg, &tmp_int);
- if (rv) {
- CERROR("fail to get mds user flag\n");
- goto out;
- }
- rsci.ctx.gsc_usr_mds = (tmp_int != 0);
-
- /* mapped uid */
- rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
- if (rv) {
- CERROR("fail to get mapped uid\n");
- goto out;
- }
-
- /* uid, or NEGATIVE */
- rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
- if (rv == -EINVAL)
- goto out;
- if (rv == -ENOENT) {
- CERROR("NOENT? set rsc entry negative\n");
- set_bit(CACHE_NEGATIVE, &rsci.h.flags);
- } else {
- struct gss_api_mech *gm;
- rawobj_t tmp_buf;
- unsigned long ctx_expiry;
-
- /* gid */
- if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
- goto out;
-
- /* mech name */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
- gm = lgss_name_to_mech(buf);
- status = -EOPNOTSUPP;
- if (!gm)
- goto out;
-
- status = -EINVAL;
- /* mech-specific data: */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0) {
- lgss_mech_put(gm);
- goto out;
- }
- tmp_buf.len = len;
- tmp_buf.data = (unsigned char *)buf;
- if (lgss_import_sec_context(&tmp_buf, gm,
- &rsci.ctx.gsc_mechctx)) {
- lgss_mech_put(gm);
- goto out;
- }
-
- /* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech. */
- if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
- CERROR("unable to get expire time, drop it\n");
- lgss_mech_put(gm);
- goto out;
- }
- expiry = (time_t) ctx_expiry;
-
- lgss_mech_put(gm);
- }
-
- rsci.h.expiry_time = expiry;
- rscp = rsc_lookup(&rsci, 1);
- status = 0;
-out:
- rsc_free(&rsci);
- if (rscp)
- rsc_put(&rscp->h, &rsc_cache);
-
- if (status)
- CERROR("parse rsc error %d\n", status);
- return status;
-}
-
-#endif /* HAVE_SUNRPC_CACHE_V2 */
-
-
static struct cache_detail rsc_cache = {
.hash_size = RSC_HASHMAX,
.hash_table = rsc_table,
.name = "auth.sptlrpc.context",
.cache_put = rsc_put,
.cache_parse = rsc_parse,
-#ifdef HAVE_SUNRPC_CACHE_V2
.match = rsc_match,
.init = rsc_init,
.update = update_rsc,
.alloc = rsc_alloc,
-#endif
};
-#ifdef HAVE_SUNRPC_CACHE_V2
-
static struct rsc *rsc_lookup(struct rsc *item)
{
struct cache_head *ch;
#define COMPAT_RSC_PUT(item, cd) cache_put((item), (cd))
-#else
-
-static DefineSimpleCacheLookup(rsc, 0);
-
-#define COMPAT_RSC_PUT(item, cd) rsc_put((item), (cd))
-
-#endif
-
/****************************************
* rsc cache flush *
****************************************/
static void rsc_flush(rsc_entry_match *match, long data)
{
- struct cache_head **ch;
+#ifdef HAVE_CACHE_HEAD_HLIST
+ struct cache_head *ch = NULL;
+ struct hlist_head *head;
+#else
+ struct cache_head **ch;
+#endif
struct rsc *rscp;
int n;
ENTRY;
- write_lock(&rsc_cache.hash_lock);
+ write_lock(&rsc_cache.hash_lock);
for (n = 0; n < RSC_HASHMAX; n++) {
- for (ch = &rsc_cache.hash_table[n]; *ch;) {
- rscp = container_of(*ch, struct rsc, h);
+#ifdef HAVE_CACHE_HEAD_HLIST
+ head = &rsc_cache.hash_table[n];
+ hlist_for_each_entry(ch, head, cache_list) {
+ rscp = container_of(ch, struct rsc, h);
+#else
+ for (ch = &rsc_cache.hash_table[n]; *ch;) {
+ rscp = container_of(*ch, struct rsc, h);
+#endif
if (!match(rscp, data)) {
- ch = &((*ch)->next);
+#ifndef HAVE_CACHE_HEAD_HLIST
+ ch = &((*ch)->next);
+#endif
continue;
}
/* it seems simply set NEGATIVE doesn't work */
- *ch = (*ch)->next;
- rscp->h.next = NULL;
+#ifdef HAVE_CACHE_HEAD_HLIST
+ hlist_del_init(&ch->cache_list);
+#else
+ *ch = (*ch)->next;
+ rscp->h.next = NULL;
+#endif
cache_get(&rscp->h);
- set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+ set_bit(CACHE_NEGATIVE, &rscp->h.flags);
COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
rsc_cache.entries--;
}
}
- write_unlock(&rsc_cache.hash_lock);
+ write_unlock(&rsc_cache.hash_lock);
EXIT;
}
{
rsc_flush_target(target);
}
-EXPORT_SYMBOL(gss_secsvc_flush);
static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
if (rawobj_dup(&rsci.handle, handle))
return NULL;
-#ifdef HAVE_SUNRPC_CACHE_V2
found = rsc_lookup(&rsci);
-#else
- found = rsc_lookup(&rsci, 0);
-#endif
rsc_free(&rsci);
if (!found)
return NULL;
return found;
}
-#ifdef HAVE_SUNRPC_CACHE_V2
-
int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
struct gss_sec *gsec,
struct gss_cli_ctx *gctx)
}
rsci.h.expiry_time = (time_t) ctx_expiry;
- /* FIXME */
- rsci.ctx.gsc_usr_root = 1;
- rsci.ctx.gsc_usr_mds= 1;
- rsci.ctx.gsc_reverse = 1;
+ switch (imp->imp_obd->u.cli.cl_sp_to) {
+ case LUSTRE_SP_MDT:
+ rsci.ctx.gsc_usr_mds = 1;
+ break;
+ case LUSTRE_SP_OST:
+ rsci.ctx.gsc_usr_oss = 1;
+ break;
+ case LUSTRE_SP_CLI:
+ rsci.ctx.gsc_usr_root = 1;
+ default:
+ break;
+ }
rscp = rsc_update(&rsci, rscp);
if (rscp == NULL)
rscp->target = imp->imp_obd;
rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
- CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
+ CWARN("create reverse svc ctx %p to %s: idx %#llx\n",
&rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
rc = 0;
out:
rsc_free(&rsci);
if (rc)
- CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
- gsec->gs_rvs_hdl, rc);
- RETURN(rc);
-}
-
-#else /* !HAVE_SUNRPC_CACHE_V2 */
-
-int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
- struct gss_sec *gsec,
- struct gss_cli_ctx *gctx)
-{
- struct rsc rsci, *rscp;
- unsigned long ctx_expiry;
- __u32 major;
- int rc;
- ENTRY;
-
- memset(&rsci, 0, sizeof(rsci));
-
- if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
- sizeof(gsec->gs_rvs_hdl)))
- GOTO(out, rc = -ENOMEM);
-
- major = lgss_copy_reverse_context(gctx->gc_mechctx,
- &rsci.ctx.gsc_mechctx);
- if (major != GSS_S_COMPLETE)
- GOTO(out, rc = -ENOMEM);
-
- if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
- CERROR("unable to get expire time, drop it\n");
- GOTO(out, rc = -ENOMEM);
- }
- rsci.h.expiry_time = (time_t) ctx_expiry;
-
- /* FIXME */
- rsci.ctx.gsc_usr_root = 1;
- rsci.ctx.gsc_usr_mds= 1;
- rsci.ctx.gsc_reverse = 1;
-
- rscp = rsc_lookup(&rsci, 1);
- if (rscp == NULL) {
- CERROR("rsc lookup failed\n");
- GOTO(out, rc = -ENOMEM);
- }
-
- rscp->target = imp->imp_obd;
- rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
-
- CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
- &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
- rsc_put(&rscp->h, &rsc_cache);
- rc = 0;
-out:
- rsc_free(&rsci);
- if (rc)
- CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
+ CERROR("create reverse svc ctx: idx %#llx, rc %d\n",
gsec->gs_rvs_hdl, rc);
RETURN(rc);
}
-#endif /* HAVE_SUNRPC_CACHE_V2 */
-
int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
{
const cfs_time_t expire = 20;
static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
- struct gss_svc_reqctx *grctx,
- struct gss_wire_ctx *gw,
- struct obd_device *target,
- __u32 lustre_svc,
- rawobj_t *rvs_hdl,
- rawobj_t *in_token)
+ struct gss_svc_reqctx *grctx,
+ struct gss_wire_ctx *gw,
+ struct obd_device *target,
+ __u32 lustre_svc,
+ rawobj_t *rvs_hdl,
+ rawobj_t *in_token)
{
- struct ptlrpc_reply_state *rs;
- struct rsc *rsci = NULL;
- struct rsi *rsip = NULL, rsikey;
- wait_queue_t wait;
- int replen = sizeof(struct ptlrpc_body);
- struct gss_rep_header *rephdr;
- int first_check = 1;
- int rc = SECSVC_DROP;
- ENTRY;
-
- memset(&rsikey, 0, sizeof(rsikey));
- rsikey.lustre_svc = lustre_svc;
- rsikey.nid = (__u64) req->rq_peer.nid;
+ struct ptlrpc_reply_state *rs;
+ struct rsc *rsci = NULL;
+ struct rsi *rsip = NULL, rsikey;
+ wait_queue_t wait;
+ int replen = sizeof(struct ptlrpc_body);
+ struct gss_rep_header *rephdr;
+ int first_check = 1;
+ int rc = SECSVC_DROP;
+ ENTRY;
+
+ memset(&rsikey, 0, sizeof(rsikey));
+ rsikey.lustre_svc = lustre_svc;
+ rsikey.nid = (__u64) req->rq_peer.nid;
+ nodemap_test_nid(req->rq_peer.nid, rsikey.nm_name,
+ sizeof(rsikey.nm_name));
/* duplicate context handle. for INIT it always 0 */
if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
GOTO(out, rc);
}
-#ifdef HAVE_SUNRPC_CACHE_V2
rsip = rsi_lookup(&rsikey);
-#else
- rsip = rsi_lookup(&rsikey, 0);
-#endif
rsi_free(&rsikey);
if (!rsip) {
CERROR("error in rsi_lookup.\n");
GOTO(out, rc);
}
- cache_get(&rsip->h); /* take an extra ref */
- init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&rsip->waitq, &wait);
+ cache_get(&rsip->h); /* take an extra ref */
+ init_waitqueue_head(&rsip->waitq);
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&rsip->waitq, &wait);
cache_check:
- /* Note each time cache_check() will drop a reference if return
- * non-zero. We hold an extra reference on initial rsip, but must
- * take care of following calls. */
- rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
- switch (rc) {
- case -EAGAIN: {
- int valid;
-
- if (first_check) {
- first_check = 0;
-
- read_lock(&rsi_cache.hash_lock);
- valid = test_bit(CACHE_VALID, &rsip->h.flags);
- if (valid == 0)
- set_current_state(TASK_INTERRUPTIBLE);
- read_unlock(&rsi_cache.hash_lock);
-
- if (valid == 0)
- schedule_timeout(GSS_SVC_UPCALL_TIMEOUT * HZ);
-
- cache_get(&rsip->h);
- goto cache_check;
- }
- CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
- break;
- }
- case -ENOENT:
- CWARN("cache_check return ENOENT, drop\n");
- break;
- case 0:
- /* if not the first check, we have to release the extra
- * reference we just added on it. */
- if (!first_check)
- cache_put(&rsip->h, &rsi_cache);
- CDEBUG(D_SEC, "cache_check is good\n");
- break;
- }
-
- remove_wait_queue(&rsip->waitq, &wait);
- cache_put(&rsip->h, &rsi_cache);
-
- if (rc)
- GOTO(out, rc = SECSVC_DROP);
+ /* Note each time cache_check() will drop a reference if return
+ * non-zero. We hold an extra reference on initial rsip, but must
+ * take care of following calls. */
+ rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
+ switch (rc) {
+ case -ETIMEDOUT:
+ case -EAGAIN: {
+ int valid;
+
+ if (first_check) {
+ first_check = 0;
+
+ read_lock(&rsi_cache.hash_lock);
+ valid = test_bit(CACHE_VALID, &rsip->h.flags);
+ if (valid == 0)
+ set_current_state(TASK_INTERRUPTIBLE);
+ read_unlock(&rsi_cache.hash_lock);
+
+ if (valid == 0) {
+ unsigned long jiffies;
+ jiffies = msecs_to_jiffies(MSEC_PER_SEC *
+ GSS_SVC_UPCALL_TIMEOUT);
+ schedule_timeout(jiffies);
+ }
+ cache_get(&rsip->h);
+ goto cache_check;
+ }
+ CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
+ break;
+ }
+ case -ENOENT:
+ CDEBUG(D_SEC, "cache_check return ENOENT, drop\n");
+ break;
+ case 0:
+ /* if not the first check, we have to release the extra
+ * reference we just added on it. */
+ if (!first_check)
+ cache_put(&rsip->h, &rsi_cache);
+ CDEBUG(D_SEC, "cache_check is good\n");
+ break;
+ }
+
+ remove_wait_queue(&rsip->waitq, &wait);
+ cache_put(&rsip->h, &rsi_cache);
+
+ if (rc)
+ GOTO(out, rc = SECSVC_DROP);
rc = SECSVC_DROP;
rsci = gss_svc_searchbyctx(&rsip->out_handle);
if (!rsci) {
CERROR("authentication failed\n");
- if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
- rc = SECSVC_COMPLETE;
+ /* gss mechanism returned major and minor code so we return
+ * those in error message */
+ if (!gss_pack_err_notify(req, rsip->major_status,
+ rsip->minor_status))
+ rc = SECSVC_COMPLETE;
GOTO(out, rc);
} else {
}
grctx->src_init = 1;
- grctx->src_reserve_len = size_round4(rsip->out_token.len);
+ grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);
- rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
+ rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
if (rc) {
CERROR("failed to pack reply: %d\n", rc);
GOTO(out, rc = SECSVC_DROP);
rc = SECSVC_OK;
out:
- /* it looks like here we should put rsip also, but this mess up
- * with NFS cache mgmt code... FIXME */
-#if 0
- if (rsip)
- rsi_put(&rsip->h, &rsi_cache);
-#endif
-
- if (rsci) {
- /* if anything went wrong, we don't keep the context too */
- if (rc != SECSVC_OK)
- set_bit(CACHE_NEGATIVE, &rsci->h.flags);
- else
- CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
- gss_handle_to_u64(&rsci->handle));
-
- COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
- }
- RETURN(rc);
+ /* it looks like here we should put rsip also, but this mess up
+ * with NFS cache mgmt code... FIXME
+ * something like:
+ * if (rsip)
+ * rsi_put(&rsip->h, &rsi_cache); */
+
+ if (rsci) {
+ /* if anything went wrong, we don't keep the context too */
+ if (rc != SECSVC_OK)
+ set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ else
+ CDEBUG(D_SEC, "create rsc with idx %#llx\n",
+ gss_handle_to_u64(&rsci->handle));
+
+ COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
+ }
+ RETURN(rc);
}
struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
rsc = gss_svc_searchbyctx(&gw->gw_handle);
if (!rsc) {
- CWARN("Invalid gss ctx idx "LPX64" from %s\n",
+ CWARN("Invalid gss ctx idx %#llx from %s\n",
gss_handle_to_u64(&gw->gw_handle),
libcfs_nid2str(req->rq_peer.nid));
return NULL;
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
/* can't be found */
- set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ set_bit(CACHE_NEGATIVE, &rsc->h.flags);
/* to be removed at next scan */
rsc->h.expiry_time = 1;
}
int __init gss_init_svc_upcall(void)
{
- int i;
-
- cache_register(&rsi_cache);
- cache_register(&rsc_cache);
-
- /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
- * the init upcall channel, otherwise there's big chance that the first
- * upcall issued before the channel be opened thus nfsv4 cache code will
- * drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds. */
- for (i = 0; i < 6; i++) {
- if (atomic_read(&rsi_cache.readers) > 0)
- break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(HZ >= 4);
- schedule_timeout(HZ / 4);
- }
-
- if (atomic_read(&rsi_cache.readers) == 0)
- CWARN("Init channel is not opened by lsvcgssd, following "
- "request might be dropped until lsvcgssd is active\n");
-
- /* this helps reducing context index confliction. after server reboot,
- * conflicting request from clients might be filtered out by initial
- * sequence number checking, thus no chance to sent error notification
- * back to clients. */
- get_random_bytes(&__ctx_index, sizeof(__ctx_index));
-
- return 0;
+ int i, rc;
+
+ spin_lock_init(&__ctx_index_lock);
+ /*
+ * this helps reducing context index confliction. after server reboot,
+ * conflicting request from clients might be filtered out by initial
+	 * sequence number checking, thus no chance to send error notification
+ * back to clients.
+ */
+ cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+
+ rc = _cache_register_net(&rsi_cache, &init_net);
+ if (rc != 0)
+ return rc;
+
+ rc = _cache_register_net(&rsc_cache, &init_net);
+ if (rc != 0) {
+ _cache_unregister_net(&rsi_cache, &init_net);
+ return rc;
+ }
+
+ /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
+ * the init upcall channel, otherwise there's big chance that the first
+ * upcall issued before the channel be opened thus nfsv4 cache code will
+	 * drop the request directly, thus lead to unnecessary recovery time.
+	 * here we wait at maximum 1.5 seconds. */
+ for (i = 0; i < 6; i++) {
+ if (atomic_read(&rsi_cache.readers) > 0)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ LASSERT(msecs_to_jiffies(MSEC_PER_SEC) >= 4);
+ schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
+ }
+
+ if (atomic_read(&rsi_cache.readers) == 0)
+ CWARN("Init channel is not opened by lsvcgssd, following "
+ "request might be dropped until lsvcgssd is active\n");
+
+ return 0;
}
-void __exit gss_exit_svc_upcall(void)
+void gss_exit_svc_upcall(void)
{
- int rc;
-
- cache_purge(&rsi_cache);
- if ((rc = cache_unregister(&rsi_cache)))
- CERROR("unregister rsi cache: %d\n", rc);
+ cache_purge(&rsi_cache);
+ _cache_unregister_net(&rsi_cache, &init_net);
- cache_purge(&rsc_cache);
- if ((rc = cache_unregister(&rsc_cache)))
- CERROR("unregister rsc cache: %d\n", rc);
+ cache_purge(&rsc_cache);
+ _cache_unregister_net(&rsc_cache, &init_net);
}