-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* Modifications for Lustre
- * Copyright 2004 - 2006, Cluster File Systems, Inc.
- * All rights reserved
+ *
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ *
+ * Copyright (c) 2012, 2014, Intel Corporation.
+ *
* Author: Eric Mei <ericm@clusterfs.com>
*/
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/hash.h>
-#else
-#include <liblustre.h>
-#endif
-
+#include <linux/mutex.h>
#include <linux/sunrpc/cache.h>
+#include <net/sock.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre/lustre_idl.h>
-#include <lustre_net.h>
#include <lustre_import.h>
+#include <lustre_net.h>
+#include <lustre_nodemap.h>
#include <lustre_sec.h>
#include "gss_err.h"
#define GSS_SVC_UPCALL_TIMEOUT (20)
-static spinlock_t __ctx_index_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t __ctx_index_lock;
static __u64 __ctx_index;
__u64 gss_get_next_ctx_index(void)
{
- __u64 idx;
+ __u64 idx;
- spin_lock(&__ctx_index_lock);
- idx = __ctx_index++;
- spin_unlock(&__ctx_index_lock);
+ spin_lock(&__ctx_index_lock);
+ idx = __ctx_index++;
+ spin_unlock(&__ctx_index_lock);
- return idx;
+ return idx;
}
-static inline
-unsigned long hash_mem(char *buf, int length, int bits)
+static inline unsigned long hash_mem(char *buf, int length, int bits)
{
- unsigned long hash = 0;
- unsigned long l = 0;
- int len = 0;
- unsigned char c;
-
- do {
- if (len == length) {
- c = (char) len;
- len = -1;
- } else
- c = *buf++;
-
- l = (l << 8) | c;
- len++;
-
- if ((len & (BITS_PER_LONG/8-1)) == 0)
- hash = hash_long(hash^l, BITS_PER_LONG);
- } while (len);
-
- return hash >> (BITS_PER_LONG - bits);
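+ /* fold the buffer into the hash one long's worth of bytes at a time;
+ * appending the length as a final byte ensures the trailing partial
+ * word is always hashed and also terminates the loop */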
+ unsigned long hash = 0;
+ unsigned long l = 0;
+ int len = 0;
+ unsigned char c;
+
+ do {
+ if (len == length) {
+ c = (char) len;
+ len = -1;
+ } else
+ c = *buf++;
+
+ l = (l << 8) | c;
+ len++;
+
+ if ((len & (BITS_PER_LONG/8-1)) == 0)
+ hash = hash_long(hash^l, BITS_PER_LONG);
+ } while (len);
+
+ return hash >> (BITS_PER_LONG - bits);
}
+/* This compatibility can be removed once kernel 3.3 is used,
+ * since cache_register_net/cache_unregister_net are exported.
+ * Note that since kernel 3.4 cache_register and cache_unregister
+ * are removed.
+ */
+static inline int _cache_register_net(struct cache_detail *cd, struct net *net)
+{
+#ifdef HAVE_CACHE_REGISTER
+ return cache_register(cd);
+#else
+ return cache_register_net(cd, net);
+#endif
+}
+static inline void _cache_unregister_net(struct cache_detail *cd,
+ struct net *net)
+{
+#ifdef HAVE_CACHE_REGISTER
+ cache_unregister(cd);
+#else
+ cache_unregister_net(cd, net);
+#endif
+}
/****************************************
- * rsi cache *
+ * rpc sec init (rsi) cache *
****************************************/
#define RSI_HASHBITS (6)
#define RSI_HASHMAX (1 << RSI_HASHBITS)
#define RSI_HASHMASK (RSI_HASHMAX - 1)
struct rsi {
- struct cache_head h;
- __u32 lustre_svc;
- __u64 nid;
- wait_queue_head_t waitq;
- rawobj_t in_handle, in_token;
- rawobj_t out_handle, out_token;
- int major_status, minor_status;
+ struct cache_head h;
+ __u32 lustre_svc;
+ __u64 nid;
+ char nm_name[LUSTRE_NODEMAP_NAME_LENGTH + 1];
+ wait_queue_head_t waitq;
+ rawobj_t in_handle, in_token;
+ rawobj_t out_handle, out_token;
+ int major_status, minor_status;
};
+#ifdef HAVE_CACHE_HEAD_HLIST
+static struct hlist_head rsi_table[RSI_HASHMAX];
+#else
static struct cache_head *rsi_table[RSI_HASHMAX];
+#endif
static struct cache_detail rsi_cache;
-static struct rsi *rsi_lookup(struct rsi *item, int set);
+static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
+static struct rsi *rsi_lookup(struct rsi *item);
-static
-void rsi_free(struct rsi *rsi)
-{
- rawobj_free(&rsi->in_handle);
- rawobj_free(&rsi->in_token);
- rawobj_free(&rsi->out_handle);
- rawobj_free(&rsi->out_token);
-}
-
-static
-void rsi_put(struct cache_head *item, struct cache_detail *cd)
-{
- struct rsi *rsi = container_of(item, struct rsi, h);
-
- LASSERT(atomic_read(&item->refcnt) > 0);
-
- if (cache_put(item, cd)) {
- LASSERT(item->next == NULL);
- rsi_free(rsi);
- kfree(rsi); /* created by cache mgmt using kmalloc */
- }
-}
-
-static inline
-int rsi_hash(struct rsi *item)
+static inline int rsi_hash(struct rsi *item)
{
return hash_mem((char *)item->in_handle.data, item->in_handle.len,
RSI_HASHBITS) ^
hash_mem((char *)item->in_token.data, item->in_token.len,
RSI_HASHBITS);
}
-static inline
-int rsi_match(struct rsi *item, struct rsi *tmp)
+static inline int __rsi_match(struct rsi *item, struct rsi *tmp)
{
return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
rawobj_equal(&item->in_token, &tmp->in_token));
}
-static
-void rsi_request(struct cache_detail *cd,
- struct cache_head *h,
- char **bpp, int *blen)
+static void rsi_free(struct rsi *rsi)
{
- struct rsi *rsi = container_of(h, struct rsi, h);
- __u64 index = 0;
-
- /* if in_handle is null, provide kernel suggestion */
- if (rsi->in_handle.len == 0)
- index = gss_get_next_ctx_index();
+ rawobj_free(&rsi->in_handle);
+ rawobj_free(&rsi->in_token);
+ rawobj_free(&rsi->out_handle);
+ rawobj_free(&rsi->out_token);
+}
- qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
- sizeof(rsi->lustre_svc));
- qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
- qword_addhex(bpp, blen, (char *) &index, sizeof(index));
- qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
- qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
- (*bpp)[-1] = '\n';
+/* see handle_channel_req() in userspace for where the upcall data is read */
+static void rsi_request(struct cache_detail *cd,
+ struct cache_head *h,
+ char **bpp, int *blen)
+{
+ struct rsi *rsi = container_of(h, struct rsi, h);
+ __u64 index = 0;
+
+ /* if in_handle is null, provide kernel suggestion */
+ if (rsi->in_handle.len == 0)
+ index = gss_get_next_ctx_index();
+
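+ /* each field below becomes one hex-encoded, space-separated word in
+ * the upcall record; the final space is overwritten with '\n' to
+ * terminate the line */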
+ qword_addhex(bpp, blen, (char *) &rsi->lustre_svc,
+ sizeof(rsi->lustre_svc));
+ qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
+ qword_addhex(bpp, blen, (char *) &index, sizeof(index));
+ qword_addhex(bpp, blen, (char *) rsi->nm_name,
+ strlen(rsi->nm_name) + 1);
+ qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
+ qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
+ (*bpp)[-1] = '\n';
}
-static inline
-void rsi_init(struct rsi *new, struct rsi *item)
+#ifdef HAVE_SUNRPC_UPCALL_HAS_3ARGS
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
{
- new->out_handle = RAWOBJ_EMPTY;
- new->out_token = RAWOBJ_EMPTY;
+ return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
+}
+#else
- new->in_handle = item->in_handle;
- item->in_handle = RAWOBJ_EMPTY;
- new->in_token = item->in_token;
- item->in_token = RAWOBJ_EMPTY;
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h);
+}
+#endif
- new->lustre_svc = item->lustre_svc;
- new->nid = item->nid;
- init_waitqueue_head(&new->waitq);
+static inline void __rsi_init(struct rsi *new, struct rsi *item)
+{
+ new->out_handle = RAWOBJ_EMPTY;
+ new->out_token = RAWOBJ_EMPTY;
+
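+ /* steal the rawobj payloads from @item so that rsi_free() on the
+ * caller's temporary copy will not free memory now owned by @new */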
+ new->in_handle = item->in_handle;
+ item->in_handle = RAWOBJ_EMPTY;
+ new->in_token = item->in_token;
+ item->in_token = RAWOBJ_EMPTY;
+
+ new->lustre_svc = item->lustre_svc;
+ new->nid = item->nid;
+ memcpy(new->nm_name, item->nm_name, sizeof(item->nm_name));
+ init_waitqueue_head(&new->waitq);
}
-static inline
-void rsi_update(struct rsi *new, struct rsi *item)
+static inline void __rsi_update(struct rsi *new, struct rsi *item)
{
LASSERT(new->out_handle.len == 0);
LASSERT(new->out_token.len == 0);
new->minor_status = item->minor_status;
}
-static
-int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
+static void rsi_put(struct kref *ref)
+{
+ struct rsi *rsi = container_of(ref, struct rsi, h.ref);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+ LASSERT(rsi->h.cache_list.next == NULL);
+#else
+ LASSERT(rsi->h.next == NULL);
+#endif
+ rsi_free(rsi);
+ OBD_FREE_PTR(rsi);
+}
+
+static int rsi_match(struct cache_head *a, struct cache_head *b)
+{
+ struct rsi *item = container_of(a, struct rsi, h);
+ struct rsi *tmp = container_of(b, struct rsi, h);
+
+ return __rsi_match(item, tmp);
+}
+
+static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
+{
+ struct rsi *new = container_of(cnew, struct rsi, h);
+ struct rsi *item = container_of(citem, struct rsi, h);
+
+ __rsi_init(new, item);
+}
+
+static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
+{
+ struct rsi *new = container_of(cnew, struct rsi, h);
+ struct rsi *item = container_of(citem, struct rsi, h);
+
+ __rsi_update(new, item);
+}
+
+static struct cache_head *rsi_alloc(void)
+{
+ struct rsi *rsi;
+
+ OBD_ALLOC_PTR(rsi);
+ if (rsi)
+ return &rsi->h;
+ else
+ return NULL;
+}
+
+static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
char *buf = mesg;
char *ep;
goto out;
}
+ rsip = rsi_lookup(&rsii);
+ if (!rsip)
+ goto out;
+
+ rsii.h.flags = 0;
/* expiry */
expiry = get_expiry(&mesg);
if (expiry == 0)
goto out;
}
rsii.h.expiry_time = expiry;
- rsip = rsi_lookup(&rsii, 1);
+ rsip = rsi_update(&rsii, rsip);
status = 0;
out:
- rsi_free(&rsii);
- if (rsip) {
- wake_up_all(&rsip->waitq);
- rsi_put(&rsip->h, &rsi_cache);
- }
-
- if (status)
- CERROR("rsi parse error %d\n", status);
- RETURN(status);
+ rsi_free(&rsii);
+ if (rsip) {
+ wake_up_all(&rsip->waitq);
+ cache_put(&rsip->h, &rsi_cache);
+ } else {
+ status = -ENOMEM;
+ }
+
+ if (status)
+ CERROR("rsi parse error %d\n", status);
+ RETURN(status);
}
static struct cache_detail rsi_cache = {
- .hash_size = RSI_HASHMAX,
- .hash_table = rsi_table,
- .name = "auth.ptlrpcs.init",
- .cache_put = rsi_put,
- .cache_request = rsi_request,
- .cache_parse = rsi_parse,
+ .hash_size = RSI_HASHMAX,
+ .hash_table = rsi_table,
+ .name = "auth.sptlrpc.init",
+ .cache_put = rsi_put,
+#ifndef HAVE_SUNRPC_UPCALL_HAS_3ARGS
+ .cache_request = rsi_request,
+#endif
+ .cache_upcall = rsi_upcall,
+ .cache_parse = rsi_parse,
+ .match = rsi_match,
+ .init = rsi_init,
+ .update = update_rsi,
+ .alloc = rsi_alloc,
};
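+/* on the kernels we support this cache appears as
+ * /proc/net/rpc/auth.sptlrpc.init/: rsi_upcall() pushes request records
+ * into its "channel" file, and lsvcgssd writes the reply back through the
+ * same file, which lands in rsi_parse() */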
-static DefineSimpleCacheLookup(rsi, 0)
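+/* typed wrappers around the sunrpc cache primitives: sunrpc_cache_lookup()
+ * returns a referenced entry, allocating a fresh one through .alloc/.init
+ * if no match exists, and sunrpc_cache_update() installs the new payload
+ * and drops the caller's reference on the old entry */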
+static struct rsi *rsi_lookup(struct rsi *item)
+{
+ struct cache_head *ch;
+ int hash = rsi_hash(item);
+
+ ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
+ if (ch)
+ return container_of(ch, struct rsi, h);
+ else
+ return NULL;
+}
+
+static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
+{
+ struct cache_head *ch;
+ int hash = rsi_hash(new);
+
+ ch = sunrpc_cache_update(&rsi_cache, &new->h, &old->h, hash);
+ if (ch)
+ return container_of(ch, struct rsi, h);
+ else
+ return NULL;
+}
/****************************************
- * rsc cache *
+ * rpc sec context (rsc) cache *
****************************************/
#define RSC_HASHBITS (10)
struct gss_svc_ctx ctx;
};
+#ifdef HAVE_CACHE_HEAD_HLIST
+static struct hlist_head rsc_table[RSC_HASHMAX];
+#else
static struct cache_head *rsc_table[RSC_HASHMAX];
+#endif
static struct cache_detail rsc_cache;
-static struct rsc *rsc_lookup(struct rsc *item, int set);
+static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
+static struct rsc *rsc_lookup(struct rsc *item);
-static
-void rsc_free(struct rsc *rsci)
+static void rsc_free(struct rsc *rsci)
{
rawobj_free(&rsci->handle);
rawobj_free(&rsci->ctx.gsc_rvs_hdl);
lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
}
-static
-void rsc_put(struct cache_head *item, struct cache_detail *cd)
-{
- struct rsc *rsci = container_of(item, struct rsc, h);
-
- LASSERT(atomic_read(&item->refcnt) > 0);
-
- if (cache_put(item, cd)) {
- LASSERT(item->next == NULL);
- rsc_free(rsci);
- kfree(rsci); /* created by cache mgmt using kmalloc */
- }
-}
-
-static inline
-int rsc_hash(struct rsc *rsci)
+static inline int rsc_hash(struct rsc *rsci)
{
return hash_mem((char *)rsci->handle.data,
rsci->handle.len, RSC_HASHBITS);
}
-static inline
-int rsc_match(struct rsc *new, struct rsc *tmp)
+static inline int __rsc_match(struct rsc *new, struct rsc *tmp)
{
return rawobj_equal(&new->handle, &tmp->handle);
}
-static inline
-void rsc_init(struct rsc *new, struct rsc *tmp)
+static inline void __rsc_init(struct rsc *new, struct rsc *tmp)
{
new->handle = tmp->handle;
tmp->handle = RAWOBJ_EMPTY;
new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
}
-static inline
-void rsc_update(struct rsc *new, struct rsc *tmp)
+static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
{
new->ctx = tmp->ctx;
tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
tmp->ctx.gsc_mechctx = NULL;
memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
- spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+ spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+}
+
+static void rsc_put(struct kref *ref)
+{
+ struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+ LASSERT(rsci->h.cache_list.next == NULL);
+#else
+ LASSERT(rsci->h.next == NULL);
+#endif
+ rsc_free(rsci);
+ OBD_FREE_PTR(rsci);
+}
+
+static int rsc_match(struct cache_head *a, struct cache_head *b)
+{
+ struct rsc *new = container_of(a, struct rsc, h);
+ struct rsc *tmp = container_of(b, struct rsc, h);
+
+ return __rsc_match(new, tmp);
}
-static
-int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
+static void rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
{
- char *buf = mesg;
- int len, rv, tmp_int;
- struct rsc rsci, *rscp = NULL;
- time_t expiry;
- int status = -EINVAL;
+ struct rsc *new = container_of(cnew, struct rsc, h);
+ struct rsc *tmp = container_of(ctmp, struct rsc, h);
+
+ __rsc_init(new, tmp);
+}
+
+static void update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
+{
+ struct rsc *new = container_of(cnew, struct rsc, h);
+ struct rsc *tmp = container_of(ctmp, struct rsc, h);
+
+ __rsc_update(new, tmp);
+}
+
+static struct cache_head *rsc_alloc(void)
+{
+ struct rsc *rsc;
+
+ OBD_ALLOC_PTR(rsc);
+ if (rsc)
+ return &rsc->h;
+ else
+ return NULL;
+}
+
+static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
+{
+ char *buf = mesg;
+ int len, rv, tmp_int;
+ struct rsc rsci, *rscp = NULL;
+ time_t expiry;
+ int status = -EINVAL;
+ struct gss_api_mech *gm = NULL;
memset(&rsci, 0, sizeof(rsci));
}
rsci.ctx.gsc_remote = (tmp_int != 0);
- /* root user flag */
+ /* root user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get root user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_root = (tmp_int != 0);
+
+ /* mds user flag */
rv = get_int(&mesg, &tmp_int);
if (rv) {
- CERROR("fail to get oss user flag\n");
+ CERROR("fail to get mds user flag\n");
goto out;
}
- rsci.ctx.gsc_usr_root = (tmp_int != 0);
+ rsci.ctx.gsc_usr_mds = (tmp_int != 0);
- /* mds user flag */
+ /* oss user flag */
rv = get_int(&mesg, &tmp_int);
if (rv) {
- CERROR("fail to get mds user flag\n");
+ CERROR("fail to get oss user flag\n");
goto out;
}
- rsci.ctx.gsc_usr_mds = (tmp_int != 0);
+ rsci.ctx.gsc_usr_oss = (tmp_int != 0);
/* mapped uid */
rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
goto out;
}
+ rscp = rsc_lookup(&rsci);
+ if (!rscp)
+ goto out;
+
/* uid, or NEGATIVE */
rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
if (rv == -EINVAL)
goto out;
if (rv == -ENOENT) {
CERROR("NOENT? set rsc entry negative\n");
- set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ set_bit(CACHE_NEGATIVE, &rsci.h.flags);
} else {
- struct gss_api_mech *gm;
rawobj_t tmp_buf;
unsigned long ctx_expiry;
status = -EINVAL;
/* mech-specific data: */
len = qword_get(&mesg, buf, mlen);
- if (len < 0) {
- lgss_mech_put(gm);
+ if (len < 0)
goto out;
- }
+
tmp_buf.len = len;
tmp_buf.data = (unsigned char *)buf;
if (lgss_import_sec_context(&tmp_buf, gm,
- &rsci.ctx.gsc_mechctx)) {
- lgss_mech_put(gm);
+ &rsci.ctx.gsc_mechctx))
goto out;
- }
/* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech.
- */
+ * is invalid, here we retrieve it from the mech. */
if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
- lgss_mech_put(gm);
goto out;
}
expiry = (time_t) ctx_expiry;
-
- lgss_mech_put(gm);
}
rsci.h.expiry_time = expiry;
- rscp = rsc_lookup(&rsci, 1);
+ rscp = rsc_update(&rsci, rscp);
status = 0;
out:
+ if (gm)
+ lgss_mech_put(gm);
rsc_free(&rsci);
if (rscp)
- rsc_put(&rscp->h, &rsc_cache);
+ cache_put(&rscp->h, &rsc_cache);
+ else
+ status = -ENOMEM;
if (status)
CERROR("parse rsc error %d\n", status);
return status;
}
+static struct cache_detail rsc_cache = {
+ .hash_size = RSC_HASHMAX,
+ .hash_table = rsc_table,
+ .name = "auth.sptlrpc.context",
+ .cache_put = rsc_put,
+ .cache_parse = rsc_parse,
+ .match = rsc_match,
+ .init = rsc_init,
+ .update = update_rsc,
+ .alloc = rsc_alloc,
+};
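+/* like rsi_cache above, this cache is driven through
+ * /proc/net/rpc/auth.sptlrpc.context/channel: lsvcgssd writes one downcall
+ * record per established context, which is parsed by rsc_parse() */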
+
+static struct rsc *rsc_lookup(struct rsc *item)
+{
+ struct cache_head *ch;
+ int hash = rsc_hash(item);
+
+ ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
+ if (ch)
+ return container_of(ch, struct rsc, h);
+ else
+ return NULL;
+}
+
+static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
+{
+ struct cache_head *ch;
+ int hash = rsc_hash(new);
+
+ ch = sunrpc_cache_update(&rsc_cache, &new->h, &old->h, hash);
+ if (ch)
+ return container_of(ch, struct rsc, h);
+ else
+ return NULL;
+}
+
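+/* alias kept so that put sites read the same on every supported kernel;
+ * cache_put() does the refcount-and-free in all of them */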
+#define COMPAT_RSC_PUT(item, cd) cache_put((item), (cd))
+
/****************************************
* rsc cache flush *
****************************************/
typedef int rsc_entry_match(struct rsc *rscp, long data);
-static
-void rsc_flush(rsc_entry_match *match, long data)
+static void rsc_flush(rsc_entry_match *match, long data)
{
- struct cache_head **ch;
+#ifdef HAVE_CACHE_HEAD_HLIST
+ struct cache_head *ch = NULL;
+ struct hlist_node *next = NULL;
+ struct hlist_head *head;
+#else
+ struct cache_head **ch;
+#endif
struct rsc *rscp;
int n;
ENTRY;
- write_lock(&rsc_cache.hash_lock);
+ write_lock(&rsc_cache.hash_lock);
for (n = 0; n < RSC_HASHMAX; n++) {
- for (ch = &rsc_cache.hash_table[n]; *ch;) {
- rscp = container_of(*ch, struct rsc, h);
+#ifdef HAVE_CACHE_HEAD_HLIST
+ head = &rsc_cache.hash_table[n];
+ /* use the _safe variant: matching entries are unhashed inside the loop */
+ hlist_for_each_entry_safe(ch, next, head, cache_list) {
+ rscp = container_of(ch, struct rsc, h);
+#else
+ for (ch = &rsc_cache.hash_table[n]; *ch;) {
+ rscp = container_of(*ch, struct rsc, h);
+#endif
if (!match(rscp, data)) {
- ch = &((*ch)->next);
+#ifndef HAVE_CACHE_HEAD_HLIST
+ ch = &((*ch)->next);
+#endif
continue;
}
/* it seems simply set NEGATIVE doesn't work */
- *ch = (*ch)->next;
- rscp->h.next = NULL;
+#ifdef HAVE_CACHE_HEAD_HLIST
+ hlist_del_init(&ch->cache_list);
+#else
+ *ch = (*ch)->next;
+ rscp->h.next = NULL;
+#endif
cache_get(&rscp->h);
- set_bit(CACHE_NEGATIVE, &rscp->h.flags);
- rsc_put(&rscp->h, &rsc_cache);
+ set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
rsc_cache.entries--;
}
}
- write_unlock(&rsc_cache.hash_lock);
+ write_unlock(&rsc_cache.hash_lock);
EXIT;
}
-static
-int match_uid(struct rsc *rscp, long uid)
+static int match_uid(struct rsc *rscp, long uid)
{
if ((int) uid == -1)
return 1;
return ((int) rscp->ctx.gsc_uid == (int) uid);
}
-static
-int match_target(struct rsc *rscp, long target)
+static int match_target(struct rsc *rscp, long target)
{
return (rscp->target == (struct obd_device *) target);
}
-static inline
-void rsc_flush_uid(int uid)
+static inline void rsc_flush_uid(int uid)
{
if (uid == -1)
CWARN("flush all gss contexts...\n");
rsc_flush(match_uid, (long) uid);
}
-static inline
-void rsc_flush_target(struct obd_device *target)
+static inline void rsc_flush_target(struct obd_device *target)
{
rsc_flush(match_target, (long) target);
}
{
rsc_flush_target(target);
}
-EXPORT_SYMBOL(gss_secsvc_flush);
-
-static struct cache_detail rsc_cache = {
- .hash_size = RSC_HASHMAX,
- .hash_table = rsc_table,
- .name = "auth.ptlrpcs.context",
- .cache_put = rsc_put,
- .cache_parse = rsc_parse,
-};
-
-static DefineSimpleCacheLookup(rsc, 0);
-static
-struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
+static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
struct rsc rsci;
struct rsc *found;
if (rawobj_dup(&rsci.handle, handle))
return NULL;
- found = rsc_lookup(&rsci, 0);
+ found = rsc_lookup(&rsci);
rsc_free(&rsci);
if (!found)
return NULL;
struct gss_sec *gsec,
struct gss_cli_ctx *gctx)
{
- struct rsc rsci, *rscp;
+ struct rsc rsci, *rscp = NULL;
unsigned long ctx_expiry;
__u32 major;
+ int rc;
ENTRY;
memset(&rsci, 0, sizeof(rsci));
if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
- sizeof(gsec->gs_rvs_hdl))) {
- CERROR("unable alloc handle\n");
- RETURN(-ENOMEM);
- }
+ sizeof(gsec->gs_rvs_hdl)))
+ GOTO(out, rc = -ENOMEM);
+
+ rscp = rsc_lookup(&rsci);
+ if (rscp == NULL)
+ GOTO(out, rc = -ENOMEM);
major = lgss_copy_reverse_context(gctx->gc_mechctx,
&rsci.ctx.gsc_mechctx);
- if (major != GSS_S_COMPLETE) {
- CERROR("unable to copy reverse context\n");
- rsc_free(&rsci);
- RETURN(-ENOMEM);
- }
+ if (major != GSS_S_COMPLETE)
+ GOTO(out, rc = -ENOMEM);
if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
- rsc_free(&rsci);
- RETURN(-EINVAL);
+ GOTO(out, rc = -EINVAL);
}
-
rsci.h.expiry_time = (time_t) ctx_expiry;
- rsci.target = imp->imp_obd;
- rscp = rsc_lookup(&rsci, 1);
- rsc_free(&rsci);
- if (rscp)
- rsc_put(&rscp->h, &rsc_cache);
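+ /* flag the reverse context with the type of the import that created
+ * it (MDC, OSC, or other client), mirroring the user flags a forward
+ * context would carry */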
+ if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0)
+ rsci.ctx.gsc_usr_mds = 1;
+ else if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0)
+ rsci.ctx.gsc_usr_oss = 1;
+ else
+ rsci.ctx.gsc_usr_root = 1;
+
+ rscp = rsc_update(&rsci, rscp);
+ if (rscp == NULL)
+ GOTO(out, rc = -ENOMEM);
- CWARN("client installed reverse svc ctx to %s: idx "LPX64"\n",
- imp->imp_obd->u.cli.cl_target_uuid.uuid,
- gsec->gs_rvs_hdl);
+ rscp->target = imp->imp_obd;
+ rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
- imp->imp_next_reconnect = gss_round_imp_reconnect(ctx_expiry);
- CWARN("import(%s) to %s: set force reconnect at %lu(%lds valid time)\n",
- ptlrpc_import_state_name(imp->imp_state),
- imp->imp_obd->u.cli.cl_target_uuid.uuid,
- imp->imp_next_reconnect,
- (long) (imp->imp_next_reconnect - get_seconds()));
+ CWARN("create reverse svc ctx %p to %s: idx %#llx\n",
+ &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
+ rc = 0;
+out:
+ if (rscp)
+ cache_put(&rscp->h, &rsc_cache);
+ rsc_free(&rsci);
- RETURN(0);
+ if (rc)
+ CERROR("create reverse svc ctx: idx %#llx, rc %d\n",
+ gsec->gs_rvs_hdl, rc);
+ RETURN(rc);
}
-#if 0
-static int
-gss_svc_unseal_request(struct ptlrpc_request *req,
- struct rsc *rsci,
- struct gss_wire_cred *gc,
- __u32 *vp, __u32 vlen)
+int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
{
- struct ptlrpcs_wire_hdr *sec_hdr;
- struct gss_ctx *ctx = rsci->mechctx;
- rawobj_t cipher_text, plain_text;
- __u32 major;
- ENTRY;
+ const cfs_time_t expire = 20;
+ struct rsc *rscp;
- sec_hdr = (struct ptlrpcs_wire_hdr *) req->rq_reqbuf;
+ rscp = gss_svc_searchbyctx(handle);
+ if (rscp) {
+ CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
+ &rscp->ctx, rscp);
- if (vlen < 4) {
- CERROR("vlen only %u\n", vlen);
- RETURN(GSS_S_CALL_BAD_STRUCTURE);
+ rscp->h.expiry_time = cfs_time_current_sec() + expire;
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
}
+ return 0;
+}
- cipher_text.len = le32_to_cpu(*vp++);
- cipher_text.data = (__u8 *) vp;
- vlen -= 4;
-
- if (cipher_text.len > vlen) {
- CERROR("cipher claimed %u while buf only %u\n",
- cipher_text.len, vlen);
- RETURN(GSS_S_CALL_BAD_STRUCTURE);
- }
-
- plain_text = cipher_text;
-
- major = lgss_unwrap(ctx, GSS_C_QOP_DEFAULT, &cipher_text, &plain_text);
- if (major) {
- CERROR("unwrap error 0x%x\n", major);
- RETURN(major);
- }
+int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
+{
+ struct rsc *rscp = container_of(ctx, struct rsc, ctx);
- if (gss_check_seq_num(&rsci->seqdata, gc->gc_seq)) {
- CERROR("discard replayed request %p(o%u,x"LPU64",t"LPU64")\n",
- req, req->rq_reqmsg->opc, req->rq_xid,
- req->rq_reqmsg->transno);
- RETURN(GSS_S_DUPLICATE_TOKEN);
- }
+ return rawobj_dup(handle, &rscp->handle);
+}
- req->rq_reqmsg = (struct lustre_msg *) (vp);
- req->rq_reqlen = plain_text.len;
+int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
+{
+ struct rsc *rscp;
- CDEBUG(D_SEC, "msg len %d\n", req->rq_reqlen);
+ rscp = gss_svc_searchbyctx(handle);
+ if (rscp) {
+ CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
+ &rscp->ctx, rscp, seq + 1);
- RETURN(GSS_S_COMPLETE);
+ rscp->ctx.gsc_rvs_seq = seq + 1;
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
+ }
+ return 0;
}
-#endif
-static
-struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
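+/* never let the sunrpc cache defer our requests: returning NULL makes
+ * cache_check() fail with -EAGAIN/-ETIMEDOUT instead, and
+ * gss_svc_upcall_handle_init() retries on its own wait queue */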
+static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
{
return NULL;
}
static struct cache_req cache_upcall_chandle = { cache_upcall_defer };
int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
- struct gss_svc_reqctx *grctx,
- struct gss_wire_ctx *gw,
- struct obd_device *target,
- __u32 lustre_svc,
- rawobj_t *rvs_hdl,
- rawobj_t *in_token)
-{
- struct ptlrpc_reply_state *rs;
- struct rsc *rsci = NULL;
- struct rsi *rsip = NULL, rsikey;
- wait_queue_t wait;
- int replen = sizeof(struct ptlrpc_body);
- struct gss_rep_header *rephdr;
- int first_check = 1;
- int rc = SECSVC_DROP;
- ENTRY;
-
- memset(&rsikey, 0, sizeof(rsikey));
- rsikey.lustre_svc = lustre_svc;
- rsikey.nid = (__u64) req->rq_peer.nid;
+ struct gss_svc_reqctx *grctx,
+ struct gss_wire_ctx *gw,
+ struct obd_device *target,
+ __u32 lustre_svc,
+ rawobj_t *rvs_hdl,
+ rawobj_t *in_token)
+{
+ struct ptlrpc_reply_state *rs;
+ struct rsc *rsci = NULL;
+ struct rsi *rsip = NULL, rsikey;
+ wait_queue_t wait;
+ int replen = sizeof(struct ptlrpc_body);
+ struct gss_rep_header *rephdr;
+ int first_check = 1;
+ int rc = SECSVC_DROP;
+ ENTRY;
+
+ memset(&rsikey, 0, sizeof(rsikey));
+ rsikey.lustre_svc = lustre_svc;
+ rsikey.nid = (__u64) req->rq_peer.nid;
+ nodemap_test_nid(req->rq_peer.nid, rsikey.nm_name,
+ sizeof(rsikey.nm_name));
/* duplicate context handle. for INIT it always 0 */
if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
GOTO(out, rc);
}
- rsip = rsi_lookup(&rsikey, 0);
+ rsip = rsi_lookup(&rsikey);
rsi_free(&rsikey);
if (!rsip) {
CERROR("error in rsi_lookup.\n");
GOTO(out, rc);
}
- cache_get(&rsip->h); /* take an extra ref */
- init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&rsip->waitq, &wait);
+ cache_get(&rsip->h); /* take an extra ref */
+ init_waitqueue_head(&rsip->waitq);
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&rsip->waitq, &wait);
cache_check:
- /* Note each time cache_check() will drop a reference if return
- * non-zero. We hold an extra reference on initial rsip, but must
- * take care of following calls.
- */
- rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
- switch (rc) {
- case -EAGAIN: {
+ /* Note each time cache_check() will drop a reference if return
+ * non-zero. We hold an extra reference on initial rsip, but must
+ * take care of following calls. */
+ rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
+ switch (rc) {
+ case -ETIMEDOUT:
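+ /* -ETIMEDOUT is what newer kernels return when the deferral is
+ * refused (see cache_upcall_defer() above); treat it exactly
+ * like -EAGAIN */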
+ case -EAGAIN: {
int valid;
if (first_check) {
first_check = 0;
read_lock(&rsi_cache.hash_lock);
- valid = test_bit(CACHE_VALID, &rsip->h.flags);
+ valid = test_bit(CACHE_VALID, &rsip->h.flags);
if (valid == 0)
- set_current_state(TASK_INTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
read_unlock(&rsi_cache.hash_lock);
- if (valid == 0)
- schedule_timeout(GSS_SVC_UPCALL_TIMEOUT * HZ);
-
- cache_get(&rsip->h);
- goto cache_check;
+ if (valid == 0) {
+ unsigned long timeout;
+
+ timeout = msecs_to_jiffies(MSEC_PER_SEC *
+ GSS_SVC_UPCALL_TIMEOUT);
+ schedule_timeout(timeout);
+ }
+ cache_get(&rsip->h);
+ goto cache_check;
}
CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
break;
break;
case 0:
/* if not the first check, we have to release the extra
- * reference we just added on it.
- */
- if (!first_check)
- cache_put(&rsip->h, &rsi_cache);
- CDEBUG(D_SEC, "cache_check is good\n");
- break;
- }
+ * reference we just added on it. */
+ if (!first_check)
+ cache_put(&rsip->h, &rsi_cache);
+ CDEBUG(D_SEC, "cache_check is good\n");
+ break;
+ }
- remove_wait_queue(&rsip->waitq, &wait);
- cache_put(&rsip->h, &rsi_cache);
+ remove_wait_queue(&rsip->waitq, &wait);
+ cache_put(&rsip->h, &rsi_cache);
- if (rc)
- GOTO(out, rc = SECSVC_DROP);
+ if (rc)
+ GOTO(out, rc = SECSVC_DROP);
rc = SECSVC_DROP;
rsci = gss_svc_searchbyctx(&rsip->out_handle);
if (!rsci) {
CERROR("authentication failed\n");
- if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
- rc = SECSVC_COMPLETE;
+ /* gss mechanism returned major and minor code so we return
+ * those in error message */
+ if (!gss_pack_err_notify(req, rsip->major_status,
+ rsip->minor_status))
+ rc = SECSVC_COMPLETE;
GOTO(out, rc);
} else {
rsci->target = target;
- CWARN("server create rsc %p(%u->%s)\n",
- rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));
+ CDEBUG(D_SEC, "server create rsc %p(%u->%s)\n",
+ rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));
if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
CERROR("handle size %u too large\n", rsip->out_handle.len);
}
grctx->src_init = 1;
- grctx->src_reserve_len = size_round4(rsip->out_token.len);
+ grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);
- rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
+ rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
if (rc) {
CERROR("failed to pack reply: %d\n", rc);
GOTO(out, rc = SECSVC_DROP);
rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
rsip->out_token.len, 0);
- if (rsci->ctx.gsc_usr_mds)
- CWARN("user from %s authenticated as mds\n",
- libcfs_nid2str(req->rq_peer.nid));
-
rc = SECSVC_OK;
out:
- /* it looks like here we should put rsip also, but this mess up
- * with NFS cache mgmt code... FIXME
- */
-#if 0
- if (rsip)
- rsi_put(&rsip->h, &rsi_cache);
-#endif
-
- if (rsci) {
- /* if anything went wrong, we don't keep the context too */
- if (rc != SECSVC_OK)
- set_bit(CACHE_NEGATIVE, &rsci->h.flags);
-
- rsc_put(&rsci->h, &rsc_cache);
- }
- RETURN(rc);
+ /* it looks like here we should put rsip also, but this mess up
+ * with NFS cache mgmt code... FIXME
+ * something like:
+ * if (rsip)
+ * rsi_put(&rsip->h, &rsi_cache); */
+
+ if (rsci) {
+ /* if anything went wrong, we don't keep the context too */
+ if (rc != SECSVC_OK)
+ set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ else
+ CDEBUG(D_SEC, "create rsc with idx %#llx\n",
+ gss_handle_to_u64(&rsci->handle));
+
+ COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
+ }
+ RETURN(rc);
}
struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
rsc = gss_svc_searchbyctx(&gw->gw_handle);
if (!rsc) {
- CWARN("Invalid gss context handle from %s\n",
+ CWARN("Invalid gss ctx idx %#llx from %s\n",
+ gss_handle_to_u64(&gw->gw_handle),
libcfs_nid2str(req->rq_peer.nid));
return NULL;
}
{
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
- rsc_put(&rsc->h, &rsc_cache);
+ COMPAT_RSC_PUT(&rsc->h, &rsc_cache);
}
void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
{
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
- set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ /* can't be found */
+ set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ /* to be removed at next scan */
+ rsc->h.expiry_time = 1;
}
-int __init gss_svc_init_upcall(void)
+int __init gss_init_svc_upcall(void)
{
- int i;
-
- cache_register(&rsi_cache);
- cache_register(&rsc_cache);
-
- /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
- * the init upcall channel, otherwise there's big chance that the first
- * upcall issued before the channel be opened thus nfsv4 cache code will
- * drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds.
- */
- for (i = 0; i < 6; i++) {
- if (atomic_read(&rsi_cache.readers) > 0)
- break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(HZ >= 4);
- schedule_timeout(HZ / 4);
- }
-
- if (atomic_read(&rsi_cache.readers) == 0)
- CWARN("Init channel is not opened by lsvcgssd, following "
- "request might be dropped until lsvcgssd is active\n");
-
- /*
- * this helps reducing context index confliction. after server reboot,
- * conflicting request from clients might be filtered out by initial
- * sequence number checking, thus no chance to sent error notification
- * back to clients.
- */
- get_random_bytes(&__ctx_index, sizeof(__ctx_index));
-
- return 0;
+ int i, rc;
+
+ spin_lock_init(&__ctx_index_lock);
+ /*
+ * this helps reduce context index collisions. after a server reboot,
+ * conflicting requests from clients might be filtered out by the
+ * initial sequence number check, with no chance to send an error
+ * notification back to the clients.
+ */
+ cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+
+ rc = _cache_register_net(&rsi_cache, &init_net);
+ if (rc != 0)
+ return rc;
+
+ rc = _cache_register_net(&rsc_cache, &init_net);
+ if (rc != 0) {
+ _cache_unregister_net(&rsi_cache, &init_net);
+ return rc;
+ }
+
+ /* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
+ * the init upcall channel, otherwise there's a big chance that the
+ * first upcall is issued before the channel is opened, in which case
+ * the nfsv4 cache code will drop the request directly and cause
+ * unnecessary recovery time. here we wait at maximum 1.5 seconds. */
+ for (i = 0; i < 6; i++) {
+ if (atomic_read(&rsi_cache.readers) > 0)
+ break;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ LASSERT(msecs_to_jiffies(MSEC_PER_SEC) >= 4);
+ schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
+ }
+
+ if (atomic_read(&rsi_cache.readers) == 0)
+ CWARN("Init channel is not opened by lsvcgssd, following "
+ "request might be dropped until lsvcgssd is active\n");
+
+ return 0;
}
-void __exit gss_svc_exit_upcall(void)
+void gss_exit_svc_upcall(void)
{
- int rc;
-
- cache_purge(&rsi_cache);
- if ((rc = cache_unregister(&rsi_cache)))
- CERROR("unregister rsi cache: %d\n", rc);
+ cache_purge(&rsi_cache);
+ _cache_unregister_net(&rsi_cache, &init_net);
- cache_purge(&rsc_cache);
- if ((rc = cache_unregister(&rsc_cache)))
- CERROR("unregister rsc cache: %d\n", rc);
+ cache_purge(&rsc_cache);
+ _cache_unregister_net(&rsc_cache, &init_net);
}