* vim:expandtab:shiftwidth=8:tabstop=8:
*
* Modifications for Lustre
+ *
+ * Copyright 2008, Sun Microsystems, Inc.
+ * Author: Eric Mei <eric.mei@sun.com>
+ *
* Copyright 2004 - 2006, Cluster File Systems, Inc.
* All rights reserved
* Author: Eric Mei <ericm@clusterfs.com>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/mutex.h>
+#include <linux/sunrpc/cache.h>
#else
#include <liblustre.h>
#endif
-#include <linux/sunrpc/cache.h>
-
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
return idx;
}
-static inline
-unsigned long hash_mem(char *buf, int length, int bits)
+static inline unsigned long hash_mem(char *buf, int length, int bits)
{
unsigned long hash = 0;
unsigned long l = 0;
static struct cache_head *rsi_table[RSI_HASHMAX];
static struct cache_detail rsi_cache;
+#ifdef HAVE_SUNRPC_CACHE_V2
+static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
+static struct rsi *rsi_lookup(struct rsi *item);
+#else
static struct rsi *rsi_lookup(struct rsi *item, int set);
+#endif
-static
-void rsi_free(struct rsi *rsi)
-{
- rawobj_free(&rsi->in_handle);
- rawobj_free(&rsi->in_token);
- rawobj_free(&rsi->out_handle);
- rawobj_free(&rsi->out_token);
-}
-
-static
-void rsi_put(struct cache_head *item, struct cache_detail *cd)
-{
- struct rsi *rsi = container_of(item, struct rsi, h);
-
- LASSERT(atomic_read(&item->refcnt) > 0);
-
- if (cache_put(item, cd)) {
- LASSERT(item->next == NULL);
- rsi_free(rsi);
- kfree(rsi); /* created by cache mgmt using kmalloc */
- }
-}
-
-static inline
-int rsi_hash(struct rsi *item)
+static inline int rsi_hash(struct rsi *item)
{
return hash_mem((char *)item->in_handle.data, item->in_handle.len,
RSI_HASHBITS) ^
RSI_HASHBITS);
}
-static inline
-int rsi_match(struct rsi *item, struct rsi *tmp)
+static inline int __rsi_match(struct rsi *item, struct rsi *tmp)
{
return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
rawobj_equal(&item->in_token, &tmp->in_token));
}
-static
-void rsi_request(struct cache_detail *cd,
- struct cache_head *h,
- char **bpp, int *blen)
+static void rsi_free(struct rsi *rsi)
+{
+ rawobj_free(&rsi->in_handle);
+ rawobj_free(&rsi->in_token);
+ rawobj_free(&rsi->out_handle);
+ rawobj_free(&rsi->out_token);
+}
+
+static void rsi_request(struct cache_detail *cd,
+ struct cache_head *h,
+ char **bpp, int *blen)
{
struct rsi *rsi = container_of(h, struct rsi, h);
__u64 index = 0;
(*bpp)[-1] = '\n';
}
-static inline
-void rsi_init(struct rsi *new, struct rsi *item)
+static inline void __rsi_init(struct rsi *new, struct rsi *item)
{
new->out_handle = RAWOBJ_EMPTY;
new->out_token = RAWOBJ_EMPTY;
init_waitqueue_head(&new->waitq);
}
-static inline
-void rsi_update(struct rsi *new, struct rsi *item)
+static inline void __rsi_update(struct rsi *new, struct rsi *item)
{
LASSERT(new->out_handle.len == 0);
LASSERT(new->out_token.len == 0);
new->minor_status = item->minor_status;
}
-static
-int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
+#ifdef HAVE_SUNRPC_CACHE_V2
+
+static void rsi_put(struct kref *ref)
+{
+ struct rsi *rsi = container_of(ref, struct rsi, h.ref);
+
+ LASSERT(rsi->h.next == NULL);
+ rsi_free(rsi);
+ OBD_FREE_PTR(rsi);
+}
+
+static int rsi_match(struct cache_head *a, struct cache_head *b)
+{
+ struct rsi *item = container_of(a, struct rsi, h);
+ struct rsi *tmp = container_of(b, struct rsi, h);
+
+ return __rsi_match(item, tmp);
+}
+
+static void rsi_init(struct cache_head *cnew, struct cache_head *citem)
+{
+ struct rsi *new = container_of(cnew, struct rsi, h);
+ struct rsi *item = container_of(citem, struct rsi, h);
+
+ __rsi_init(new, item);
+}
+
+static void update_rsi(struct cache_head *cnew, struct cache_head *citem)
+{
+ struct rsi *new = container_of(cnew, struct rsi, h);
+ struct rsi *item = container_of(citem, struct rsi, h);
+
+ __rsi_update(new, item);
+}
+
+static struct cache_head *rsi_alloc(void)
+{
+ struct rsi *rsi;
+
+ OBD_ALLOC_PTR(rsi);
+ if (rsi)
+ return &rsi->h;
+ else
+ return NULL;
+}
+
+static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
+{
+ char *buf = mesg;
+ char *ep;
+ int len;
+ struct rsi rsii, *rsip = NULL;
+ time_t expiry;
+ int status = -EINVAL;
+ ENTRY;
+
+
+ memset(&rsii, 0, sizeof(rsii));
+
+ /* handle */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ if (rawobj_alloc(&rsii.in_handle, buf, len)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ /* token */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ if (rawobj_alloc(&rsii.in_token, buf, len)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ rsip = rsi_lookup(&rsii);
+ if (!rsip)
+ goto out;
+
+ rsii.h.flags = 0;
+ /* expiry */
+ expiry = get_expiry(&mesg);
+ if (expiry == 0)
+ goto out;
+
+ len = qword_get(&mesg, buf, mlen);
+ if (len <= 0)
+ goto out;
+
+ /* major */
+ rsii.major_status = simple_strtol(buf, &ep, 10);
+ if (*ep)
+ goto out;
+
+ /* minor */
+ len = qword_get(&mesg, buf, mlen);
+ if (len <= 0)
+ goto out;
+ rsii.minor_status = simple_strtol(buf, &ep, 10);
+ if (*ep)
+ goto out;
+
+ /* out_handle */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ if (rawobj_alloc(&rsii.out_handle, buf, len)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ /* out_token */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ if (rawobj_alloc(&rsii.out_token, buf, len)) {
+ status = -ENOMEM;
+ goto out;
+ }
+
+ rsii.h.expiry_time = expiry;
+ rsip = rsi_update(&rsii, rsip);
+ status = 0;
+out:
+ rsi_free(&rsii);
+ if (rsip) {
+ wake_up_all(&rsip->waitq);
+ cache_put(&rsip->h, &rsi_cache);
+ } else {
+ status = -ENOMEM;
+ }
+
+ if (status)
+ CERROR("rsi parse error %d\n", status);
+ RETURN(status);
+}
+
+#else /* !HAVE_SUNRPC_CACHE_V2 */
+
+static void rsi_put(struct cache_head *item, struct cache_detail *cd)
+{
+ struct rsi *rsi = container_of(item, struct rsi, h);
+
+ LASSERT(atomic_read(&item->refcnt) > 0);
+
+ if (cache_put(item, cd)) {
+ LASSERT(item->next == NULL);
+ rsi_free(rsi);
+ kfree(rsi); /* created by cache mgmt using kmalloc */
+ }
+}
+
+static inline int rsi_match(struct rsi *item, struct rsi *tmp)
+{
+ return __rsi_match(item, tmp);
+}
+
+static inline void rsi_init(struct rsi *new, struct rsi *item)
+{
+ __rsi_init(new, item);
+}
+
+static inline void rsi_update(struct rsi *new, struct rsi *item)
+{
+ __rsi_update(new, item);
+}
+
+static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
char *buf = mesg;
char *ep;
RETURN(status);
}
+#endif /* HAVE_SUNRPC_CACHE_V2 */
+
static struct cache_detail rsi_cache = {
.hash_size = RSI_HASHMAX,
.hash_table = rsi_table,
- .name = "auth.ptlrpcs.init",
+ .name = "auth.sptlrpc.init",
.cache_put = rsi_put,
.cache_request = rsi_request,
.cache_parse = rsi_parse,
+#ifdef HAVE_SUNRPC_CACHE_V2
+ .match = rsi_match,
+ .init = rsi_init,
+ .update = update_rsi,
+ .alloc = rsi_alloc,
+#endif
};
+#ifdef HAVE_SUNRPC_CACHE_V2
+
+static struct rsi *rsi_lookup(struct rsi *item)
+{
+ struct cache_head *ch;
+ int hash = rsi_hash(item);
+
+ ch = sunrpc_cache_lookup(&rsi_cache, &item->h, hash);
+ if (ch)
+ return container_of(ch, struct rsi, h);
+ else
+ return NULL;
+}
+
+static struct rsi *rsi_update(struct rsi *new, struct rsi *old)
+{
+ struct cache_head *ch;
+ int hash = rsi_hash(new);
+
+ ch = sunrpc_cache_update(&rsi_cache, &new->h, &old->h, hash);
+ if (ch)
+ return container_of(ch, struct rsi, h);
+ else
+ return NULL;
+}
+
+#else
+
static DefineSimpleCacheLookup(rsi, 0)
+#endif
+
/****************************************
* rsc cache *
****************************************/
static struct cache_head *rsc_table[RSC_HASHMAX];
static struct cache_detail rsc_cache;
+#ifdef HAVE_SUNRPC_CACHE_V2
+static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
+static struct rsc *rsc_lookup(struct rsc *item);
+#else
static struct rsc *rsc_lookup(struct rsc *item, int set);
+#endif
-static
-void rsc_free(struct rsc *rsci)
+static void rsc_free(struct rsc *rsci)
{
rawobj_free(&rsci->handle);
rawobj_free(&rsci->ctx.gsc_rvs_hdl);
lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
}
-static
-void rsc_put(struct cache_head *item, struct cache_detail *cd)
-{
- struct rsc *rsci = container_of(item, struct rsc, h);
-
- LASSERT(atomic_read(&item->refcnt) > 0);
-
- if (cache_put(item, cd)) {
- LASSERT(item->next == NULL);
- rsc_free(rsci);
- kfree(rsci); /* created by cache mgmt using kmalloc */
- }
-}
-
-static inline
-int rsc_hash(struct rsc *rsci)
+static inline int rsc_hash(struct rsc *rsci)
{
return hash_mem((char *)rsci->handle.data,
rsci->handle.len, RSC_HASHBITS);
}
-static inline
-int rsc_match(struct rsc *new, struct rsc *tmp)
+static inline int __rsc_match(struct rsc *new, struct rsc *tmp)
{
return rawobj_equal(&new->handle, &tmp->handle);
}
-static inline
-void rsc_init(struct rsc *new, struct rsc *tmp)
+static inline void __rsc_init(struct rsc *new, struct rsc *tmp)
{
new->handle = tmp->handle;
tmp->handle = RAWOBJ_EMPTY;
new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
}
-static inline
-void rsc_update(struct rsc *new, struct rsc *tmp)
+static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
{
new->ctx = tmp->ctx;
tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
-static
-int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
+#ifdef HAVE_SUNRPC_CACHE_V2
+
+static void rsc_put(struct kref *ref)
+{
+ struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+
+ LASSERT(rsci->h.next == NULL);
+ rsc_free(rsci);
+ OBD_FREE_PTR(rsci);
+}
+
+static int rsc_match(struct cache_head *a, struct cache_head *b)
+{
+ struct rsc *new = container_of(a, struct rsc, h);
+ struct rsc *tmp = container_of(b, struct rsc, h);
+
+ return __rsc_match(new, tmp);
+}
+
+static void rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
+{
+ struct rsc *new = container_of(cnew, struct rsc, h);
+ struct rsc *tmp = container_of(ctmp, struct rsc, h);
+
+ __rsc_init(new, tmp);
+}
+
+static void update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
+{
+ struct rsc *new = container_of(cnew, struct rsc, h);
+ struct rsc *tmp = container_of(ctmp, struct rsc, h);
+
+ __rsc_update(new, tmp);
+}
+
+static struct cache_head *rsc_alloc(void)
+{
+ struct rsc *rsc;
+
+ OBD_ALLOC_PTR(rsc);
+ if (rsc)
+ return &rsc->h;
+ else
+ return NULL;
+}
+
+static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
+{
+ char *buf = mesg;
+ int len, rv, tmp_int;
+ struct rsc rsci, *rscp = NULL;
+ time_t expiry;
+ int status = -EINVAL;
+ struct gss_api_mech *gm = NULL;
+
+ memset(&rsci, 0, sizeof(rsci));
+
+ /* context handle */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0) goto out;
+ status = -ENOMEM;
+ if (rawobj_alloc(&rsci.handle, buf, len))
+ goto out;
+
+ rsci.h.flags = 0;
+ /* expiry */
+ expiry = get_expiry(&mesg);
+ status = -EINVAL;
+ if (expiry == 0)
+ goto out;
+
+ /* remote flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get remote flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_remote = (tmp_int != 0);
+
+ /* root user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get oss user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_root = (tmp_int != 0);
+
+ /* mds user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get mds user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_mds = (tmp_int != 0);
+
+ /* mapped uid */
+ rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
+ if (rv) {
+ CERROR("fail to get mapped uid\n");
+ goto out;
+ }
+
+ rscp = rsc_lookup(&rsci);
+ if (!rscp)
+ goto out;
+
+ /* uid, or NEGATIVE */
+ rv = get_int(&mesg, (int *) &rsci.ctx.gsc_uid);
+ if (rv == -EINVAL)
+ goto out;
+ if (rv == -ENOENT) {
+ CERROR("NOENT? set rsc entry negative\n");
+ set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ } else {
+ rawobj_t tmp_buf;
+ unsigned long ctx_expiry;
+
+ /* gid */
+ if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
+ goto out;
+
+ /* mech name */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ gm = lgss_name_to_mech(buf);
+ status = -EOPNOTSUPP;
+ if (!gm)
+ goto out;
+
+ status = -EINVAL;
+ /* mech-specific data: */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+
+ tmp_buf.len = len;
+ tmp_buf.data = (unsigned char *)buf;
+ if (lgss_import_sec_context(&tmp_buf, gm,
+ &rsci.ctx.gsc_mechctx))
+ goto out;
+
+ /* currently the expiry time passed down from user-space
+ * is invalid, here we retrieve it from mech. */
+ if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
+ CERROR("unable to get expire time, drop it\n");
+ goto out;
+ }
+ expiry = (time_t) ctx_expiry;
+ }
+
+ rsci.h.expiry_time = expiry;
+ rscp = rsc_update(&rsci, rscp);
+ status = 0;
+out:
+ if (gm)
+ lgss_mech_put(gm);
+ rsc_free(&rsci);
+ if (rscp)
+ cache_put(&rscp->h, &rsc_cache);
+ else
+ status = -ENOMEM;
+
+ if (status)
+ CERROR("parse rsc error %d\n", status);
+ return status;
+}
+
+#else /* !HAVE_SUNRPC_CACHE_V2 */
+
+static void rsc_put(struct cache_head *item, struct cache_detail *cd)
+{
+ struct rsc *rsci = container_of(item, struct rsc, h);
+
+ LASSERT(atomic_read(&item->refcnt) > 0);
+
+ if (cache_put(item, cd)) {
+ LASSERT(item->next == NULL);
+ rsc_free(rsci);
+ kfree(rsci); /* created by cache mgmt using kmalloc */
+ }
+}
+
+static inline int rsc_match(struct rsc *new, struct rsc *tmp)
+{
+ return __rsc_match(new, tmp);
+}
+
+static inline void rsc_init(struct rsc *new, struct rsc *tmp)
+{
+ __rsc_init(new, tmp);
+}
+
+static inline void rsc_update(struct rsc *new, struct rsc *tmp)
+{
+ __rsc_update(new, tmp);
+}
+
+static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
{
char *buf = mesg;
int len, rv, tmp_int;
}
/* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech.
- */
+ * is invalid, here we retrieve it from mech. */
if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
lgss_mech_put(gm);
return status;
}
+#endif /* HAVE_SUNRPC_CACHE_V2 */
+
+
+static struct cache_detail rsc_cache = {
+ .hash_size = RSC_HASHMAX,
+ .hash_table = rsc_table,
+ .name = "auth.sptlrpc.context",
+ .cache_put = rsc_put,
+ .cache_parse = rsc_parse,
+#ifdef HAVE_SUNRPC_CACHE_V2
+ .match = rsc_match,
+ .init = rsc_init,
+ .update = update_rsc,
+ .alloc = rsc_alloc,
+#endif
+};
+
+#ifdef HAVE_SUNRPC_CACHE_V2
+
+static struct rsc *rsc_lookup(struct rsc *item)
+{
+ struct cache_head *ch;
+ int hash = rsc_hash(item);
+
+ ch = sunrpc_cache_lookup(&rsc_cache, &item->h, hash);
+ if (ch)
+ return container_of(ch, struct rsc, h);
+ else
+ return NULL;
+}
+
+static struct rsc *rsc_update(struct rsc *new, struct rsc *old)
+{
+ struct cache_head *ch;
+ int hash = rsc_hash(new);
+
+ ch = sunrpc_cache_update(&rsc_cache, &new->h, &old->h, hash);
+ if (ch)
+ return container_of(ch, struct rsc, h);
+ else
+ return NULL;
+}
+
+#define COMPAT_RSC_PUT(item, cd) cache_put((item), (cd))
+
+#else
+
+static DefineSimpleCacheLookup(rsc, 0);
+
+#define COMPAT_RSC_PUT(item, cd) rsc_put((item), (cd))
+
+#endif
+
/****************************************
* rsc cache flush *
****************************************/
typedef int rsc_entry_match(struct rsc *rscp, long data);
-static
-void rsc_flush(rsc_entry_match *match, long data)
+static void rsc_flush(rsc_entry_match *match, long data)
{
struct cache_head **ch;
struct rsc *rscp;
rscp->h.next = NULL;
cache_get(&rscp->h);
set_bit(CACHE_NEGATIVE, &rscp->h.flags);
- rsc_put(&rscp->h, &rsc_cache);
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
rsc_cache.entries--;
}
}
EXIT;
}
-static
-int match_uid(struct rsc *rscp, long uid)
+static int match_uid(struct rsc *rscp, long uid)
{
if ((int) uid == -1)
return 1;
return ((int) rscp->ctx.gsc_uid == (int) uid);
}
-static
-int match_target(struct rsc *rscp, long target)
+static int match_target(struct rsc *rscp, long target)
{
return (rscp->target == (struct obd_device *) target);
}
-static inline
-void rsc_flush_uid(int uid)
+static inline void rsc_flush_uid(int uid)
{
if (uid == -1)
CWARN("flush all gss contexts...\n");
rsc_flush(match_uid, (long) uid);
}
-static inline
-void rsc_flush_target(struct obd_device *target)
+static inline void rsc_flush_target(struct obd_device *target)
{
rsc_flush(match_target, (long) target);
}
}
EXPORT_SYMBOL(gss_secsvc_flush);
-static struct cache_detail rsc_cache = {
- .hash_size = RSC_HASHMAX,
- .hash_table = rsc_table,
- .name = "auth.ptlrpcs.context",
- .cache_put = rsc_put,
- .cache_parse = rsc_parse,
-};
-
-static DefineSimpleCacheLookup(rsc, 0);
-
-static
-struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
+static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
struct rsc rsci;
struct rsc *found;
if (rawobj_dup(&rsci.handle, handle))
return NULL;
+#ifdef HAVE_SUNRPC_CACHE_V2
+ found = rsc_lookup(&rsci);
+#else
found = rsc_lookup(&rsci, 0);
+#endif
rsc_free(&rsci);
if (!found)
return NULL;
return found;
}
+#ifdef HAVE_SUNRPC_CACHE_V2
+
int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
struct gss_sec *gsec,
struct gss_cli_ctx *gctx)
{
- struct rsc rsci, *rscp;
+ struct rsc rsci, *rscp = NULL;
unsigned long ctx_expiry;
__u32 major;
+ int rc;
ENTRY;
memset(&rsci, 0, sizeof(rsci));
if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
- sizeof(gsec->gs_rvs_hdl))) {
- CERROR("unable alloc handle\n");
- RETURN(-ENOMEM);
- }
+ sizeof(gsec->gs_rvs_hdl)))
+ GOTO(out, rc = -ENOMEM);
+
+ rscp = rsc_lookup(&rsci);
+ if (rscp == NULL)
+ GOTO(out, rc = -ENOMEM);
major = lgss_copy_reverse_context(gctx->gc_mechctx,
&rsci.ctx.gsc_mechctx);
- if (major != GSS_S_COMPLETE) {
- CERROR("unable to copy reverse context\n");
- rsc_free(&rsci);
- RETURN(-ENOMEM);
- }
+ if (major != GSS_S_COMPLETE)
+ GOTO(out, rc = -ENOMEM);
if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
- rsc_free(&rsci);
- RETURN(-EINVAL);
+ GOTO(out, rc = -EINVAL);
}
-
rsci.h.expiry_time = (time_t) ctx_expiry;
- rsci.target = imp->imp_obd;
- rscp = rsc_lookup(&rsci, 1);
- rsc_free(&rsci);
- if (rscp)
- rsc_put(&rscp->h, &rsc_cache);
+ /* FIXME */
+ rsci.ctx.gsc_usr_root = 1;
+ rsci.ctx.gsc_usr_mds = 1;
+ rsci.ctx.gsc_reverse = 1;
+
+ rscp = rsc_update(&rsci, rscp);
+ if (rscp == NULL)
+ GOTO(out, rc = -ENOMEM);
- CWARN("client installed reverse svc ctx to %s: idx "LPX64"\n",
- imp->imp_obd->u.cli.cl_target_uuid.uuid,
- gsec->gs_rvs_hdl);
+ rscp->target = imp->imp_obd;
+ rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
- imp->imp_next_reconnect = gss_round_imp_reconnect(ctx_expiry);
- CWARN("import(%s) to %s: set force reconnect at %lu(%lds valid time)\n",
- ptlrpc_import_state_name(imp->imp_state),
- imp->imp_obd->u.cli.cl_target_uuid.uuid,
- imp->imp_next_reconnect,
- (long) (imp->imp_next_reconnect - get_seconds()));
+ CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
+ &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
+ rc = 0;
+out:
+ if (rscp)
+ cache_put(&rscp->h, &rsc_cache);
+ rsc_free(&rsci);
- RETURN(0);
+ if (rc)
+ CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
+ gsec->gs_rvs_hdl, rc);
+ RETURN(rc);
}
-#if 0
-static int
-gss_svc_unseal_request(struct ptlrpc_request *req,
- struct rsc *rsci,
- struct gss_wire_cred *gc,
- __u32 *vp, __u32 vlen)
-{
- struct ptlrpcs_wire_hdr *sec_hdr;
- struct gss_ctx *ctx = rsci->mechctx;
- rawobj_t cipher_text, plain_text;
- __u32 major;
+#else /* !HAVE_SUNRPC_CACHE_V2 */
+
+int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
+ struct gss_sec *gsec,
+ struct gss_cli_ctx *gctx)
+{
+ struct rsc rsci, *rscp;
+ unsigned long ctx_expiry;
+ __u32 major;
+ int rc;
ENTRY;
- sec_hdr = (struct ptlrpcs_wire_hdr *) req->rq_reqbuf;
+ memset(&rsci, 0, sizeof(rsci));
+
+ if (rawobj_alloc(&rsci.handle, (char *) &gsec->gs_rvs_hdl,
+ sizeof(gsec->gs_rvs_hdl)))
+ GOTO(out, rc = -ENOMEM);
- if (vlen < 4) {
- CERROR("vlen only %u\n", vlen);
- RETURN(GSS_S_CALL_BAD_STRUCTURE);
- }
+ major = lgss_copy_reverse_context(gctx->gc_mechctx,
+ &rsci.ctx.gsc_mechctx);
+ if (major != GSS_S_COMPLETE)
+ GOTO(out, rc = -ENOMEM);
- cipher_text.len = le32_to_cpu(*vp++);
- cipher_text.data = (__u8 *) vp;
- vlen -= 4;
-
- if (cipher_text.len > vlen) {
- CERROR("cipher claimed %u while buf only %u\n",
- cipher_text.len, vlen);
- RETURN(GSS_S_CALL_BAD_STRUCTURE);
+ if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
+ CERROR("unable to get expire time, drop it\n");
+ GOTO(out, rc = -ENOMEM);
}
+ rsci.h.expiry_time = (time_t) ctx_expiry;
- plain_text = cipher_text;
+ /* FIXME */
+ rsci.ctx.gsc_usr_root = 1;
+ rsci.ctx.gsc_usr_mds = 1;
+ rsci.ctx.gsc_reverse = 1;
- major = lgss_unwrap(ctx, GSS_C_QOP_DEFAULT, &cipher_text, &plain_text);
- if (major) {
- CERROR("unwrap error 0x%x\n", major);
- RETURN(major);
+ rscp = rsc_lookup(&rsci, 1);
+ if (rscp == NULL) {
+ CERROR("rsc lookup failed\n");
+ GOTO(out, rc = -ENOMEM);
}
- if (gss_check_seq_num(&rsci->seqdata, gc->gc_seq)) {
- CERROR("discard replayed request %p(o%u,x"LPU64",t"LPU64")\n",
- req, req->rq_reqmsg->opc, req->rq_xid,
- req->rq_reqmsg->transno);
- RETURN(GSS_S_DUPLICATE_TOKEN);
+ rscp->target = imp->imp_obd;
+ rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
+
+ CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
+ &rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
+ rsc_put(&rscp->h, &rsc_cache);
+ rc = 0;
+out:
+ rsc_free(&rsci);
+ if (rc)
+ CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
+ gsec->gs_rvs_hdl, rc);
+ RETURN(rc);
+}
+
+#endif /* HAVE_SUNRPC_CACHE_V2 */
+
+int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
+{
+ const cfs_time_t expire = 20;
+ struct rsc *rscp;
+
+ rscp = gss_svc_searchbyctx(handle);
+ if (rscp) {
+ CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
+ &rscp->ctx, rscp);
+
+ rscp->h.expiry_time = cfs_time_current_sec() + expire;
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
}
+ return 0;
+}
- req->rq_reqmsg = (struct lustre_msg *) (vp);
- req->rq_reqlen = plain_text.len;
+int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
+{
+ struct rsc *rscp = container_of(ctx, struct rsc, ctx);
- CDEBUG(D_SEC, "msg len %d\n", req->rq_reqlen);
+ return rawobj_dup(handle, &rscp->handle);
+}
- RETURN(GSS_S_COMPLETE);
+int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
+{
+ struct rsc *rscp;
+
+ rscp = gss_svc_searchbyctx(handle);
+ if (rscp) {
+ CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
+ &rscp->ctx, rscp, seq + 1);
+
+ rscp->ctx.gsc_rvs_seq = seq + 1;
+ COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
+ }
+ return 0;
}
-#endif
-static
-struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
+static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
{
return NULL;
}
GOTO(out, rc);
}
+#ifdef HAVE_SUNRPC_CACHE_V2
+ rsip = rsi_lookup(&rsikey);
+#else
rsip = rsi_lookup(&rsikey, 0);
+#endif
rsi_free(&rsikey);
if (!rsip) {
CERROR("error in rsi_lookup.\n");
cache_check:
/* Note each time cache_check() will drop a reference if return
* non-zero. We hold an extra reference on initial rsip, but must
- * take care of following calls.
- */
+ * take care of following calls. */
rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
switch (rc) {
case -EAGAIN: {
break;
case 0:
/* if not the first check, we have to release the extra
- * reference we just added on it.
- */
+ * reference we just added on it. */
if (!first_check)
cache_put(&rsip->h, &rsi_cache);
CDEBUG(D_SEC, "cache_check is good\n");
rsci->target = target;
- CWARN("server create rsc %p(%u->%s)\n",
- rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));
+ CDEBUG(D_SEC, "server create rsc %p(%u->%s)\n",
+ rsci, rsci->ctx.gsc_uid, libcfs_nid2str(req->rq_peer.nid));
if (rsip->out_handle.len > PTLRPC_GSS_MAX_HANDLE_SIZE) {
CERROR("handle size %u too large\n", rsip->out_handle.len);
grctx->src_init = 1;
grctx->src_reserve_len = size_round4(rsip->out_token.len);
- rc = lustre_pack_reply_v2(req, 1, &replen, NULL);
+ rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
if (rc) {
CERROR("failed to pack reply: %d\n", rc);
GOTO(out, rc = SECSVC_DROP);
rs->rs_repdata_len = lustre_shrink_msg(rs->rs_repbuf, 2,
rsip->out_token.len, 0);
- if (rsci->ctx.gsc_usr_mds)
- CWARN("user from %s authenticated as mds\n",
- libcfs_nid2str(req->rq_peer.nid));
-
rc = SECSVC_OK;
out:
/* it looks like here we should put rsip also, but this mess up
- * with NFS cache mgmt code... FIXME
- */
+ * with NFS cache mgmt code... FIXME */
#if 0
if (rsip)
rsi_put(&rsip->h, &rsi_cache);
/* if anything went wrong, we don't keep the context too */
if (rc != SECSVC_OK)
set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ else
+ CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
+ gss_handle_to_u64(&rsci->handle));
- rsc_put(&rsci->h, &rsc_cache);
+ COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
}
RETURN(rc);
}
rsc = gss_svc_searchbyctx(&gw->gw_handle);
if (!rsc) {
- CWARN("Invalid gss context handle from %s\n",
+ CWARN("Invalid gss ctx idx "LPX64" from %s\n",
+ gss_handle_to_u64(&gw->gw_handle),
libcfs_nid2str(req->rq_peer.nid));
return NULL;
}
{
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
- rsc_put(&rsc->h, &rsc_cache);
+ COMPAT_RSC_PUT(&rsc->h, &rsc_cache);
}
void gss_svc_upcall_destroy_ctx(struct gss_svc_ctx *ctx)
{
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
+ /* can't be found */
set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ /* to be removed at next scan */
+ rsc->h.expiry_time = 1;
}
int __init gss_init_svc_upcall(void)
* the init upcall channel, otherwise there's big chance that the first
* upcall issued before the channel be opened thus nfsv4 cache code will
* drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds.
- */
+ * here we wait at maximum 1.5 seconds. */
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
CWARN("Init channel is not opened by lsvcgssd, following "
"request might be dropped until lsvcgssd is active\n");
- /*
- * this helps reducing context index confliction. after server reboot,
+ /* this helps reducing context index conflicts. after server reboot,
* conflicting request from clients might be filtered out by initial
* sequence number checking, thus no chance to sent error notification
- * back to clients.
- */
+ * back to clients. */
get_random_bytes(&__ctx_index, sizeof(__ctx_index));
return 0;