#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/mutex.h>
+#include <linux/sunrpc/cache.h>
#else
#include <liblustre.h>
#endif
-#include <linux/sunrpc/cache.h>
-
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
return idx;
}
-static inline
-unsigned long hash_mem(char *buf, int length, int bits)
+static inline unsigned long hash_mem(char *buf, int length, int bits)
{
unsigned long hash = 0;
unsigned long l = 0;
static struct cache_detail rsi_cache;
static struct rsi *rsi_lookup(struct rsi *item, int set);
-static
-void rsi_free(struct rsi *rsi)
+static void rsi_free(struct rsi *rsi)
{
rawobj_free(&rsi->in_handle);
rawobj_free(&rsi->in_token);
rawobj_free(&rsi->out_token);
}
-static
-void rsi_put(struct cache_head *item, struct cache_detail *cd)
+static void rsi_put(struct cache_head *item, struct cache_detail *cd)
{
struct rsi *rsi = container_of(item, struct rsi, h);
}
}
-static inline
-int rsi_hash(struct rsi *item)
+static inline int rsi_hash(struct rsi *item)
{
return hash_mem((char *)item->in_handle.data, item->in_handle.len,
RSI_HASHBITS) ^
RSI_HASHBITS);
}
-static inline
-int rsi_match(struct rsi *item, struct rsi *tmp)
+static inline int rsi_match(struct rsi *item, struct rsi *tmp)
{
return (rawobj_equal(&item->in_handle, &tmp->in_handle) &&
rawobj_equal(&item->in_token, &tmp->in_token));
}
-static
-void rsi_request(struct cache_detail *cd,
+static void rsi_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
{
(*bpp)[-1] = '\n';
}
-static inline
-void rsi_init(struct rsi *new, struct rsi *item)
+static inline void rsi_init(struct rsi *new, struct rsi *item)
{
new->out_handle = RAWOBJ_EMPTY;
new->out_token = RAWOBJ_EMPTY;
init_waitqueue_head(&new->waitq);
}
-static inline
-void rsi_update(struct rsi *new, struct rsi *item)
+static inline void rsi_update(struct rsi *new, struct rsi *item)
{
LASSERT(new->out_handle.len == 0);
LASSERT(new->out_token.len == 0);
new->minor_status = item->minor_status;
}
-static
-int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
+static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
char *buf = mesg;
char *ep;
static struct cache_detail rsc_cache;
static struct rsc *rsc_lookup(struct rsc *item, int set);
-static
-void rsc_free(struct rsc *rsci)
+static void rsc_free(struct rsc *rsci)
{
rawobj_free(&rsci->handle);
rawobj_free(&rsci->ctx.gsc_rvs_hdl);
lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
}
-static
-void rsc_put(struct cache_head *item, struct cache_detail *cd)
+static void rsc_put(struct cache_head *item, struct cache_detail *cd)
{
struct rsc *rsci = container_of(item, struct rsc, h);
}
}
-static inline
-int rsc_hash(struct rsc *rsci)
+static inline int rsc_hash(struct rsc *rsci)
{
return hash_mem((char *)rsci->handle.data,
rsci->handle.len, RSC_HASHBITS);
}
-static inline
-int rsc_match(struct rsc *new, struct rsc *tmp)
+static inline int rsc_match(struct rsc *new, struct rsc *tmp)
{
return rawobj_equal(&new->handle, &tmp->handle);
}
-static inline
-void rsc_init(struct rsc *new, struct rsc *tmp)
+static inline void rsc_init(struct rsc *new, struct rsc *tmp)
{
new->handle = tmp->handle;
tmp->handle = RAWOBJ_EMPTY;
new->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
}
-static inline
-void rsc_update(struct rsc *new, struct rsc *tmp)
+static inline void rsc_update(struct rsc *new, struct rsc *tmp)
{
new->ctx = tmp->ctx;
tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
-static
-int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
+static int rsc_parse(struct cache_detail *cd, char *mesg, int mlen)
{
char *buf = mesg;
int len, rv, tmp_int;
}
/* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech.
- */
+ * is invalid, here we retrieve it from mech. */
if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
CERROR("unable to get expire time, drop it\n");
lgss_mech_put(gm);
typedef int rsc_entry_match(struct rsc *rscp, long data);
-static
-void rsc_flush(rsc_entry_match *match, long data)
+static void rsc_flush(rsc_entry_match *match, long data)
{
struct cache_head **ch;
struct rsc *rscp;
EXIT;
}
-static
-int match_uid(struct rsc *rscp, long uid)
+static int match_uid(struct rsc *rscp, long uid)
{
if ((int) uid == -1)
return 1;
return ((int) rscp->ctx.gsc_uid == (int) uid);
}
-static
-int match_target(struct rsc *rscp, long target)
+static int match_target(struct rsc *rscp, long target)
{
return (rscp->target == (struct obd_device *) target);
}
-static inline
-void rsc_flush_uid(int uid)
+static inline void rsc_flush_uid(int uid)
{
if (uid == -1)
CWARN("flush all gss contexts...\n");
rsc_flush(match_uid, (long) uid);
}
-static inline
-void rsc_flush_target(struct obd_device *target)
+static inline void rsc_flush_target(struct obd_device *target)
{
rsc_flush(match_target, (long) target);
}
static DefineSimpleCacheLookup(rsc, 0);
-static
-struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
+static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
struct rsc rsci;
struct rsc *found;
rscp = rsc_lookup(&rsci, 1);
rsc_free(&rsci);
- if (rscp)
- rsc_put(&rscp->h, &rsc_cache);
+ if (rscp) {
+ /* FIXME */
+ rscp->ctx.gsc_usr_root = 1;
+ rscp->ctx.gsc_usr_mds= 1;
+ rscp->ctx.gsc_reverse = 1;
+
+ rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
- CDEBUG(D_SEC, "client installed reverse svc ctx to %s: idx "LPX64"\n",
- imp->imp_obd->u.cli.cl_target_uuid.uuid, gsec->gs_rvs_hdl);
+ CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
+ &rscp->ctx, obd2cli_tgt(imp->imp_obd),
+ gsec->gs_rvs_hdl);
+
+ rsc_put(&rscp->h, &rsc_cache);
+ }
RETURN(0);
}
-static
-struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
+int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
+{
+ const cfs_time_t expire = 20;
+ struct rsc *rscp;
+
+ rscp = gss_svc_searchbyctx(handle);
+ if (rscp) {
+ CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
+ &rscp->ctx, rscp);
+
+ rscp->h.expiry_time = cfs_time_current_sec() + expire;
+ rsc_put(&rscp->h, &rsc_cache);
+ }
+ return 0;
+}
+
+int gss_svc_upcall_dup_handle(rawobj_t *handle, struct gss_svc_ctx *ctx)
+{
+ struct rsc *rscp = container_of(ctx, struct rsc, ctx);
+
+ return rawobj_dup(handle, &rscp->handle);
+}
+
+int gss_svc_upcall_update_sequence(rawobj_t *handle, __u32 seq)
+{
+ struct rsc *rscp;
+
+ rscp = gss_svc_searchbyctx(handle);
+ if (rscp) {
+ CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) update seq to %u\n",
+ &rscp->ctx, rscp, seq + 1);
+
+ rscp->ctx.gsc_rvs_seq = seq + 1;
+ rsc_put(&rscp->h, &rsc_cache);
+ }
+ return 0;
+}
+
+static struct cache_deferred_req* cache_upcall_defer(struct cache_req *req)
{
return NULL;
}
cache_check:
/* Note each time cache_check() will drop a reference if return
* non-zero. We hold an extra reference on initial rsip, but must
- * take care of following calls.
- */
+ * take care of following calls. */
rc = cache_check(&rsi_cache, &rsip->h, &cache_upcall_chandle);
switch (rc) {
case -EAGAIN: {
break;
case 0:
/* if not the first check, we have to release the extra
- * reference we just added on it.
- */
+ * reference we just added on it. */
if (!first_check)
cache_put(&rsip->h, &rsi_cache);
CDEBUG(D_SEC, "cache_check is good\n");
out:
/* it looks like here we should put rsip also, but this mess up
- * with NFS cache mgmt code... FIXME
- */
+ * with NFS cache mgmt code... FIXME */
#if 0
if (rsip)
rsi_put(&rsip->h, &rsi_cache);
/* if anything went wrong, we don't keep the context too */
if (rc != SECSVC_OK)
set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ else
+ CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
+ gss_handle_to_u64(&rsci->handle));
rsc_put(&rsci->h, &rsc_cache);
}
rsc = gss_svc_searchbyctx(&gw->gw_handle);
if (!rsc) {
- CWARN("Invalid gss context handle from %s\n",
+ CWARN("Invalid gss ctx idx "LPX64" from %s\n",
+ gss_handle_to_u64(&gw->gw_handle),
libcfs_nid2str(req->rq_peer.nid));
return NULL;
}
{
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
+ /* can't be found */
set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ /* to be removed at next scan */
+ rsc->h.expiry_time = 1;
}
int __init gss_init_svc_upcall(void)
* the init upcall channel, otherwise there's big chance that the first
* upcall issued before the channel be opened thus nfsv4 cache code will
* drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds.
- */
+ * here we wait at maximum 1.5 seconds. */
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
CWARN("Init channel is not opened by lsvcgssd, following "
"request might be dropped until lsvcgssd is active\n");
- /*
- * this helps reducing context index confliction. after server reboot,
+/* this helps reduce context index conflicts. after server reboot,
* conflicting request from clients might be filtered out by initial
* sequence number checking, thus no chance to sent error notification
- * back to clients.
- */
+ * back to clients. */
get_random_bytes(&__ctx_index, sizeof(__ctx_index));
return 0;