#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/random.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/mutex.h>
#include <linux/sunrpc/cache.h>
+#include <net/sock.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
-#include <lustre/lustre_idl.h>
-#include <lustre_net.h>
#include <lustre_import.h>
+#include <lustre_net.h>
+#include <lustre_nodemap.h>
#include <lustre_sec.h>
#include "gss_err.h"
#include "gss_internal.h"
#include "gss_api.h"
+#include "gss_crypto.h"
#define GSS_SVC_UPCALL_TIMEOUT (20)
-static spinlock_t __ctx_index_lock;
+static DEFINE_SPINLOCK(__ctx_index_lock);
static __u64 __ctx_index;
+unsigned int krb5_allow_old_client_csum;
+
__u64 gss_get_next_ctx_index(void)
{
__u64 idx;
return hash >> (BITS_PER_LONG - bits);
}
-/* This compatibility can be removed once kernel 3.3 is used,
- * since cache_register_net/cache_unregister_net are exported.
- * Note that since kernel 3.4 cache_register and cache_unregister
- * are removed.
-*/
-static inline int _cache_register_net(struct cache_detail *cd, struct net *net)
-{
-#ifdef HAVE_CACHE_REGISTER
- return cache_register(cd);
-#else
- return cache_register_net(cd, net);
-#endif
-}
-static inline void _cache_unregister_net(struct cache_detail *cd,
- struct net *net)
-{
-#ifdef HAVE_CACHE_REGISTER
- cache_unregister(cd);
-#else
- cache_unregister_net(cd, net);
-#endif
-}
/****************************************
- * rsi cache *
+ * rpc sec init (rsi) cache *
****************************************/
#define RSI_HASHBITS (6)
struct cache_head h;
__u32 lustre_svc;
__u64 nid;
+ char nm_name[LUSTRE_NODEMAP_NAME_LENGTH + 1];
wait_queue_head_t waitq;
rawobj_t in_handle, in_token;
rawobj_t out_handle, out_token;
int major_status, minor_status;
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+ struct rcu_head rcu_head;
+#endif
};
+#ifdef HAVE_CACHE_HEAD_HLIST
+static struct hlist_head rsi_table[RSI_HASHMAX];
+#else
static struct cache_head *rsi_table[RSI_HASHMAX];
+#endif
static struct cache_detail rsi_cache;
static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct rsi *item);
+#ifdef HAVE_CACHE_DETAIL_WRITERS
+static inline int channel_users(struct cache_detail *cd)
+{
+ return atomic_read(&cd->writers);
+}
+#else
+static inline int channel_users(struct cache_detail *cd)
+{
+ return atomic_read(&cd->readers);
+}
+#endif
+
static inline int rsi_hash(struct rsi *item)
{
return hash_mem((char *)item->in_handle.data, item->in_handle.len,
rawobj_free(&rsi->out_token);
}
+/* See handle_channel_req() in userspace for where the upcall data is read */
static void rsi_request(struct cache_detail *cd,
struct cache_head *h,
char **bpp, int *blen)
sizeof(rsi->lustre_svc));
qword_addhex(bpp, blen, (char *) &rsi->nid, sizeof(rsi->nid));
qword_addhex(bpp, blen, (char *) &index, sizeof(index));
+ qword_addhex(bpp, blen, (char *) rsi->nm_name,
+ strlen(rsi->nm_name) + 1);
qword_addhex(bpp, blen, rsi->in_handle.data, rsi->in_handle.len);
qword_addhex(bpp, blen, rsi->in_token.data, rsi->in_token.len);
(*bpp)[-1] = '\n';
}
-#ifdef HAVE_SUNRPC_UPCALL_HAS_3ARGS
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
-}
-#else
-
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h);
-}
-#endif
-
static inline void __rsi_init(struct rsi *new, struct rsi *item)
{
new->out_handle = RAWOBJ_EMPTY;
new->lustre_svc = item->lustre_svc;
new->nid = item->nid;
+ memcpy(new->nm_name, item->nm_name, sizeof(item->nm_name));
init_waitqueue_head(&new->waitq);
}
new->minor_status = item->minor_status;
}
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+static void rsi_free_rcu(struct rcu_head *head)
+{
+ struct rsi *rsi = container_of(head, struct rsi, rcu_head);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+ LASSERT(hlist_unhashed(&rsi->h.cache_list));
+#else
+ LASSERT(rsi->h.next == NULL);
+#endif
+ rsi_free(rsi);
+ OBD_FREE_PTR(rsi);
+}
+
+static void rsi_put(struct kref *ref)
+{
+ struct rsi *rsi = container_of(ref, struct rsi, h.ref);
+
+ call_rcu(&rsi->rcu_head, rsi_free_rcu);
+}
+#else /* !HAVE_CACHE_HASH_SPINLOCK */
static void rsi_put(struct kref *ref)
{
- struct rsi *rsi = container_of(ref, struct rsi, h.ref);
+ struct rsi *rsi = container_of(ref, struct rsi, h.ref);
- LASSERT(rsi->h.next == NULL);
- rsi_free(rsi);
- OBD_FREE_PTR(rsi);
+#ifdef HAVE_CACHE_HEAD_HLIST
+ LASSERT(hlist_unhashed(&rsi->h.cache_list));
+#else
+ LASSERT(rsi->h.next == NULL);
+#endif
+ rsi_free(rsi);
+ OBD_FREE_PTR(rsi);
}
+#endif /* HAVE_CACHE_HASH_SPINLOCK */
static int rsi_match(struct cache_head *a, struct cache_head *b)
{
static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
char *buf = mesg;
- char *ep;
int len;
struct rsi rsii, *rsip = NULL;
- time_t expiry;
+ time64_t expiry;
int status = -EINVAL;
ENTRY;
if (len <= 0)
goto out;
- /* major */
- rsii.major_status = simple_strtol(buf, &ep, 10);
- if (*ep)
- goto out;
+ /* major */
+ status = kstrtoint(buf, 10, &rsii.major_status);
+ if (status)
+ goto out;
- /* minor */
- len = qword_get(&mesg, buf, mlen);
- if (len <= 0)
- goto out;
- rsii.minor_status = simple_strtol(buf, &ep, 10);
- if (*ep)
- goto out;
+ /* minor */
+ len = qword_get(&mesg, buf, mlen);
+ if (len <= 0) {
+ status = -EINVAL;
+ goto out;
+ }
+
+ status = kstrtoint(buf, 10, &rsii.minor_status);
+ if (status)
+ goto out;
/* out_handle */
len = qword_get(&mesg, buf, mlen);
.hash_table = rsi_table,
.name = "auth.sptlrpc.init",
.cache_put = rsi_put,
-#ifndef HAVE_SUNRPC_UPCALL_HAS_3ARGS
.cache_request = rsi_request,
-#endif
- .cache_upcall = rsi_upcall,
+ .cache_upcall = sunrpc_cache_pipe_upcall,
.cache_parse = rsi_parse,
.match = rsi_match,
.init = rsi_init,
}
/****************************************
- * rsc cache *
+ * rpc sec context (rsc) cache *
****************************************/
#define RSC_HASHBITS (10)
struct obd_device *target;
rawobj_t handle;
struct gss_svc_ctx ctx;
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+ struct rcu_head rcu_head;
+#endif
};
+#ifdef HAVE_CACHE_HEAD_HLIST
+static struct hlist_head rsc_table[RSC_HASHMAX];
+#else
static struct cache_head *rsc_table[RSC_HASHMAX];
+#endif
static struct cache_detail rsc_cache;
static struct rsc *rsc_update(struct rsc *new, struct rsc *old);
static struct rsc *rsc_lookup(struct rsc *item);
static inline void __rsc_update(struct rsc *new, struct rsc *tmp)
{
- new->ctx = tmp->ctx;
- tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
- tmp->ctx.gsc_mechctx = NULL;
+ new->ctx = tmp->ctx;
+ memset(&tmp->ctx, 0, sizeof(tmp->ctx));
+ tmp->ctx.gsc_rvs_hdl = RAWOBJ_EMPTY;
+ tmp->ctx.gsc_mechctx = NULL;
+ tmp->target = NULL;
- memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
+ memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+static void rsc_free_rcu(struct rcu_head *head)
+{
+ struct rsc *rsci = container_of(head, struct rsc, rcu_head);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+ LASSERT(hlist_unhashed(&rsci->h.cache_list));
+#else
+ LASSERT(rsci->h.next == NULL);
+#endif
+ rawobj_free(&rsci->handle);
+ OBD_FREE_PTR(rsci);
+}
+
static void rsc_put(struct kref *ref)
{
- struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+ struct rsc *rsci = container_of(ref, struct rsc, h.ref);
- LASSERT(rsci->h.next == NULL);
- rsc_free(rsci);
- OBD_FREE_PTR(rsci);
+ rawobj_free(&rsci->ctx.gsc_rvs_hdl);
+ lgss_delete_sec_context(&rsci->ctx.gsc_mechctx);
+ call_rcu(&rsci->rcu_head, rsc_free_rcu);
}
+#else /* !HAVE_CACHE_HASH_SPINLOCK */
+static void rsc_put(struct kref *ref)
+{
+ struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+
+#ifdef HAVE_CACHE_HEAD_HLIST
+ LASSERT(hlist_unhashed(&rsci->h.cache_list));
+#else
+ LASSERT(rsci->h.next == NULL);
+#endif
+ rsc_free(rsci);
+ OBD_FREE_PTR(rsci);
+}
+#endif /* HAVE_CACHE_HASH_SPINLOCK */
static int rsc_match(struct cache_head *a, struct cache_head *b)
{
char *buf = mesg;
int len, rv, tmp_int;
struct rsc rsci, *rscp = NULL;
- time_t expiry;
+ time64_t expiry;
int status = -EINVAL;
struct gss_api_mech *gm = NULL;
}
rsci.ctx.gsc_remote = (tmp_int != 0);
- /* root user flag */
- rv = get_int(&mesg, &tmp_int);
- if (rv) {
- CERROR("fail to get oss user flag\n");
- goto out;
- }
- rsci.ctx.gsc_usr_root = (tmp_int != 0);
+ /* root user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get root user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_root = (tmp_int != 0);
/* mds user flag */
rv = get_int(&mesg, &tmp_int);
CERROR("NOENT? set rsc entry negative\n");
set_bit(CACHE_NEGATIVE, &rsci.h.flags);
} else {
- rawobj_t tmp_buf;
- unsigned long ctx_expiry;
-
- /* gid */
- if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
- goto out;
-
- /* mech name */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
- gm = lgss_name_to_mech(buf);
- status = -EOPNOTSUPP;
- if (!gm)
- goto out;
-
- status = -EINVAL;
- /* mech-specific data: */
- len = qword_get(&mesg, buf, mlen);
- if (len < 0)
- goto out;
-
- tmp_buf.len = len;
- tmp_buf.data = (unsigned char *)buf;
- if (lgss_import_sec_context(&tmp_buf, gm,
- &rsci.ctx.gsc_mechctx))
- goto out;
-
- /* currently the expiry time passed down from user-space
- * is invalid, here we retrive it from mech. */
- if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
- CERROR("unable to get expire time, drop it\n");
- goto out;
- }
- expiry = (time_t) ctx_expiry;
+ rawobj_t tmp_buf;
+ time64_t ctx_expiry;
+
+ /* gid */
+ if (get_int(&mesg, (int *) &rsci.ctx.gsc_gid))
+ goto out;
+
+ /* mech name */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+ gm = lgss_name_to_mech(buf);
+ status = -EOPNOTSUPP;
+ if (!gm)
+ goto out;
+
+ status = -EINVAL;
+ /* mech-specific data: */
+ len = qword_get(&mesg, buf, mlen);
+ if (len < 0)
+ goto out;
+
+ tmp_buf.len = len;
+ tmp_buf.data = (unsigned char *)buf;
+ if (lgss_import_sec_context(&tmp_buf, gm,
+ &rsci.ctx.gsc_mechctx))
+ goto out;
+
+ /* set to seconds since machine booted */
+ expiry = ktime_get_seconds();
+
+		/* currently the expiry time passed down from user-space
+		 * is invalid, here we retrieve it from mech.
+		 */
+ if (lgss_inquire_context(rsci.ctx.gsc_mechctx, &ctx_expiry)) {
+ CERROR("unable to get expire time, drop it\n");
+ goto out;
+ }
+
+ /* ctx_expiry is the number of seconds since Jan 1 1970.
+ * We want just the number of seconds into the future.
+ */
+ expiry += ctx_expiry - ktime_get_real_seconds();
}
rsci.h.expiry_time = expiry;
* rsc cache flush *
****************************************/
-typedef int rsc_entry_match(struct rsc *rscp, long data);
-
-static void rsc_flush(rsc_entry_match *match, long data)
-{
- struct cache_head **ch;
- struct rsc *rscp;
- int n;
- ENTRY;
-
- write_lock(&rsc_cache.hash_lock);
- for (n = 0; n < RSC_HASHMAX; n++) {
- for (ch = &rsc_cache.hash_table[n]; *ch;) {
- rscp = container_of(*ch, struct rsc, h);
-
- if (!match(rscp, data)) {
- ch = &((*ch)->next);
- continue;
- }
-
- /* it seems simply set NEGATIVE doesn't work */
- *ch = (*ch)->next;
- rscp->h.next = NULL;
- cache_get(&rscp->h);
- set_bit(CACHE_NEGATIVE, &rscp->h.flags);
- COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
- rsc_cache.entries--;
- }
- }
- write_unlock(&rsc_cache.hash_lock);
- EXIT;
-}
-
-static int match_uid(struct rsc *rscp, long uid)
-{
- if ((int) uid == -1)
- return 1;
- return ((int) rscp->ctx.gsc_uid == (int) uid);
-}
-
-static int match_target(struct rsc *rscp, long target)
-{
- return (rscp->target == (struct obd_device *) target);
-}
-
-static inline void rsc_flush_uid(int uid)
-{
- if (uid == -1)
- CWARN("flush all gss contexts...\n");
-
- rsc_flush(match_uid, (long) uid);
-}
-
-static inline void rsc_flush_target(struct obd_device *target)
-{
- rsc_flush(match_target, (long) target);
-}
-
-void gss_secsvc_flush(struct obd_device *target)
-{
- rsc_flush_target(target);
-}
-
static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
struct rsc rsci;
struct gss_cli_ctx *gctx)
{
struct rsc rsci, *rscp = NULL;
- unsigned long ctx_expiry;
+ time64_t ctx_expiry;
__u32 major;
int rc;
ENTRY;
CERROR("unable to get expire time, drop it\n");
GOTO(out, rc = -EINVAL);
}
- rsci.h.expiry_time = (time_t) ctx_expiry;
+ rsci.h.expiry_time = ctx_expiry;
- if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0)
- rsci.ctx.gsc_usr_mds = 1;
- else if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0)
- rsci.ctx.gsc_usr_oss = 1;
- else
- rsci.ctx.gsc_usr_root = 1;
+ switch (imp->imp_obd->u.cli.cl_sp_to) {
+ case LUSTRE_SP_MDT:
+ rsci.ctx.gsc_usr_mds = 1;
+ break;
+ case LUSTRE_SP_OST:
+ rsci.ctx.gsc_usr_oss = 1;
+ break;
+ case LUSTRE_SP_CLI:
+ rsci.ctx.gsc_usr_root = 1;
+ break;
+ case LUSTRE_SP_MGS:
+ /* by convention, all 3 set to 1 means MGS */
+ rsci.ctx.gsc_usr_mds = 1;
+ rsci.ctx.gsc_usr_oss = 1;
+ rsci.ctx.gsc_usr_root = 1;
+ break;
+ default:
+ break;
+ }
rscp = rsc_update(&rsci, rscp);
if (rscp == NULL)
rscp->target = imp->imp_obd;
rawobj_dup(&gctx->gc_svc_handle, &rscp->handle);
- CWARN("create reverse svc ctx %p to %s: idx "LPX64"\n",
+ CWARN("create reverse svc ctx %p to %s: idx %#llx\n",
&rscp->ctx, obd2cli_tgt(imp->imp_obd), gsec->gs_rvs_hdl);
rc = 0;
out:
rsc_free(&rsci);
if (rc)
- CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
+ CERROR("create reverse svc ctx: idx %#llx, rc %d\n",
gsec->gs_rvs_hdl, rc);
RETURN(rc);
}
int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
{
- const cfs_time_t expire = 20;
- struct rsc *rscp;
+ const time64_t expire = 20;
+ struct rsc *rscp;
rscp = gss_svc_searchbyctx(handle);
if (rscp) {
CDEBUG(D_SEC, "reverse svcctx %p (rsc %p) expire soon\n",
&rscp->ctx, rscp);
- rscp->h.expiry_time = cfs_time_current_sec() + expire;
+ rscp->h.expiry_time = ktime_get_real_seconds() + expire;
COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
}
return 0;
struct ptlrpc_reply_state *rs;
struct rsc *rsci = NULL;
struct rsi *rsip = NULL, rsikey;
- wait_queue_t wait;
+ wait_queue_entry_t wait;
int replen = sizeof(struct ptlrpc_body);
struct gss_rep_header *rephdr;
int first_check = 1;
int rc = SECSVC_DROP;
ENTRY;
- memset(&rsikey, 0, sizeof(rsikey));
- rsikey.lustre_svc = lustre_svc;
- rsikey.nid = (__u64) req->rq_peer.nid;
+ memset(&rsikey, 0, sizeof(rsikey));
+ rsikey.lustre_svc = lustre_svc;
+	/* In case of MR, rq_peer is not the NID from which the request was
+	 * received, but the primary NID of the peer.
+	 * So we need rq_source, which contains the NID actually in use.
+	 */
+ rsikey.nid = (__u64) req->rq_source.nid;
+ nodemap_test_nid(req->rq_peer.nid, rsikey.nm_name,
+ sizeof(rsikey.nm_name));
/* duplicate context handle. for INIT it always 0 */
if (rawobj_dup(&rsikey.in_handle, &gw->gw_handle)) {
}
cache_get(&rsip->h); /* take an extra ref */
- init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry_current(&wait);
+ init_wait(&wait);
add_wait_queue(&rsip->waitq, &wait);
cache_check:
switch (rc) {
case -ETIMEDOUT:
case -EAGAIN: {
- int valid;
+ int valid;
- if (first_check) {
- first_check = 0;
+ if (first_check) {
+ first_check = 0;
- read_lock(&rsi_cache.hash_lock);
+ cache_read_lock(&rsi_cache);
valid = test_bit(CACHE_VALID, &rsip->h.flags);
- if (valid == 0)
+ if (valid == 0)
set_current_state(TASK_INTERRUPTIBLE);
- read_unlock(&rsi_cache.hash_lock);
+ cache_read_unlock(&rsi_cache);
if (valid == 0) {
- unsigned long jiffies;
- jiffies = msecs_to_jiffies(MSEC_PER_SEC *
- GSS_SVC_UPCALL_TIMEOUT);
- schedule_timeout(jiffies);
+ unsigned long timeout;
+
+ timeout = cfs_time_seconds(GSS_SVC_UPCALL_TIMEOUT);
+ schedule_timeout(timeout);
}
cache_get(&rsip->h);
goto cache_check;
- }
- CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
- break;
- }
- case -ENOENT:
- CWARN("cache_check return ENOENT, drop\n");
- break;
- case 0:
- /* if not the first check, we have to release the extra
- * reference we just added on it. */
+ }
+ CWARN("waited %ds timeout, drop\n", GSS_SVC_UPCALL_TIMEOUT);
+ break;
+ }
+ case -ENOENT:
+ CDEBUG(D_SEC, "cache_check return ENOENT, drop\n");
+ break;
+ case 0:
+ /* if not the first check, we have to release the extra
+ * reference we just added on it. */
if (!first_check)
cache_put(&rsip->h, &rsi_cache);
CDEBUG(D_SEC, "cache_check is good\n");
if (!rsci) {
CERROR("authentication failed\n");
- if (!gss_pack_err_notify(req, GSS_S_FAILURE, 0))
- rc = SECSVC_COMPLETE;
+ /* gss mechanism returned major and minor code so we return
+ * those in error message */
+ if (!gss_pack_err_notify(req, rsip->major_status,
+ rsip->minor_status))
+ rc = SECSVC_COMPLETE;
GOTO(out, rc);
} else {
grctx->src_ctx = &rsci->ctx;
}
+ if (gw->gw_flags & LUSTRE_GSS_PACK_KCSUM) {
+ grctx->src_ctx->gsc_mechctx->hash_func = gss_digest_hash;
+ } else if (!strcmp(grctx->src_ctx->gsc_mechctx->mech_type->gm_name,
+ "krb5") &&
+ !krb5_allow_old_client_csum) {
+ CWARN("%s: deny connection from '%s' due to missing 'krb_csum' feature, set 'sptlrpc.gss.krb5_allow_old_client_csum=1' to allow, but recommend client upgrade: rc = %d\n",
+ target->obd_name, libcfs_nid2str(req->rq_peer.nid),
+ -EPROTO);
+ GOTO(out, rc = SECSVC_DROP);
+ } else {
+ grctx->src_ctx->gsc_mechctx->hash_func =
+ gss_digest_hash_compat;
+ }
+
if (rawobj_dup(&rsci->ctx.gsc_rvs_hdl, rvs_hdl)) {
CERROR("failed duplicate reverse handle\n");
GOTO(out, rc);
rc = SECSVC_OK;
out:
- /* it looks like here we should put rsip also, but this mess up
- * with NFS cache mgmt code... FIXME */
-#if 0
- if (rsip)
- rsi_put(&rsip->h, &rsi_cache);
-#endif
-
- if (rsci) {
- /* if anything went wrong, we don't keep the context too */
- if (rc != SECSVC_OK)
+	/* it looks like here we should put rsip also, but this messes up
+	 * the NFS cache mgmt code... FIXME
+	 * something like:
+	 * if (rsip)
+	 *	rsi_put(&rsip->h, &rsi_cache); */
+
+ if (rsci) {
+ /* if anything went wrong, we don't keep the context too */
+ if (rc != SECSVC_OK)
set_bit(CACHE_NEGATIVE, &rsci->h.flags);
- else
- CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
- gss_handle_to_u64(&rsci->handle));
+ else
+ CDEBUG(D_SEC, "create rsc with idx %#llx\n",
+ gss_handle_to_u64(&rsci->handle));
- COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
- }
- RETURN(rc);
+ COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
+ }
+ RETURN(rc);
}
struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
rsc = gss_svc_searchbyctx(&gw->gw_handle);
if (!rsc) {
- CWARN("Invalid gss ctx idx "LPX64" from %s\n",
+ CWARN("Invalid gss ctx idx %#llx from %s\n",
gss_handle_to_u64(&gw->gw_handle),
libcfs_nid2str(req->rq_peer.nid));
return NULL;
{
int i, rc;
- spin_lock_init(&__ctx_index_lock);
/*
* this helps reducing context index confliction. after server reboot,
* conflicting request from clients might be filtered out by initial
* sequence number checking, thus no chance to sent error notification
* back to clients.
*/
- cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+ get_random_bytes(&__ctx_index, sizeof(__ctx_index));
- rc = _cache_register_net(&rsi_cache, &init_net);
+#ifdef HAVE_CACHE_HEAD_HLIST
+ for (i = 0; i < rsi_cache.hash_size; i++)
+ INIT_HLIST_HEAD(&rsi_cache.hash_table[i]);
+#endif
+ rc = cache_register_net(&rsi_cache, &init_net);
if (rc != 0)
return rc;
- rc = _cache_register_net(&rsc_cache, &init_net);
+#ifdef HAVE_CACHE_HEAD_HLIST
+ for (i = 0; i < rsc_cache.hash_size; i++)
+ INIT_HLIST_HEAD(&rsc_cache.hash_table[i]);
+#endif
+ rc = cache_register_net(&rsc_cache, &init_net);
if (rc != 0) {
- _cache_unregister_net(&rsi_cache, &init_net);
+ cache_unregister_net(&rsi_cache, &init_net);
return rc;
}
/* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
* the init upcall channel, otherwise there's big chance that the first
* upcall issued before the channel be opened thus nfsv4 cache code will
- * drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds. */
+	 * drop the request directly, thus leading to unnecessary recovery time.
+	 * Here we wait at most 1.5 seconds.
+	 */
for (i = 0; i < 6; i++) {
- if (atomic_read(&rsi_cache.readers) > 0)
+ if (channel_users(&rsi_cache) > 0)
break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(msecs_to_jiffies(MSEC_PER_SEC) >= 4);
- schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
}
- if (atomic_read(&rsi_cache.readers) == 0)
+ if (channel_users(&rsi_cache) == 0)
CWARN("Init channel is not opened by lsvcgssd, following "
"request might be dropped until lsvcgssd is active\n");
void gss_exit_svc_upcall(void)
{
cache_purge(&rsi_cache);
- _cache_unregister_net(&rsi_cache, &init_net);
+ cache_unregister_net(&rsi_cache, &init_net);
cache_purge(&rsc_cache);
- _cache_unregister_net(&rsc_cache, &init_net);
+ cache_unregister_net(&rsc_cache, &init_net);
}