#define GSS_SVC_UPCALL_TIMEOUT (20)
-static spinlock_t __ctx_index_lock;
+static DEFINE_SPINLOCK(__ctx_index_lock);
static __u64 __ctx_index;
__u64 gss_get_next_ctx_index(void)
static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
{
char *buf = mesg;
- char *ep;
int len;
struct rsi rsii, *rsip = NULL;
time_t expiry;
if (len <= 0)
goto out;
- /* major */
- rsii.major_status = simple_strtol(buf, &ep, 10);
- if (*ep)
- goto out;
+ /* major */
+ status = kstrtoint(buf, 10, &rsii.major_status);
+ if (status)
+ goto out;
- /* minor */
- len = qword_get(&mesg, buf, mlen);
- if (len <= 0)
- goto out;
- rsii.minor_status = simple_strtol(buf, &ep, 10);
- if (*ep)
- goto out;
+ /* minor */
+ len = qword_get(&mesg, buf, mlen);
+ if (len <= 0) {
+ status = -EINVAL;
+ goto out;
+ }
+
+ status = kstrtoint(buf, 10, &rsii.minor_status);
+ if (status)
+ goto out;
/* out_handle */
len = qword_get(&mesg, buf, mlen);
break;
case LUSTRE_SP_CLI:
rsci.ctx.gsc_usr_root = 1;
+ break;
+ case LUSTRE_SP_MGS:
+ /* by convention, all 3 set to 1 means MGS */
+ rsci.ctx.gsc_usr_mds = 1;
+ rsci.ctx.gsc_usr_oss = 1;
+ rsci.ctx.gsc_usr_root = 1;
+ break;
default:
break;
}
struct ptlrpc_reply_state *rs;
struct rsc *rsci = NULL;
struct rsi *rsip = NULL, rsikey;
- wait_queue_t wait;
+ wait_queue_entry_t wait;
int replen = sizeof(struct ptlrpc_body);
struct gss_rep_header *rephdr;
int first_check = 1;
memset(&rsikey, 0, sizeof(rsikey));
rsikey.lustre_svc = lustre_svc;
- rsikey.nid = (__u64) req->rq_peer.nid;
+ /* In case of MR, rq_peer is not the NID from which request is received,
+ * but primary NID of peer.
+ * So we need rq_source, which contains the NID actually in use.
+ */
+ rsikey.nid = (__u64) req->rq_source.nid;
nodemap_test_nid(req->rq_peer.nid, rsikey.nm_name,
sizeof(rsikey.nm_name));
{
int i, rc;
- spin_lock_init(&__ctx_index_lock);
/*
* this helps reducing context index confliction. after server reboot,
* conflicting request from clients might be filtered out by initial
/* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
* the init upcall channel, otherwise there's big chance that the first
* upcall issued before the channel be opened thus nfsv4 cache code will
- * drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds. */
+ * drop the request directly, thus lead to unnecessary recovery time.
+ * Here we wait a maximum of 1.5 seconds.
+ */
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(msecs_to_jiffies(MSEC_PER_SEC) >= 4);
+ LASSERT(msecs_to_jiffies(MSEC_PER_SEC / 4) > 0);
schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
}