}
}
read_unlock(&rsi_cache.hash_lock);
- } while ((get_seconds() - starttime) <= 15);
- CERROR("15s timeout while waiting cache refill\n");
+ } while ((get_seconds() - starttime) <= SVCSEC_UPCALL_TIMEOUT);
+ CERROR("%ds timeout while waiting cache refill\n",
+ SVCSEC_UPCALL_TIMEOUT);
return NULL;
}
read_unlock(&rsc_cache.hash_lock);
RETURN(tmp);
}
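
The hunk above only replaces the hard-coded 15 seconds with the SVCSEC_UPCALL_TIMEOUT constant; the surrounding loop is the usual poll-until-the-upcall-completes pattern. A minimal sketch of that shape, assuming a hypothetical lookup_valid_entry() helper and a 1-second poll interval (neither appears in this patch):

/* Sketch only: lookup_valid_entry() and the HZ poll interval are
 * assumptions, not code from this patch. */
static struct rsi *wait_cache_refill(struct rsi_key *key)
{
        unsigned long starttime = get_seconds();

        do {
                struct rsi *entry;

                read_lock(&rsi_cache.hash_lock);
                entry = lookup_valid_entry(key);        /* hypothetical */
                read_unlock(&rsi_cache.hash_lock);
                if (entry)
                        return entry;

                /* let the user-space upcall make progress */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(HZ);
        } while ((get_seconds() - starttime) <= SVCSEC_UPCALL_TIMEOUT);

        CERROR("%ds timeout while waiting for cache refill\n",
               SVCSEC_UPCALL_TIMEOUT);
        return NULL;
}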
-
+
static int rsc_parse(struct cache_detail *cd,
char *mesg, int mlen)
{
kgss_mech_put(gm);
goto out;
}
- expiry = (time_t) ctx_expiry;
+ expiry = (time_t) gss_roundup_expire_time(ctx_expiry);
kgss_mech_put(gm);
}
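
gss_roundup_expire_time() is defined elsewhere; the point of the change is to stop storing the raw mechanism expiry directly. One plausible shape of such a helper, purely as illustration (the 60-second granularity is an assumption, not the real policy):

static inline __u64 gss_roundup_expire_time(__u64 expiry)
{
        /* hypothetical: round up to a coarse boundary so contexts
         * established close together also expire together */
        return (expiry + 59) / 60 * 60;
}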
int n;
ENTRY;
+ if (uid == -1)
+ CWARN("flush all gss contexts\n");
+
write_lock(&rsc_cache.hash_lock);
for (n = 0; n < RSC_HASHMAX; n++) {
for (ch = &rsc_cache.hash_table[n]; *ch;) {
rscp = container_of(*ch, struct rsc, h);
- if (uid == -1 || rscp->cred.vc_uid == uid) {
- /* it seems simply set NEGATIVE doesn't work */
- *ch = (*ch)->next;
- rscp->h.next = NULL;
- cache_get(&rscp->h);
- set_bit(CACHE_NEGATIVE, &rscp->h.flags);
- clear_bit(CACHE_HASHED, &rscp->h.flags);
- CDEBUG(D_SEC, "flush rsc %p for uid %u\n",
- rscp, rscp->cred.vc_uid);
- rsc_put(&rscp->h, &rsc_cache);
- rsc_cache.entries--;
+
+ if (uid != -1 && rscp->cred.vc_uid != uid) {
+ ch = &((*ch)->next);
continue;
}
- ch = &((*ch)->next);
+
+ /* it seems simply setting NEGATIVE doesn't work */
+ *ch = (*ch)->next;
+ rscp->h.next = NULL;
+ cache_get(&rscp->h);
+ set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+ clear_bit(CACHE_HASHED, &rscp->h.flags);
+ if (uid != -1)
+ CWARN("flush rsc %p(%u) for uid %u\n", rscp,
+ *((__u32 *) rscp->handle.data),
+ rscp->cred.vc_uid);
+ rsc_put(&rscp->h, &rsc_cache);
+ rsc_cache.entries--;
}
}
write_unlock(&rsc_cache.hash_lock);
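
The rewritten flush loop keeps the pointer-to-pointer idiom: ch always addresses the link that reaches the current entry, so unlinking is just *ch = (*ch)->next with no "previous node" bookkeeping, while skipping an entry advances the link with ch = &((*ch)->next). The same idiom on a bare singly-linked list, as a self-contained sketch:

struct node {
        struct node *next;
        int key;
};

/* Remove every node matching key; *head stays valid throughout. */
static void remove_matching(struct node **head, int key)
{
        struct node **pp = head;
        struct node *victim;

        while (*pp) {
                if ((*pp)->key != key) {
                        pp = &(*pp)->next;      /* advance the link pointer */
                        continue;
                }
                victim = *pp;
                *pp = victim->next;             /* unlink, no prev pointer */
                /* caller would free victim here */
        }
}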
static struct cache_req my_chandle = {my_defer};
/* Implements sequence number algorithm as specified in RFC 2203. */
+static inline void __dbg_dump_seqwin(struct gss_svc_seq_data *sd)
+{
+ char buf[sizeof(sd->sd_win)*2+1];
+ int i;
+
+ for (i = 0; i < sizeof(sd->sd_win); i++)
+ sprintf(&buf[i+i], "%02x", ((__u8 *) sd->sd_win)[i]);
+ CWARN("dump seqwin: %s\n", buf);
+}
+
+static inline void __dbg_seq_jump(struct gss_svc_seq_data *sd, __u32 seq_num)
+{
+ CWARN("seq jump to %u, cur max %u!\n", seq_num, sd->sd_max);
+ __dbg_dump_seqwin(sd);
+}
+
+static inline void __dbg_seq_increase(struct gss_svc_seq_data *sd, __u32 seq_num)
+{
+ int n = seq_num - sd->sd_max;
+ int i, notset = 0;
+
+ for (i = 0; i < n; i++) {
+ if (!test_bit(i, sd->sd_win))
+ notset++;
+ }
+ if (!notset)
+ return;
+
+ CWARN("seq increase to %u, cur max %u\n", seq_num, sd->sd_max);
+ __dbg_dump_seqwin(sd);
+}
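
These helpers only report anomalies; the window itself is maintained by gss_check_seq_num() below. A compact model of that RFC 2203 sliding window (locking omitted), assuming a GSS_SEQ_WIN-bit bitmap indexed by seq % GSS_SEQ_WIN; the constant value here is illustrative:

#define GSS_SEQ_WIN     128

struct seq_win {
        unsigned long   bits[GSS_SEQ_WIN / BITS_PER_LONG];
        __u32           max;
};

/* Returns 1 if seq_num is fresh, 0 if replayed or below the window. */
static int seq_win_check_set(struct seq_win *w, __u32 seq_num)
{
        if (seq_num > w->max) {
                if (seq_num >= w->max + GSS_SEQ_WIN) {
                        /* big jump: the whole old window is stale */
                        memset(w->bits, 0, sizeof(w->bits));
                        w->max = seq_num;
                } else {
                        /* slide forward, clearing bits we pass over */
                        while (w->max < seq_num) {
                                w->max++;
                                __clear_bit(w->max % GSS_SEQ_WIN, w->bits);
                        }
                }
                __set_bit(seq_num % GSS_SEQ_WIN, w->bits);
                return 1;
        }
        if (seq_num + GSS_SEQ_WIN <= w->max)
                return 0;                       /* too old: below the window */
        return !__test_and_set_bit(seq_num % GSS_SEQ_WIN, w->bits);
}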
+
static int
gss_check_seq_num(struct gss_svc_seq_data *sd, __u32 seq_num)
{
spin_lock(&sd->sd_lock);
if (seq_num > sd->sd_max) {
if (seq_num >= sd->sd_max + GSS_SEQ_WIN) {
+ __dbg_seq_jump(sd, seq_num);
memset(sd->sd_win, 0, sizeof(sd->sd_win));
sd->sd_max = seq_num;
} else {
+ __dbg_seq_increase(sd, seq_num);
while(sd->sd_max < seq_num) {
sd->sd_max++;
__clear_bit(sd->sd_max % GSS_SEQ_WIN,
req->rq_peer.peer_id.nid, nidstr));
svcdata->is_init = 1;
- svcdata->reserve_len = 6 * 4 +
+ svcdata->reserve_len = 7 * 4 +
size_round4(rsip->out_handle.len) +
size_round4(rsip->out_token.len);
resp += req->rq_replen / 4;
reslen = svcdata->reserve_len;
- /* gss reply:
- * status, major, minor, seq, out_handle, out_token
+ /* gss reply: (conforms to the error notify format)
+ * x, x, seq, major, minor, handle, token
*/
- *resp++ = cpu_to_le32(PTLRPCS_OK);
+ *resp++ = 0;
+ *resp++ = 0;
+ *resp++ = cpu_to_le32(GSS_SEQ_WIN);
*resp++ = cpu_to_le32(rsip->major_status);
*resp++ = cpu_to_le32(rsip->minor_status);
- *resp++ = cpu_to_le32(GSS_SEQ_WIN);
- reslen -= (4 * 4);
+ reslen -= (5 * 4);
if (rawobj_serialize(&rsip->out_handle,
&resp, &reslen)) {
dump_rsi(rsip);
GOTO(err_free, rc = SVC_DROP);
}
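
The reservation and the writes must stay in sync: five fixed words are emitted inline (hence reslen -= 5 * 4), and rawobj_serialize() consumes one length word plus the padded body for each of the two objects, which is where the remaining 2 * 4 of the 7 * 4 reservation goes. A sketch of the accounting, assuming size_round4() rounds a byte count up to a multiple of 4 (the helper name is made up):

/* 7 * 4 bytes of fixed words:
 *   x, x, GSS_SEQ_WIN, major, minor, handle.len, token.len
 * plus the 4-byte padded bodies of the two rawobjs.
 */
static int gss_init_reply_len(rawobj_t *handle, rawobj_t *token)
{
        return 7 * 4 +
               size_round4(handle->len) +
               size_round4(token->len);
}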
- if (rawobj_extract(&gc->gc_ctx, &secdata, &seclen)) {
+ /* We _must_ allocate new storage for gc_ctx: in case of recovery
+ * the request is saved for delayed handling, by which time the
+ * incoming buffer might have already been released.
+ */
+ if (rawobj_extract_alloc(&gc->gc_ctx, &secdata, &seclen)) {
CERROR("fail to obtain gss context handle\n");
GOTO(err_free, rc = SVC_DROP);
}
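
rawobj_extract_alloc() differs from rawobj_extract() only in copying the body into freshly allocated storage instead of aliasing the receive buffer. A sketch of such a copying extractor, assuming rawobj_t is a {len, data} pair, the little-endian wire format used in the reply path above, and Lustre's OBD_ALLOC(); the real implementation lives elsewhere in the patch:

static int rawobj_extract_alloc(rawobj_t *obj, __u32 **buf, __u32 *buflen)
{
        __u32 len;

        if (*buflen < sizeof(__u32))
                return -EINVAL;

        len = le32_to_cpu(**buf);               /* length prefix */
        (*buf)++;
        *buflen -= sizeof(__u32);

        if (size_round4(len) > *buflen)
                return -EINVAL;

        if (len == 0) {
                obj->len = 0;
                obj->data = NULL;
                return 0;
        }

        OBD_ALLOC(obj->data, len);              /* own the storage */
        if (!obj->data)
                return -ENOMEM;
        obj->len = len;
        memcpy(obj->data, *buf, len);

        *buf += size_round4(len) >> 2;          /* skip padded body */
        *buflen -= size_round4(len);
        return 0;
}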
rscp = gss_svc_searchbyctx(&gc->gc_ctx);
if (!rscp) {
- CERROR("ctx disapeared under us?\n");
+ CERROR("ctx %u disapeared under us\n",
+ *((__u32 *) gc->gc_ctx.data));
RETURN(-EINVAL);
}
return;
}
- /* gsd->clclred.gc_ctx is NOT allocated, just set pointer
- * to the incoming packet buffer, so don't need free it
- */
+ /* gc_ctx is allocated, see gss_svcsec_accept() */
+ rawobj_free(&gsd->clcred.gc_ctx);
+
OBD_FREE(gsd, sizeof(*gsd));
req->rq_svcsec_data = NULL;
return;
void gss_svc_exit(void)
{
int rc;
- if ((rc = cache_unregister(&rsi_cache)))
- CERROR("unregister rsi cache: %d\n", rc);
+
+ /* XXX rsi didn't take a module refcount. Without really
+ * cleaning it up we can't simply leave; later user-space
+ * operations would certainly cause an oops.
+ * User space might be slow or stuck on something, so wait
+ * for it a bit -- bad hack.
+ */
+ while ((rc = cache_unregister(&rsi_cache))) {
+ CERROR("unregister rsi cache: %d. Try again\n", rc);
+ schedule_timeout(2 * HZ);
+ cache_purge(&rsi_cache);
+ }
+
if ((rc = cache_unregister(&rsc_cache)))
CERROR("unregister rsc cache: %d\n", rc);
if ((rc = svcsec_unregister(&svcsec_gss)))