*
* Modifications for Lustre
*
- * Copyright 2008, Sun Microsystems, Inc.
- * Author: Eric Mei <eric.mei@sun.com>
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright 2004 - 2006, Cluster File Systems, Inc.
- * All rights reserved
* Author: Eric Mei <ericm@clusterfs.com>
*/
#define GSS_SVC_UPCALL_TIMEOUT (20)
-static spinlock_t __ctx_index_lock = SPIN_LOCK_UNLOCKED;
+static cfs_spinlock_t __ctx_index_lock;
static __u64 __ctx_index;
__u64 gss_get_next_ctx_index(void)
{
__u64 idx;
- spin_lock(&__ctx_index_lock);
+ cfs_spin_lock(&__ctx_index_lock);
idx = __ctx_index++;
- spin_unlock(&__ctx_index_lock);
+ cfs_spin_unlock(&__ctx_index_lock);
return idx;
}
len++;
if ((len & (BITS_PER_LONG/8-1)) == 0)
- hash = hash_long(hash^l, BITS_PER_LONG);
+ hash = cfs_hash_long(hash^l, BITS_PER_LONG);
} while (len);
return hash >> (BITS_PER_LONG - bits);
struct cache_head h;
__u32 lustre_svc;
__u64 nid;
- wait_queue_head_t waitq;
+ cfs_waitq_t waitq;
rawobj_t in_handle, in_token;
rawobj_t out_handle, out_token;
int major_status, minor_status;
(*bpp)[-1] = '\n';
}
+#ifdef HAVE_CACHE_UPCALL
+static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
+{
+ return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
+}
+#endif
+
static inline void __rsi_init(struct rsi *new, struct rsi *item)
{
new->out_handle = RAWOBJ_EMPTY;
new->lustre_svc = item->lustre_svc;
new->nid = item->nid;
- init_waitqueue_head(&new->waitq);
+ cfs_waitq_init(&new->waitq);
}
static inline void __rsi_update(struct rsi *new, struct rsi *item)
out:
rsi_free(&rsii);
if (rsip) {
- wake_up_all(&rsip->waitq);
+ cfs_waitq_broadcast(&rsip->waitq);
cache_put(&rsip->h, &rsi_cache);
} else {
status = -ENOMEM;
{
struct rsi *rsi = container_of(item, struct rsi, h);
- LASSERT(atomic_read(&item->refcnt) > 0);
+ LASSERT(cfs_atomic_read(&item->refcnt) > 0);
if (cache_put(item, cd)) {
LASSERT(item->next == NULL);
out:
rsi_free(&rsii);
if (rsip) {
- wake_up_all(&rsip->waitq);
+ cfs_waitq_broadcast(&rsip->waitq);
rsi_put(&rsip->h, &rsi_cache);
}
.hash_table = rsi_table,
.name = "auth.sptlrpc.init",
.cache_put = rsi_put,
+#ifdef HAVE_CACHE_UPCALL
+ .cache_upcall = rsi_upcall,
+#else
.cache_request = rsi_request,
+#endif
.cache_parse = rsi_parse,
#ifdef HAVE_SUNRPC_CACHE_V2
.match = rsi_match,
tmp->ctx.gsc_mechctx = NULL;
memset(&new->ctx.gsc_seqdata, 0, sizeof(new->ctx.gsc_seqdata));
- spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
+ cfs_spin_lock_init(&new->ctx.gsc_seqdata.ssd_lock);
}
#ifdef HAVE_SUNRPC_CACHE_V2
}
rsci.ctx.gsc_usr_mds = (tmp_int != 0);
+ /* oss user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get oss user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_oss = (tmp_int != 0);
+
/* mapped uid */
rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
if (rv) {
goto out;
if (rv == -ENOENT) {
CERROR("NOENT? set rsc entry negative\n");
- set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rsci.h.flags);
} else {
rawobj_t tmp_buf;
unsigned long ctx_expiry;
{
struct rsc *rsci = container_of(item, struct rsc, h);
- LASSERT(atomic_read(&item->refcnt) > 0);
+ LASSERT(cfs_atomic_read(&item->refcnt) > 0);
if (cache_put(item, cd)) {
LASSERT(item->next == NULL);
}
rsci.ctx.gsc_usr_mds = (tmp_int != 0);
+ /* oss user flag */
+ rv = get_int(&mesg, &tmp_int);
+ if (rv) {
+ CERROR("fail to get oss user flag\n");
+ goto out;
+ }
+ rsci.ctx.gsc_usr_oss = (tmp_int != 0);
+
/* mapped uid */
rv = get_int(&mesg, (int *) &rsci.ctx.gsc_mapped_uid);
if (rv) {
goto out;
if (rv == -ENOENT) {
CERROR("NOENT? set rsc entry negative\n");
- set_bit(CACHE_NEGATIVE, &rsci.h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rsci.h.flags);
} else {
struct gss_api_mech *gm;
rawobj_t tmp_buf;
int n;
ENTRY;
- write_lock(&rsc_cache.hash_lock);
+ cfs_write_lock(&rsc_cache.hash_lock);
for (n = 0; n < RSC_HASHMAX; n++) {
for (ch = &rsc_cache.hash_table[n]; *ch;) {
rscp = container_of(*ch, struct rsc, h);
*ch = (*ch)->next;
rscp->h.next = NULL;
cache_get(&rscp->h);
- set_bit(CACHE_NEGATIVE, &rscp->h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rscp->h.flags);
COMPAT_RSC_PUT(&rscp->h, &rsc_cache);
rsc_cache.entries--;
}
}
- write_unlock(&rsc_cache.hash_lock);
+ cfs_write_unlock(&rsc_cache.hash_lock);
EXIT;
}
}
rsci.h.expiry_time = (time_t) ctx_expiry;
- /* FIXME */
- rsci.ctx.gsc_usr_root = 1;
- rsci.ctx.gsc_usr_mds= 1;
- rsci.ctx.gsc_reverse = 1;
+ if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0)
+ rsci.ctx.gsc_usr_mds = 1;
+ else if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0)
+ rsci.ctx.gsc_usr_oss = 1;
+ else
+ rsci.ctx.gsc_usr_root = 1;
rscp = rsc_update(&rsci, rscp);
if (rscp == NULL)
}
rsci.h.expiry_time = (time_t) ctx_expiry;
- /* FIXME */
- rsci.ctx.gsc_usr_root = 1;
- rsci.ctx.gsc_usr_mds= 1;
- rsci.ctx.gsc_reverse = 1;
+ if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0)
+ rsci.ctx.gsc_usr_mds = 1;
+ else if (strcmp(imp->imp_obd->obd_type->typ_name, LUSTRE_OSC_NAME) == 0)
+ rsci.ctx.gsc_usr_oss = 1;
+ else
+ rsci.ctx.gsc_usr_root = 1;
rscp = rsc_lookup(&rsci, 1);
if (rscp == NULL) {
struct ptlrpc_reply_state *rs;
struct rsc *rsci = NULL;
struct rsi *rsip = NULL, rsikey;
- wait_queue_t wait;
+ cfs_waitlink_t wait;
int replen = sizeof(struct ptlrpc_body);
struct gss_rep_header *rephdr;
int first_check = 1;
}
cache_get(&rsip->h); /* take an extra ref */
- init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry(&wait, current);
- add_wait_queue(&rsip->waitq, &wait);
+ cfs_waitq_init(&rsip->waitq);
+ cfs_waitlink_init(&wait);
+ cfs_waitq_add(&rsip->waitq, &wait);
cache_check:
/* Note each time cache_check() will drop a reference if return
first_check = 0;
read_lock(&rsi_cache.hash_lock);
- valid = test_bit(CACHE_VALID, &rsip->h.flags);
+ valid = cfs_test_bit(CACHE_VALID, &rsip->h.flags);
if (valid == 0)
- set_current_state(TASK_INTERRUPTIBLE);
+ cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
read_unlock(&rsi_cache.hash_lock);
if (valid == 0)
- schedule_timeout(GSS_SVC_UPCALL_TIMEOUT * HZ);
+ cfs_schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
+ CFS_HZ);
cache_get(&rsip->h);
goto cache_check;
break;
}
- remove_wait_queue(&rsip->waitq, &wait);
+ cfs_waitq_del(&rsip->waitq, &wait);
cache_put(&rsip->h, &rsi_cache);
if (rc)
}
grctx->src_init = 1;
- grctx->src_reserve_len = size_round4(rsip->out_token.len);
+ grctx->src_reserve_len = cfs_size_round4(rsip->out_token.len);
rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
if (rc) {
if (rsci) {
/* if anything went wrong, we don't keep the context too */
if (rc != SECSVC_OK)
- set_bit(CACHE_NEGATIVE, &rsci->h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rsci->h.flags);
else
CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
gss_handle_to_u64(&rsci->handle));
struct rsc *rsc = container_of(ctx, struct rsc, ctx);
/* can't be found */
- set_bit(CACHE_NEGATIVE, &rsc->h.flags);
+ cfs_set_bit(CACHE_NEGATIVE, &rsc->h.flags);
/* to be removed at next scan */
rsc->h.expiry_time = 1;
}
{
int i;
+ cfs_spin_lock_init(&__ctx_index_lock);
+ /*
+ * This helps reduce context index conflicts. After a server reboot,
+ * conflicting requests from clients might be filtered out by initial
+ * sequence number checking, leaving no chance to send an error
+ * notification back to the clients.
+ */
+ cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
+
+
cache_register(&rsi_cache);
cache_register(&rsc_cache);
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(HZ >= 4);
- schedule_timeout(HZ / 4);
+ cfs_set_current_state(CFS_TASK_UNINTERRUPTIBLE);
+ LASSERT(CFS_HZ >= 4);
+ cfs_schedule_timeout(CFS_HZ / 4);
}
if (atomic_read(&rsi_cache.readers) == 0)
CWARN("Init channel is not opened by lsvcgssd, following "
"request might be dropped until lsvcgssd is active\n");
- /* this helps reducing context index confliction. after server reboot,
- * conflicting request from clients might be filtered out by initial
- * sequence number checking, thus no chance to sent error notification
- * back to clients. */
- get_random_bytes(&__ctx_index, sizeof(__ctx_index));
-
return 0;
}
void __exit gss_exit_svc_upcall(void)
{
- int rc;
-
cache_purge(&rsi_cache);
- if ((rc = cache_unregister(&rsi_cache)))
- CERROR("unregister rsi cache: %d\n", rc);
+ cache_unregister(&rsi_cache);
cache_purge(&rsc_cache);
- if ((rc = cache_unregister(&rsc_cache)))
- CERROR("unregister rsc cache: %d\n", rc);
+ cache_unregister(&rsc_cache);
}