return hash >> (BITS_PER_LONG - bits);
}
-/* This compatibility can be removed once kernel 3.3 is used,
- * since cache_register_net/cache_unregister_net are exported.
- * Note that since kernel 3.4 cache_register and cache_unregister
- * are removed.
-*/
-static inline int _cache_register_net(struct cache_detail *cd, struct net *net)
-{
-#ifdef HAVE_CACHE_REGISTER
- return cache_register(cd);
-#else
- return cache_register_net(cd, net);
-#endif
-}
-static inline void _cache_unregister_net(struct cache_detail *cd,
- struct net *net)
-{
-#ifdef HAVE_CACHE_REGISTER
- cache_unregister(cd);
-#else
- cache_unregister_net(cd, net);
-#endif
-}
/****************************************
* rpc sec init (rsi) cache *
****************************************/
static struct rsi *rsi_update(struct rsi *new, struct rsi *old);
static struct rsi *rsi_lookup(struct rsi *item);
+#ifdef HAVE_CACHE_DETAIL_WRITERS
+/* Number of userspace processes holding this cache's upcall channel
+ * open; used below to detect whether lsvcgssd has opened the init
+ * channel.  Newer kernels count this in cache_detail::writers, older
+ * ones in cache_detail::readers, so hide the field name behind one
+ * helper.  NOTE(review): assumes HAVE_CACHE_DETAIL_WRITERS is set by a
+ * configure-time check for the renamed field — confirm in autoconf.
+ */
+static inline int channel_users(struct cache_detail *cd)
+{
+ return atomic_read(&cd->writers);
+}
+#else
+static inline int channel_users(struct cache_detail *cd)
+{
+ return atomic_read(&cd->readers);
+}
+#endif
+
static inline int rsi_hash(struct rsi *item)
{
return hash_mem((char *)item->in_handle.data, item->in_handle.len,
(*bpp)[-1] = '\n';
}
-#ifdef HAVE_SUNRPC_UPCALL_HAS_3ARGS
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
-}
-#else
-
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
- return sunrpc_cache_pipe_upcall(cd, h);
-}
-#endif
-
static inline void __rsi_init(struct rsi *new, struct rsi *item)
{
new->out_handle = RAWOBJ_EMPTY;
static void rsi_put(struct kref *ref)
{
- struct rsi *rsi = container_of(ref, struct rsi, h.ref);
+ struct rsi *rsi = container_of(ref, struct rsi, h.ref);
#ifdef HAVE_CACHE_HEAD_HLIST
- LASSERT(rsi->h.cache_list.next == NULL);
+ /* Assert the entry is off the cache hash before freeing it.
+  * hlist_unhashed() tests the pprev pointer, which is what hlist
+  * removal actually resets; checking cache_list.next == NULL is not
+  * a reliable "not on any list" test for an hlist node.
+  */
+ LASSERT(hlist_unhashed(&rsi->h.cache_list));
#else
 LASSERT(rsi->h.next == NULL);
#endif
- rsi_free(rsi);
- OBD_FREE_PTR(rsi);
+ rsi_free(rsi);
+ OBD_FREE_PTR(rsi);
}
static int rsi_match(struct cache_head *a, struct cache_head *b)
char *buf = mesg;
int len;
struct rsi rsii, *rsip = NULL;
- time_t expiry;
+ time64_t expiry;
int status = -EINVAL;
ENTRY;
.hash_table = rsi_table,
.name = "auth.sptlrpc.init",
.cache_put = rsi_put,
-#ifndef HAVE_SUNRPC_UPCALL_HAS_3ARGS
.cache_request = rsi_request,
-#endif
- .cache_upcall = rsi_upcall,
+ .cache_upcall = sunrpc_cache_pipe_upcall,
.cache_parse = rsi_parse,
.match = rsi_match,
.init = rsi_init,
static void rsc_put(struct kref *ref)
{
- struct rsc *rsci = container_of(ref, struct rsc, h.ref);
+ struct rsc *rsci = container_of(ref, struct rsc, h.ref);
#ifdef HAVE_CACHE_HEAD_HLIST
- LASSERT(rsci->h.cache_list.next == NULL);
+ /* Assert the context is unhashed before freeing.  hlist_unhashed()
+  * inspects pprev — the pointer hlist removal actually clears — so it
+  * is the supported check; next == NULL is not reliable for hlists.
+  */
+ LASSERT(hlist_unhashed(&rsci->h.cache_list));
#else
- LASSERT(rsci->h.next == NULL);
+ LASSERT(rsci->h.next == NULL);
#endif
- rsc_free(rsci);
- OBD_FREE_PTR(rsci);
+ rsc_free(rsci);
+ OBD_FREE_PTR(rsci);
}
static int rsc_match(struct cache_head *a, struct cache_head *b)
char *buf = mesg;
int len, rv, tmp_int;
struct rsc rsci, *rscp = NULL;
- time_t expiry;
+ time64_t expiry;
int status = -EINVAL;
struct gss_api_mech *gm = NULL;
CERROR("unable to get expire time, drop it\n");
GOTO(out, rc = -EINVAL);
}
- rsci.h.expiry_time = (time_t) ctx_expiry;
+ rsci.h.expiry_time = ctx_expiry;
switch (imp->imp_obd->u.cli.cl_sp_to) {
case LUSTRE_SP_MDT:
cache_get(&rsip->h); /* take an extra ref */
init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry(&wait, current);
+ init_wait(&wait);
add_wait_queue(&rsip->waitq, &wait);
cache_check:
*/
get_random_bytes(&__ctx_index, sizeof(__ctx_index));
- rc = _cache_register_net(&rsi_cache, &init_net);
+ rc = cache_register_net(&rsi_cache, &init_net);
if (rc != 0)
return rc;
- rc = _cache_register_net(&rsc_cache, &init_net);
+ rc = cache_register_net(&rsc_cache, &init_net);
if (rc != 0) {
- _cache_unregister_net(&rsi_cache, &init_net);
+ cache_unregister_net(&rsi_cache, &init_net);
return rc;
}
* Here we wait at minimum 1.5 seconds.
*/
for (i = 0; i < 6; i++) {
- if (atomic_read(&rsi_cache.readers) > 0)
+ if (channel_users(&rsi_cache) > 0)
break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 4);
+ schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
}
- if (atomic_read(&rsi_cache.readers) == 0)
+ if (channel_users(&rsi_cache) == 0)
CWARN("Init channel is not opened by lsvcgssd, following "
"request might be dropped until lsvcgssd is active\n");
void gss_exit_svc_upcall(void)
{
+ /* Flush any queued entries, then detach both caches from the sunrpc
+  * cache framework.  init_net only — this mirrors the registration
+  * done in gss_init_svc_upcall().
+  */
 cache_purge(&rsi_cache);
- _cache_unregister_net(&rsi_cache, &init_net);
+ cache_unregister_net(&rsi_cache, &init_net);
 cache_purge(&rsc_cache);
- _cache_unregister_net(&rsc_cache, &init_net);
+ cache_unregister_net(&rsc_cache, &init_net);
}