*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2014, Intel Corporation.
*
* Author: Eric Mei <ericm@clusterfs.com>
*/
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/mutex.h>
#include <linux/sunrpc/cache.h>
-#else
-#include <liblustre.h>
-#endif
+#include <net/sock.h>
#include <obd.h>
#include <obd_class.h>
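/* hash_mem() folds @length bytes of @buf into a hash one byte at a time,
 * mixing with hash_long() after every sizeof(long) bytes; the buffer
 * length is appended as a final byte before the last fold, so buffers of
 * different sizes hash differently.  The top @bits bits are returned.
 */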
static inline unsigned long hash_mem(char *buf, int length, int bits)
{
- unsigned long hash = 0;
- unsigned long l = 0;
- int len = 0;
- unsigned char c;
-
- do {
- if (len == length) {
- c = (char) len;
- len = -1;
- } else
- c = *buf++;
-
- l = (l << 8) | c;
- len++;
-
- if ((len & (BITS_PER_LONG/8-1)) == 0)
- hash = cfs_hash_long(hash^l, BITS_PER_LONG);
- } while (len);
-
- return hash >> (BITS_PER_LONG - bits);
+ unsigned long hash = 0;
+ unsigned long l = 0;
+ int len = 0;
+ unsigned char c;
+
+ do {
+ if (len == length) {
+ c = (char) len;
+ len = -1;
+ } else
+ c = *buf++;
+
+ l = (l << 8) | c;
+ len++;
+
+ if ((len & (BITS_PER_LONG/8-1)) == 0)
+ hash = hash_long(hash^l, BITS_PER_LONG);
+ } while (len);
+
+ return hash >> (BITS_PER_LONG - bits);
}
+/* Compatibility wrappers: cache_register_net()/cache_unregister_net()
+ * are only exported since kernel 3.3, and the old cache_register()/
+ * cache_unregister() were removed in kernel 3.4.  This can go away once
+ * kernel 3.3 is the minimum supported version.
+ */
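+/* HAVE_CACHE_REGISTER is assumed here to be set by the build's configure
+ * probe for the pre-3.3 cache_register() API.
+ */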
+static inline int _cache_register_net(struct cache_detail *cd, struct net *net)
+{
+#ifdef HAVE_CACHE_REGISTER
+ return cache_register(cd);
+#else
+ return cache_register_net(cd, net);
+#endif
+}
+static inline void _cache_unregister_net(struct cache_detail *cd,
+ struct net *net)
+{
+#ifdef HAVE_CACHE_REGISTER
+ cache_unregister(cd);
+#else
+ cache_unregister_net(cd, net);
+#endif
+}
/****************************************
* rsi cache *
****************************************/
{
rsc_flush_target(target);
}
-EXPORT_SYMBOL(gss_secsvc_flush);
static struct rsc *gss_svc_searchbyctx(rawobj_t *handle)
{
cache_get(&rsip->h); /* take an extra ref */
init_waitqueue_head(&rsip->waitq);
- init_waitqueue_entry_current(&wait);
+ init_waitqueue_entry(&wait, current);
add_wait_queue(&rsip->waitq, &wait);
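/* Loop here until the userspace upcall has filled in the rsi entry (or
 * the wait times out), rechecking the cache on each pass.
 */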
cache_check:
set_current_state(TASK_INTERRUPTIBLE);
read_unlock(&rsi_cache.hash_lock);
- if (valid == 0)
- schedule_timeout(GSS_SVC_UPCALL_TIMEOUT *
- HZ);
-
+ if (valid == 0) {
+ /* convert the upcall timeout (seconds) into jiffies;
+ * a local named 'jiffies' would shadow the kernel's
+ * global tick counter */
+ unsigned long timeout;
+
+ timeout = msecs_to_jiffies(MSEC_PER_SEC *
+ GSS_SVC_UPCALL_TIMEOUT);
+ schedule_timeout(timeout);
+ }
cache_get(&rsip->h);
goto cache_check;
}
*/
cfs_get_random_bytes(&__ctx_index, sizeof(__ctx_index));
- rc = cache_register_net(&rsi_cache, &init_net);
+ rc = _cache_register_net(&rsi_cache, &init_net);
if (rc != 0)
return rc;
- rc = cache_register_net(&rsc_cache, &init_net);
+ rc = _cache_register_net(&rsc_cache, &init_net);
if (rc != 0) {
- cache_unregister_net(&rsi_cache, &init_net);
+ _cache_unregister_net(&rsi_cache, &init_net);
return rc;
}
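/* Wait, polling every 1/4 second, for a userspace reader (presumably
 * the svcgssd daemon) to attach to the rsi cache channel, so that early
 * requests are not rejected before the daemon is ready.
 */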
if (atomic_read(&rsi_cache.readers) > 0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(HZ >= 4);
- schedule_timeout(HZ / 4);
+ LASSERT(msecs_to_jiffies(MSEC_PER_SEC) >= 4);
+ schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
}
if (atomic_read(&rsi_cache.readers) == 0)
void gss_exit_svc_upcall(void)
{
cache_purge(&rsi_cache);
- cache_unregister_net(&rsi_cache, &init_net);
+ _cache_unregister_net(&rsi_cache, &init_net);
cache_purge(&rsc_cache);
- cache_unregister_net(&rsc_cache, &init_net);
+ _cache_unregister_net(&rsc_cache, &init_net);
}