]) # LC_UAPI_LINUX_MOUNT_H
#
+# LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
+#
+# kernel 4.20 commit 1863d77f15da0addcd293a1719fa5d3ef8cde3ca
+# SUNRPC: Replace the cache_detail->hash_lock with a regular spinlock
+#
+# Now that the reader functions are all RCU protected, use a regular
+# spinlock rather than a reader/writer lock.
+#
+AC_DEFUN([LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK], [
+tmp_flags="$EXTRA_KCFLAGS"
+dnl Promote warnings to errors so that assigning an rwlock_t address to a
+dnl spinlock_t pointer fails the probe instead of merely warning.
+EXTRA_KCFLAGS="-Werror"
+LB_CHECK_COMPILE([if cache_detail->hash_lock is a spinlock],
+hash_lock_isa_spinlock_t, [
+	#include <linux/sunrpc/cache.h>
+],[
+	dnl Compile-time type probe only: taking the member's address through a
+	dnl null struct pointer is never executed, just type-checked.
+	spinlock_t *lock = &(((struct cache_detail *)0)->hash_lock);
+	spin_lock(lock);
+],[
+	AC_DEFINE(HAVE_CACHE_HASH_SPINLOCK, 1,
+		[if cache_detail->hash_lock is a spinlock])
+])
+EXTRA_KCFLAGS="$tmp_flags"
+]) # LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
+
+#
# LC_BIO_INTEGRITY_ENABLED
#
# 4.13 removed bio_integrity_enabled
# 4.18
LC_INODE_TIMESPEC64
+ # 4.20
+ LC_HAVE_SUNRPC_CACHE_HASH_LOCK_IS_A_SPINLOCK
+
# 5.0
LC_UAPI_LINUX_MOUNT_H
return container_of(sec2gsec(sec), struct gss_sec_keyring, gsk_base);
}
+/*
+ * Compat shims for kernel 4.20 commit 1863d77f15da ("SUNRPC: Replace the
+ * cache_detail->hash_lock with a regular spinlock"): hash_lock changed from
+ * rwlock_t to spinlock_t and lookups moved to sunrpc_cache_lookup_rcu().
+ * On new kernels readers must take the plain spinlock; on old kernels the
+ * wrappers fall back to the reader side of the rwlock.
+ */
+#ifdef HAVE_CACHE_HASH_SPINLOCK
+# define sunrpc_cache_lookup(c, i, h) sunrpc_cache_lookup_rcu((c), (i), (h))
+# define cache_read_lock(cdetail) spin_lock(&((cdetail)->hash_lock))
+# define cache_read_unlock(cdetail) spin_unlock(&((cdetail)->hash_lock))
+#else /* ! HAVE_CACHE_HASH_SPINLOCK */
+# define cache_read_lock(cdetail) read_lock(&((cdetail)->hash_lock))
+# define cache_read_unlock(cdetail) read_unlock(&((cdetail)->hash_lock))
+#endif
#define GSS_CTX_INIT_MAX_LEN (1024)
if (first_check) {
first_check = 0;
- read_lock(&rsi_cache.hash_lock);
+ cache_read_lock(&rsi_cache);
valid = test_bit(CACHE_VALID, &rsip->h.flags);
if (valid == 0)
set_current_state(TASK_INTERRUPTIBLE);
- read_unlock(&rsi_cache.hash_lock);
+ cache_read_unlock(&rsi_cache);
if (valid == 0) {
unsigned long timeout;