/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LIBCFS_LINUX_WAIT_BIT_H
-#define __LIBCFS_LINUX_WAIT_BIT_H
+#ifndef __LIBCFS_LINUX_WAIT_H
+#define __LIBCFS_LINUX_WAIT_H
/* Make sure we can see if we have TASK_NOLOAD */
#include <linux/sched.h>
-/*
- * Linux wait-bit related types and methods:
- */
-#ifdef HAVE_WAIT_BIT_HEADER_H
-#include <linux/wait_bit.h>
-#endif
#include <linux/wait.h>
#ifndef HAVE_WAIT_QUEUE_ENTRY
#define __add_wait_queue_entry_tail __add_wait_queue_tail
#endif
-#ifndef HAVE_WAIT_BIT_HEADER_H
-struct wait_bit_queue_entry {
- struct wait_bit_key key;
- wait_queue_entry_t wq_entry;
-};
-
-#define ___wait_is_interruptible(state) \
- (!__builtin_constant_p(state) || \
- state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
-
-#endif /* ! HAVE_WAIT_BIT_HEADER_H */
-
#ifndef HAVE_PREPARE_TO_WAIT_EVENT
extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
wait_queue_entry_t *wq_entry, int state);
__cond || !__ret; \
})
-#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
-/**
- * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
- *
- * @bit: the bit of the word being waited on
- * @word: the word being waited on, a kernel virtual address
- *
- * You can use this helper if bitflags are manipulated atomically rather than
- * non-atomically under a lock.
- */
-static inline void clear_and_wake_up_bit(int bit, void *word)
-{
- clear_bit_unlock(bit, word);
- /* See wake_up_bit() for which memory barrier you need to use. */
- smp_mb__after_atomic();
- wake_up_bit(word, bit);
-}
-#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */
-
-#ifndef HAVE_WAIT_VAR_EVENT
-extern void __init wait_bit_init(void);
-extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
- void *var, int flags);
-extern void wake_up_var(void *var);
-extern wait_queue_head_t *__var_waitqueue(void *p);
-
-#define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \
-({ \
- __label__ __out; \
- wait_queue_head_t *__wq_head = __var_waitqueue(var); \
- struct wait_bit_queue_entry __wbq_entry; \
- long __ret = ret; /* explicit shadow */ \
- \
- init_wait_var_entry(&__wbq_entry, var, \
- exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
- for (;;) { \
- long __int = prepare_to_wait_event(__wq_head, \
- &__wbq_entry.wq_entry, \
- state); \
- if (condition) \
- break; \
- \
- if (___wait_is_interruptible(state) && __int) { \
- __ret = __int; \
- goto __out; \
- } \
- \
- cmd; \
- } \
- finish_wait(__wq_head, &__wbq_entry.wq_entry); \
-__out: __ret; \
-})
-
-#define __wait_var_event(var, condition) \
- ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
- schedule())
-
-#define wait_var_event(var, condition) \
-do { \
- might_sleep(); \
- if (condition) \
- break; \
- __wait_var_event(var, condition); \
-} while (0)
-
-#define __wait_var_event_killable(var, condition) \
- ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
- schedule())
-
-#define wait_var_event_killable(var, condition) \
-({ \
- int __ret = 0; \
- might_sleep(); \
- if (!(condition)) \
- __ret = __wait_var_event_killable(var, condition); \
- __ret; \
-})
-
-#define __wait_var_event_timeout(var, condition, timeout) \
- ___wait_var_event(var, ___wait_cond_timeout1(condition), \
- TASK_UNINTERRUPTIBLE, 0, timeout, \
- __ret = schedule_timeout(__ret))
-
-#define wait_var_event_timeout(var, condition, timeout) \
-({ \
- long __ret = timeout; \
- might_sleep(); \
- if (!___wait_cond_timeout1(condition)) \
- __ret = __wait_var_event_timeout(var, condition, timeout); \
- __ret; \
-})
-#else /* !HAVE_WAIT_VAR_EVENT */
-/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
- * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
- */
-# ifndef __wait_cond_timeout
-# define ___wait_cond_timeout(condition) \
-({ \
- bool __cond = (condition); \
- if (__cond && !__ret) \
- __ret = 1; \
- __cond || !__ret; \
-})
-# endif /* __wait_cond_timeout */
-
-#endif /* ! HAVE_WAIT_VAR_EVENT */
-
/*
* prepare_to_wait_event() does not support an exclusive
* lifo wait.
int sync, void *key);
#endif /* HAVE_WAIT_WOKEN */
-#endif /* __LICBFS_LINUX_WAIT_BIT_H */
+#endif /* __LIBCFS_LINUX_WAIT_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __LIBCFS_LINUX_WAIT_BIT_H
+#define __LIBCFS_LINUX_WAIT_BIT_H
+
+/* Make sure we can see if we have TASK_NOLOAD */
+#include <linux/sched.h>
+#ifdef HAVE_WAIT_BIT_HEADER_H
+#include <linux/wait_bit.h>
+#endif
+
+#include <lustre_compat/linux/wait.h>
+
+#ifndef HAVE_WAIT_BIT_HEADER_H
+struct wait_bit_queue_entry {
+ struct wait_bit_key key;
+ wait_queue_entry_t wq_entry;
+};
+
+#define ___wait_is_interruptible(state) \
+ (!__builtin_constant_p(state) || \
+ state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
+
+#endif /* ! HAVE_WAIT_BIT_HEADER_H */
+
+#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
+/**
+ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
+ *
+ * @bit: the bit of the word being waited on
+ * @word: the word being waited on, a kernel virtual address
+ *
+ * You can use this helper if bitflags are manipulated atomically rather than
+ * non-atomically under a lock.
+ */
+static inline void clear_and_wake_up_bit(int bit, void *word)
+{
+ clear_bit_unlock(bit, word);
+ /* See wake_up_bit() for which memory barrier you need to use. */
+ smp_mb__after_atomic();
+ wake_up_bit(word, bit);
+}
+#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */
+
+#ifndef HAVE_WAIT_VAR_EVENT
+extern void __init wait_bit_init(void);
+extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
+ void *var, int flags);
+extern void wake_up_var(void *var);
+extern wait_queue_head_t *__var_waitqueue(void *p);
+
+#define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \
+({ \
+ __label__ __out; \
+ wait_queue_head_t *__wq_head = __var_waitqueue(var); \
+ struct wait_bit_queue_entry __wbq_entry; \
+ long __ret = ret; /* explicit shadow */ \
+ \
+ init_wait_var_entry(&__wbq_entry, var, \
+ exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
+ for (;;) { \
+ long __int = prepare_to_wait_event(__wq_head, \
+ &__wbq_entry.wq_entry, \
+ state); \
+ if (condition) \
+ break; \
+ \
+ if (___wait_is_interruptible(state) && __int) { \
+ __ret = __int; \
+ goto __out; \
+ } \
+ \
+ cmd; \
+ } \
+ finish_wait(__wq_head, &__wbq_entry.wq_entry); \
+__out: __ret; \
+})
+
+#define __wait_var_event(var, condition) \
+ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
+ schedule())
+
+#define wait_var_event(var, condition) \
+do { \
+ might_sleep(); \
+ if (condition) \
+ break; \
+ __wait_var_event(var, condition); \
+} while (0)
+
+#define __wait_var_event_killable(var, condition) \
+ ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
+ schedule())
+
+#define wait_var_event_killable(var, condition) \
+({ \
+ int __ret = 0; \
+ might_sleep(); \
+ if (!(condition)) \
+ __ret = __wait_var_event_killable(var, condition); \
+ __ret; \
+})
+
+#define __wait_var_event_timeout(var, condition, timeout) \
+ ___wait_var_event(var, ___wait_cond_timeout1(condition), \
+ TASK_UNINTERRUPTIBLE, 0, timeout, \
+ __ret = schedule_timeout(__ret))
+
+#define wait_var_event_timeout(var, condition, timeout) \
+({ \
+ long __ret = timeout; \
+ might_sleep(); \
+ if (!___wait_cond_timeout1(condition)) \
+ __ret = __wait_var_event_timeout(var, condition, timeout); \
+ __ret; \
+})
+#else /* !HAVE_WAIT_VAR_EVENT */
+/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
+ * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
+ */
+# ifndef __wait_cond_timeout
+# define ___wait_cond_timeout(condition) \
+({ \
+ bool __cond = (condition); \
+ if (__cond && !__ret) \
+ __ret = 1; \
+ __cond || !__ret; \
+})
+# endif /* __wait_cond_timeout */
+
+#endif /* ! HAVE_WAIT_VAR_EVENT */
+#endif /* __LIBCFS_LINUX_WAIT_BIT_H */
#include <libcfs/linux/linux-misc.h>
#include <libcfs/linux/linux-mem.h>
#include <libcfs/linux/linux-time.h>
-#include <libcfs/linux/linux-wait.h>
+#include <lustre_compat/linux/wait_bit.h>
+#include <lustre_compat/linux/wait.h>
#include <libcfs/linux/linux-fortify-string.h>
#include <uapi/linux/lnet/libcfs_ioctl.h>
#
EXTRA_DIST = linux-misc.h linux-fs.h linux-mem.h linux-time.h linux-cpu.h \
- linux-wait.h linux-net.h \
+ linux-net.h \
refcount.h processor.h linux-fortify-string.h
libcfs-compat-objs :=
+COMPAT_SCHED := @top_srcdir@/lustre_compat/kernel/sched/
+include $(libcfs_dir)/../../lustre_compat/kernel/sched/Makefile
+libcfs-compat-objs += $(patsubst %,$(COMPAT_SCHED)%,$(sched_objs))
+
COMPAT_MM := @top_srcdir@/lustre_compat/mm/
include $(libcfs_dir)/../../lustre_compat/mm/Makefile
libcfs-compat-objs += $(patsubst %,$(COMPAT_MM)%,$(mm_objs))
include $(libcfs_dir)/../../lustre_compat/lib/Makefile
libcfs-compat-objs += $(patsubst %,$(COMPAT_LIB)%,$(lib_objs))
+
libcfs-linux-objs := linux-prim.o
-libcfs-linux-objs += linux-wait.o
EXTRA_DIST = $(libcfs-compat-objs:.o=.c)
# This file is part of Lustre, http://www.lustre.org/
#
-EXTRA_DIST = linux-prim.c \
- linux-wait.c
+EXTRA_DIST = linux-prim.c
#include <lustre_compat.h>
#include <libcfs/linux/linux-time.h>
-#include <libcfs/linux/linux-wait.h>
#include <libcfs/linux/linux-misc.h>
#include <libcfs/linux/linux-mem.h>
#include <lustre_compat/linux/xarray.h>
+#include <lustre_compat/linux/wait_bit.h>
+#include <lustre_compat/linux/wait.h>
#include <lustre_crypto.h>
#ifndef HAVE_KTIME_GET_TS64
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# This file is part of Lustre, http://www.lustre.org/
+#
+
+sched_objs := wait.o wait_bit.o
// SPDX-License-Identifier: GPL-2.0-only
-/*
- * The implementation of the wait_bit*() and related waiting APIs:
- */
-
#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
-#include <libcfs/linux/linux-wait.h>
+#include <lustre_compat/linux/wait_bit.h>
+#include <lustre_compat/linux/wait.h>
#ifndef HAVE_PREPARE_TO_WAIT_EVENT
-
long prepare_to_wait_event(wait_queue_head_t *wq_head,
wait_queue_entry_t *wq_entry, int state)
{
EXPORT_SYMBOL(prepare_to_wait_event);
#endif /* !HAVE_PREPARE_TO_WAIT_EVENT */
-#ifndef HAVE_WAIT_VAR_EVENT
-
-#define WAIT_TABLE_BITS 8
-#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
-
-static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
-
-wait_queue_head_t *__var_waitqueue(void *p)
-{
- return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS);
-}
-EXPORT_SYMBOL(__var_waitqueue);
-
-static int
-var_wake_function(wait_queue_entry_t *wq_entry, unsigned int mode,
- int sync, void *arg)
-{
- struct wait_bit_key *key = arg;
- struct wait_bit_queue_entry *wbq_entry =
- container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
-
- if (wbq_entry->key.flags != key->flags ||
- wbq_entry->key.bit_nr != key->bit_nr)
- return 0;
-
- return autoremove_wake_function(wq_entry, mode, sync, key);
-}
-
-void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var,
- int flags)
-{
- *wbq_entry = (struct wait_bit_queue_entry){
- .key = {
- .flags = (var),
- .bit_nr = -1,
- },
- .wq_entry = {
- .private = current,
- .func = var_wake_function,
-#ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
- .entry = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
-#else
- .task_list = LIST_HEAD_INIT(wbq_entry->wq_entry.task_list),
-#endif
- },
- };
-}
-EXPORT_SYMBOL(init_wait_var_entry);
-
-void wake_up_var(void *var)
-{
- __wake_up_bit(__var_waitqueue(var), var, -1);
-}
-EXPORT_SYMBOL(wake_up_var);
-
-void __init wait_bit_init(void)
-{
- int i;
-
- for (i = 0; i < WAIT_TABLE_SIZE; i++)
- init_waitqueue_head(bit_wait_table + i);
-}
-#endif /* ! HAVE_WAIT_VAR_EVENT */
-
#ifndef HAVE_WAIT_WOKEN
/*
* DEFINE_WAIT_FUNC(wait, woken_wake_func);
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * The implementation of the wait_bit*() and related waiting APIs:
+ */
+
+#include <linux/sched.h>
+#ifdef HAVE_SCHED_HEADERS
+#include <linux/sched/signal.h>
+#endif
+#include <lustre_compat/linux/wait_bit.h>
+
+#ifndef HAVE_WAIT_VAR_EVENT
+
+#define WAIT_TABLE_BITS 8
+#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
+
+static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;
+
+wait_queue_head_t *__var_waitqueue(void *p)
+{
+ return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS);
+}
+EXPORT_SYMBOL(__var_waitqueue);
+
+static int
+var_wake_function(wait_queue_entry_t *wq_entry, unsigned int mode,
+ int sync, void *arg)
+{
+ struct wait_bit_key *key = arg;
+ struct wait_bit_queue_entry *wbq_entry =
+ container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
+
+ if (wbq_entry->key.flags != key->flags ||
+ wbq_entry->key.bit_nr != key->bit_nr)
+ return 0;
+
+ return autoremove_wake_function(wq_entry, mode, sync, key);
+}
+
+void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var,
+ int flags)
+{
+ *wbq_entry = (struct wait_bit_queue_entry){
+ .key = {
+ .flags = (var),
+ .bit_nr = -1,
+ },
+ .wq_entry = {
+ .private = current,
+ .func = var_wake_function,
+#ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
+ .entry = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
+#else
+ .task_list = LIST_HEAD_INIT(wbq_entry->wq_entry.task_list),
+#endif
+ },
+ };
+}
+EXPORT_SYMBOL(init_wait_var_entry);
+
+void wake_up_var(void *var)
+{
+ __wake_up_bit(__var_waitqueue(var), var, -1);
+}
+EXPORT_SYMBOL(wake_up_var);
+
+void __init wait_bit_init(void)
+{
+ int i;
+
+ for (i = 0; i < WAIT_TABLE_SIZE; i++)
+ init_waitqueue_head(bit_wait_table + i);
+}
+#endif /* ! HAVE_WAIT_VAR_EVENT */