#define wait_queue_entry_t wait_queue_t
#endif
+/* Kernel v4.13 renamed __add_wait_queue_tail() to
+ * __add_wait_queue_entry_tail(); provide the new name on older kernels.
+ * NOTE(review): this is gated on HAVE_PREPARE_TO_WAIT_EVENT rather than a
+ * dedicated configure check, i.e. it assumes both symbols are absent on the
+ * same kernels -- confirm against the autoconf tests.
+ */
+#ifndef HAVE_PREPARE_TO_WAIT_EVENT
+#define __add_wait_queue_entry_tail __add_wait_queue_tail
+#endif
+
#ifndef HAVE_WAIT_BIT_HEADER_H
struct wait_bit_queue_entry {
struct wait_bit_key key;
__ret = __wait_var_event_timeout(var, condition, timeout); \
__ret; \
})
+#else /* !HAVE_WAIT_VAR_EVENT */
+/* linux-3.10.0-1062.el7 backports wait_var_event_timeout() built on
+ * ___wait_cond_timeout(), but doesn't define ___wait_cond_timeout !!!
+ * Supply the upstream definition (see include/linux/wait.h): evaluate
+ * the condition, latch a true result into __ret (which the enclosing
+ * ___wait_event() macro declares), and keep waiting until either the
+ * condition holds or the timeout (__ret) has expired.
+ *
+ * Guard on the same triple-underscore name we define, so a kernel that
+ * does provide ___wait_cond_timeout() is not redefined here.
+ */
+# ifndef ___wait_cond_timeout
+# define ___wait_cond_timeout(condition)				\
+({									\
+	bool __cond = (condition);					\
+	if (__cond && !__ret)						\
+		__ret = 1;						\
+	__cond || !__ret;						\
+})
+# endif /* ___wait_cond_timeout */
+
#endif /* ! HAVE_WAIT_VAR_EVENT */
/*
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
({ \
__label__ __out; \
- wait_queue_entry_ __wq_entry; \
+ wait_queue_entry_t __wq_entry; \
long __ret = ret; /* explicit shadow */ \
\
init_wait(&__wq_entry); \
if (exclusive) \
- __wq_entry.flags = WQ_FLAG_EXCLUSIVE \
+ __wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
for (;;) { \
long __int = prepare_to_wait_event(&wq_head, \
&__wq_entry, state); \
#ifndef TASK_NOLOAD
+#define TASK_IDLE TASK_INTERRUPTIBLE
+
#define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd) \
({ \
wait_queue_entry_t __wq_entry; \
unsigned long flags; \
long __ret = ret; /* explicit shadow */ \
- sigset_t __blocked; \
+ sigset_t __old_blocked, __new_blocked; \
\
- __blocked = cfs_block_sigsinv(0); \
+ siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
+ sigprocmask(0, &__new_blocked, &__old_blocked); \
init_wait(&__wq_entry); \
if (exclusive) \
__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
\
if (condition) \
break; \
- /* See justification in __l_wait_event */ \
+ /* We have to do this here because some signals */ \
+ /* are not blockable - ie from strace(1). */ \
+ /* In these cases we want to schedule_timeout() */ \
+ /* again, because we don't want that to return */ \
+ /* -EINTR when the RPC actually succeeded. */ \
+ /* the recalc_sigpending() below will deliver the */ \
+ /* signal properly. */ \
if (signal_pending(current)) { \
			spin_lock_irqsave(&current->sighand->siglock,	\
flags); \
cmd; \
} \
finish_wait(&wq_head, &__wq_entry); \
- cfs_restore_sigs(__blocked); \
+ sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
__ret; \
})
cmd1, cmd2) \
({ \
long __ret = timeout; \
- might_sleep(); \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_exclusive_timeout_cmd( \
wq_head, condition, timeout, cmd1, cmd2); \
cmd1, cmd2) \
({ \
long __ret = timeout; \
- might_sleep(); \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_exclusive_timeout_cmd( \
wq_head, condition, timeout, cmd1, cmd2); \
wait_queue_entry_t __wq_entry; \
unsigned long flags; \
long __ret = ret; /* explicit shadow */ \
- sigset_t __blocked; \
+ sigset_t __old_blocked, __new_blocked; \
\
- __blocked = cfs_block_sigsinv(0); \
+ siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
+ sigprocmask(0, &__new_blocked, &__old_blocked); \
init_wait(&__wq_entry); \
__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
for (;;) { \
\
if (condition) \
break; \
- /* See justification in __l_wait_event */ \
+ /* See justification in ___wait_event_idle */ \
if (signal_pending(current)) { \
			spin_lock_irqsave(&current->sighand->siglock,	\
flags); \
} \
cmd; \
} \
- cfs_restore_sigs(__blocked); \
+ sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
finish_wait(&wq_head, &__wq_entry); \
__ret; \
})
__ret; \
})
+#ifndef HAVE_WAIT_WOKEN
+/* Compat for kernels lacking wait_woken()/woken_wake_function()
+ * (added upstream in v3.17).  Only declarations here; presumably the
+ * implementations live in a companion libcfs .c file -- verify.
+ * WQ_FLAG_WOKEN mirrors the upstream flag value used by that protocol.
+ */
+#define WQ_FLAG_WOKEN 0x02
+long wait_woken(wait_queue_entry_t *wait, unsigned int mode, long timeout);
+int woken_wake_function(wait_queue_entry_t *wait, unsigned int mode,
+			int sync, void *key);
+#endif /* HAVE_WAIT_WOKEN */
+
#endif /* __LIBCFS_LINUX_WAIT_BIT_H */