1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LIBCFS_LINUX_WAIT_BIT_H
3 #define __LIBCFS_LINUX_WAIT_BIT_H
5 /* Make sure we can see if we have TASK_NOLOAD */
6 #include <linux/sched.h>
8 * Linux wait-bit related types and methods:
10 #ifdef HAVE_WAIT_BIT_HEADER_H
11 #include <linux/wait_bit.h>
13 #include <linux/wait.h>
15 #ifndef HAVE_WAIT_QUEUE_ENTRY
16 #define wait_queue_entry_t wait_queue_t
19 #ifndef HAVE_PREPARE_TO_WAIT_EVENT
20 #define __add_wait_queue_entry_tail __add_wait_queue_tail
23 #ifndef HAVE_WAIT_BIT_HEADER_H
24 struct wait_bit_queue_entry {
25 struct wait_bit_key key;
26 wait_queue_entry_t wq_entry;
29 #define ___wait_is_interruptible(state) \
30 (!__builtin_constant_p(state) || \
31 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
33 #endif /* ! HAVE_WAIT_BIT_HEADER_H */
35 #ifndef HAVE_PREPARE_TO_WAIT_EVENT
/* Compat fallback for kernels that do not export prepare_to_wait_event():
 * declare it here so the wait macros below can use it.
 * NOTE(review): assumes a libcfs-local implementation exists in a .c
 * file — confirm against the build. */
36 extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
37 wait_queue_entry_t *wq_entry, int state);
40 /* ___wait_cond_timeout changed number of args in v3.12-rc1-78-g35a2af94c7ce
41 * so let's define our own ___wait_cond_timeout1
44 #define ___wait_cond_timeout1(condition) \
46 bool __cond = (condition); \
47 if (__cond && !__ret) \
52 #ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
54 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
56 * @bit: the bit of the word being waited on
57 * @word: the word being waited on, a kernel virtual address
59 * You can use this helper if bitflags are manipulated atomically rather than
60 * non-atomically under a lock.
62 static inline void clear_and_wake_up_bit(int bit, void *word)
64 clear_bit_unlock(bit, word);
65 /* See wake_up_bit() for which memory barrier you need to use. */
66 smp_mb__after_atomic();
67 wake_up_bit(word, bit);
69 #endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */
71 #ifndef HAVE_WAIT_VAR_EVENT
/* Compat fallbacks for kernels lacking the wait_var_event() machinery:
 * declarations of the init/wake/waitqueue-lookup helpers used by the
 * ___wait_var_event() macro below.
 * NOTE(review): presumably implemented in a libcfs .c file — confirm. */
72 extern void __init wait_bit_init(void);
73 extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
74 void *var, int flags);
75 extern void wake_up_var(void *var);
76 extern wait_queue_head_t *__var_waitqueue(void *p);
78 #define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \
81 wait_queue_head_t *__wq_head = __var_waitqueue(var); \
82 struct wait_bit_queue_entry __wbq_entry; \
83 long __ret = ret; /* explicit shadow */ \
85 init_wait_var_entry(&__wbq_entry, var, \
86 exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
88 long __int = prepare_to_wait_event(__wq_head, \
89 &__wbq_entry.wq_entry, \
94 if (___wait_is_interruptible(state) && __int) { \
101 finish_wait(__wq_head, &__wbq_entry.wq_entry); \
105 #define __wait_var_event(var, condition) \
106 ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
109 #define wait_var_event(var, condition) \
114 __wait_var_event(var, condition); \
117 #define __wait_var_event_killable(var, condition) \
118 ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
121 #define wait_var_event_killable(var, condition) \
126 __ret = __wait_var_event_killable(var, condition); \
130 #define __wait_var_event_timeout(var, condition, timeout) \
131 ___wait_var_event(var, ___wait_cond_timeout1(condition), \
132 TASK_UNINTERRUPTIBLE, 0, timeout, \
133 __ret = schedule_timeout(__ret))
135 #define wait_var_event_timeout(var, condition, timeout) \
137 long __ret = timeout; \
139 if (!___wait_cond_timeout1(condition)) \
140 __ret = __wait_var_event_timeout(var, condition, timeout); \
143 #else /* !HAVE_WAIT_VAR_EVENT */
144 /* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
145 * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
147 # ifndef __wait_cond_timeout
148 # define ___wait_cond_timeout(condition) \
150 bool __cond = (condition); \
151 if (__cond && !__ret) \
155 # endif /* __wait_cond_timeout */
157 #endif /* ! HAVE_WAIT_VAR_EVENT */
160 * prepare_to_wait_event() does not support an exclusive
162 * However it will not relink the wait_queue_entry if
163 * it is already linked. So we link to the head of the
164 * queue here, and it will stay there.
166 static inline void prepare_to_wait_exclusive_head(
167 wait_queue_head_t *waitq, wait_queue_entry_t *link)
171 spin_lock_irqsave(&(waitq->lock), flags);
172 #ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
173 if (list_empty(&link->entry))
175 if (list_empty(&link->task_list))
177 __add_wait_queue_exclusive(waitq, link);
178 spin_unlock_irqrestore(&((waitq)->lock), flags);
181 #ifndef ___wait_event
183 * The below macro ___wait_event() has an explicit shadow of the __ret
184 * variable when used from the wait_event_*() macros.
186 * This is so that both can use the ___wait_cond_timeout1() construct
187 * to wrap the condition.
189 * The type inconsistency of the wait_event_*() __ret variable is also
190 * on purpose; we use long where we can return timeout values and int
194 #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
197 wait_queue_entry_t __wq_entry; \
198 long __ret = ret; /* explicit shadow */ \
200 init_wait(&__wq_entry); \
202 __wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
204 long __int = prepare_to_wait_event(&wq_head, \
205 &__wq_entry, state); \
210 if (___wait_is_interruptible(state) && __int) { \
217 finish_wait(&wq_head, &__wq_entry); \
224 #define TASK_IDLE TASK_INTERRUPTIBLE
226 #define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd) \
228 wait_queue_entry_t __wq_entry; \
229 unsigned long flags; \
230 long __ret = ret; /* explicit shadow */ \
231 sigset_t __old_blocked, __new_blocked; \
233 siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
234 sigprocmask(0, &__new_blocked, &__old_blocked); \
235 init_wait(&__wq_entry); \
237 __wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
239 prepare_to_wait_event(&wq_head, \
241 TASK_INTERRUPTIBLE); \
245 /* We have to do this here because some signals */ \
246 /* are not blockable - ie from strace(1). */ \
247 /* In these cases we want to schedule_timeout() */ \
248 /* again, because we don't want that to return */ \
249 /* -EINTR when the RPC actually succeeded. */ \
250 /* the recalc_sigpending() below will deliver the */ \
251 /* signal properly. */ \
252 if (signal_pending(current)) { \
253 spin_lock_irqsave(&current->sighand->siglock, \
255 clear_tsk_thread_flag(current, TIF_SIGPENDING); \
256 spin_unlock_irqrestore(&current->sighand->siglock,\
261 finish_wait(&wq_head, &__wq_entry); \
262 sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
266 #define wait_event_idle(wq_head, condition) \
270 ___wait_event_idle(wq_head, condition, 0, 0, schedule());\
273 #define wait_event_idle_exclusive(wq_head, condition) \
277 ___wait_event_idle(wq_head, condition, 1, 0, schedule());\
280 #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)\
281 ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
283 __ret = schedule_timeout(__ret))
285 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
287 long __ret = timeout; \
289 if (!___wait_cond_timeout1(condition)) \
290 __ret = __wait_event_idle_exclusive_timeout( \
291 wq_head, condition, timeout); \
295 #define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition, \
296 timeout, cmd1, cmd2) \
297 ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
299 cmd1; __ret = schedule_timeout(__ret); cmd2)
301 #define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
304 long __ret = timeout; \
305 if (!___wait_cond_timeout1(condition)) \
306 __ret = __wait_event_idle_exclusive_timeout_cmd( \
307 wq_head, condition, timeout, cmd1, cmd2); \
311 #define __wait_event_idle_timeout(wq_head, condition, timeout) \
312 ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
314 __ret = schedule_timeout(__ret))
316 #define wait_event_idle_timeout(wq_head, condition, timeout) \
318 long __ret = timeout; \
320 if (!___wait_cond_timeout1(condition)) \
321 __ret = __wait_event_idle_timeout(wq_head, condition, \
326 #else /* TASK_IDLE */
327 #ifndef wait_event_idle
329 * wait_event_idle - wait for a condition without contributing to system load
330 * @wq_head: the waitqueue to wait on
331 * @condition: a C expression for the event to wait for
333 * The process is put to sleep (TASK_IDLE) until the
334 * @condition evaluates to true.
335 * The @condition is checked each time the waitqueue @wq_head is woken up.
337 * wake_up() has to be called after changing any variable that could
338 * change the result of the wait condition.
341 #define wait_event_idle(wq_head, condition) \
345 ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, \
349 #ifndef wait_event_idle_exclusive
351 * wait_event_idle_exclusive - wait for a condition without contributing to
353 * @wq_head: the waitqueue to wait on
354 * @condition: a C expression for the event to wait for
356 * The process is put to sleep (TASK_IDLE) until the
357 * @condition evaluates to true.
358 * The @condition is checked each time the waitqueue @wq_head is woken up.
360 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
361 * set thus if other processes wait on the same list, when this
362 * process is woken further processes are not considered.
364 * wake_up() has to be called after changing any variable that could
365 * change the result of the wait condition.
368 #define wait_event_idle_exclusive(wq_head, condition) \
372 ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, \
376 #ifndef wait_event_idle_exclusive_timeout
378 * wait_event_idle_exclusive_timeout - sleep without load until a condition
379 * becomes true or a timeout elapses
380 * @wq_head: the waitqueue to wait on
381 * @condition: a C expression for the event to wait for
382 * @timeout: timeout, in jiffies
384 * The process is put to sleep (TASK_IDLE) until the
385 * @condition evaluates to true. The @condition is checked each time
386 * the waitqueue @wq_head is woken up.
388 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
389 * set thus if other processes wait on the same list, when this
390 * process is woken further processes are not considered.
392 * wake_up() has to be called after changing any variable that could
393 * change the result of the wait condition.
396 * 0 if the @condition evaluated to %false after the @timeout elapsed,
397 * 1 if the @condition evaluated to %true after the @timeout elapsed,
398 * or the remaining jiffies (at least 1) if the @condition evaluated
399 * to %true before the @timeout elapsed.
401 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
403 long __ret = timeout; \
405 if (!___wait_cond_timeout1(condition)) \
406 __ret = __wait_event_idle_exclusive_timeout(wq_head, \
412 #ifndef wait_event_idle_exclusive_timeout_cmd
413 #define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition, \
414 timeout, cmd1, cmd2) \
415 ___wait_event(wq_head, ___wait_cond_timeout1(condition), \
416 TASK_IDLE, 1, timeout, \
417 cmd1; __ret = schedule_timeout(__ret); cmd2)
419 #define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
422 long __ret = timeout; \
423 if (!___wait_cond_timeout1(condition)) \
424 __ret = __wait_event_idle_exclusive_timeout_cmd( \
425 wq_head, condition, timeout, cmd1, cmd2); \
430 #ifndef wait_event_idle_timeout
432 #define __wait_event_idle_timeout(wq_head, condition, timeout) \
433 ___wait_event(wq_head, ___wait_cond_timeout1(condition), \
434 TASK_IDLE, 0, timeout, \
435 __ret = schedule_timeout(__ret))
438 * wait_event_idle_timeout - sleep without load until a condition becomes
439 * true or a timeout elapses
440 * @wq_head: the waitqueue to wait on
441 * @condition: a C expression for the event to wait for
442 * @timeout: timeout, in jiffies
444 * The process is put to sleep (TASK_IDLE) until the
445 * @condition evaluates to true. The @condition is checked each time
446 * the waitqueue @wq_head is woken up.
448 * wake_up() has to be called after changing any variable that could
449 * change the result of the wait condition.
452 * 0 if the @condition evaluated to %false after the @timeout elapsed,
453 * 1 if the @condition evaluated to %true after the @timeout elapsed,
454 * or the remaining jiffies (at least 1) if the @condition evaluated
455 * to %true before the @timeout elapsed.
457 #define wait_event_idle_timeout(wq_head, condition, timeout) \
459 long __ret = timeout; \
461 if (!___wait_cond_timeout1(condition)) \
462 __ret = __wait_event_idle_timeout(wq_head, condition, \
467 #endif /* TASK_IDLE */
469 /* ___wait_event_lifo is used for lifo exclusive 'idle' waits */
472 #define ___wait_event_lifo(wq_head, condition, ret, cmd) \
474 wait_queue_entry_t __wq_entry; \
475 long __ret = ret; /* explicit shadow */ \
477 init_wait(&__wq_entry); \
478 __wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
480 prepare_to_wait_exclusive_head(&wq_head, &__wq_entry); \
481 prepare_to_wait_event(&wq_head, &__wq_entry, TASK_IDLE);\
488 finish_wait(&wq_head, &__wq_entry); \
492 #define ___wait_event_lifo(wq_head, condition, ret, cmd) \
494 wait_queue_entry_t __wq_entry; \
495 unsigned long flags; \
496 long __ret = ret; /* explicit shadow */ \
497 sigset_t __old_blocked, __new_blocked; \
499 siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
500 sigprocmask(0, &__new_blocked, &__old_blocked); \
501 init_wait(&__wq_entry); \
502 __wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
504 prepare_to_wait_exclusive_head(&wq_head, &__wq_entry); \
505 prepare_to_wait_event(&wq_head, &__wq_entry, \
506 TASK_INTERRUPTIBLE); \
510 /* See justification in ___wait_event_idle */ \
511 if (signal_pending(current)) { \
512 spin_lock_irqsave(&current->sighand->siglock, \
514 clear_tsk_thread_flag(current, TIF_SIGPENDING); \
515 spin_unlock_irqrestore(&current->sighand->siglock,\
520 sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
521 finish_wait(&wq_head, &__wq_entry); \
526 #define wait_event_idle_exclusive_lifo(wq_head, condition) \
530 ___wait_event_lifo(wq_head, condition, 0, schedule()); \
533 #define __wait_event_idle_lifo_timeout(wq_head, condition, timeout) \
534 ___wait_event_lifo(wq_head, ___wait_cond_timeout1(condition), \
536 __ret = schedule_timeout(__ret))
538 #define wait_event_idle_exclusive_lifo_timeout(wq_head, condition, timeout)\
540 long __ret = timeout; \
542 if (!___wait_cond_timeout1(condition)) \
543 __ret = __wait_event_idle_lifo_timeout(wq_head, \
549 /* l_wait_event_abortable() is a bit like wait_event_killable()
550 * except there is a fixed set of signals which will abort:
/* Signal mask of the fixed set of signals that abort an "abortable"
 * wait (see l_wait_event_abortable()): fatal/termination signals plus
 * SIGALRM.  Also referenced by the idle/lifo wait macros earlier in
 * this header; that is safe because macro arguments are expanded at
 * the use site, by which point this definition has been seen. */
553 #define LUSTRE_FATAL_SIGS \
554 (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
555 sigmask(SIGQUIT) | sigmask(SIGALRM))
557 #define l_wait_event_abortable(wq, condition) \
559 sigset_t __new_blocked, __old_blocked; \
561 siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS); \
562 sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked); \
563 __ret = wait_event_interruptible(wq, condition); \
564 sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
568 #define l_wait_event_abortable_timeout(wq, condition, timeout) \
570 sigset_t __new_blocked, __old_blocked; \
572 siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS); \
573 sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked); \
574 __ret = wait_event_interruptible_timeout(wq, condition, timeout);\
575 sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
579 #define l_wait_event_abortable_exclusive(wq, condition) \
581 sigset_t __new_blocked, __old_blocked; \
583 siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS); \
584 sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked); \
585 __ret = wait_event_interruptible_exclusive(wq, condition); \
586 sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
590 #ifndef HAVE_WAIT_WOKEN
/* Compat fallback for kernels without the wait_woken()/woken_wake_function()
 * API: define the WQ_FLAG_WOKEN flag and declare the two entry points.
 * NOTE(review): assumes libcfs supplies the implementations — confirm. */
591 #define WQ_FLAG_WOKEN 0x02
592 long wait_woken(wait_queue_entry_t *wait, unsigned int mode, long timeout);
593 int woken_wake_function(wait_queue_entry_t *wait, unsigned int mode,
594 int sync, void *key);
595 #endif /* ! HAVE_WAIT_WOKEN */
597 #endif /* __LIBCFS_LINUX_WAIT_BIT_H */