/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __LIBCFS_LINUX_WAIT_BIT_H
#define __LIBCFS_LINUX_WAIT_BIT_H

/* Make sure we can see if we have TASK_NOLOAD */
#include <linux/sched.h>
/*
 * Linux wait-bit related types and methods:
 */
#ifdef HAVE_WAIT_BIT_HEADER_H
#include <linux/wait_bit.h>
#endif
#include <linux/wait.h>

#ifndef HAVE_WAIT_QUEUE_ENTRY
#define wait_queue_entry_t wait_queue_t
#endif

#ifndef HAVE_PREPARE_TO_WAIT_EVENT
#define __add_wait_queue_entry_tail __add_wait_queue_tail
#endif

#ifndef HAVE_WAIT_BIT_HEADER_H
struct wait_bit_queue_entry {
	struct wait_bit_key	key;
	wait_queue_entry_t	wq_entry;
};

#define ___wait_is_interruptible(state) \
	(!__builtin_constant_p(state) || \
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

#endif /* ! HAVE_WAIT_BIT_HEADER_H */

#ifndef HAVE_PREPARE_TO_WAIT_EVENT
extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
				  wait_queue_entry_t *wq_entry, int state);
#endif

/* ___wait_cond_timeout changed number of args in v3.12-rc1-78-g35a2af94c7ce
 * so let's define our own ___wait_cond_timeout1
 */

#define ___wait_cond_timeout1(condition) \
({ \
	bool __cond = (condition); \
	if (__cond && !__ret) \
		__ret = 1; \
	__cond || !__ret; \
})

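/*
 * Added commentary: ___wait_cond_timeout1() deliberately reads and updates
 * the __ret variable of the *enclosing* wait macro.  When the condition
 * turns true just as the remaining time reaches zero, __ret is forced to 1
 * so callers can tell "condition met" (return >= 1) apart from "timed out"
 * (return == 0).
 */
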
#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 *
 * @bit: the bit of the word being waited on
 * @word: the word being waited on, a kernel virtual address
 *
 * You can use this helper if bitflags are manipulated atomically rather than
 * non-atomically under a lock.
 */
static inline void clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit_unlock(bit, word);
	/* See wake_up_bit() for which memory barrier you need to use. */
	smp_mb__after_atomic();
	wake_up_bit(word, bit);
}
#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */

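/*
 * Usage sketch (illustrative only; MY_BIT_BUSY and my_flags are hypothetical
 * names): pairing clear_and_wake_up_bit() with wait_on_bit().
 *
 *	static unsigned long my_flags;
 *
 *	// owner side: release the bit and wake any waiter in one step
 *	clear_and_wake_up_bit(MY_BIT_BUSY, &my_flags);
 *
 *	// waiter side: sleep until the bit is cleared
 *	wait_on_bit(&my_flags, MY_BIT_BUSY, TASK_UNINTERRUPTIBLE);
 */
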
#ifndef HAVE_WAIT_VAR_EVENT
extern void __init wait_bit_init(void);
extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
				void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);

#define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	wait_queue_head_t *__wq_head = __var_waitqueue(var); \
	struct wait_bit_queue_entry __wbq_entry; \
	long __ret = ret; /* explicit shadow */ \
\
	init_wait_var_entry(&__wbq_entry, var, \
			    exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
	for (;;) { \
		long __int = prepare_to_wait_event(__wq_head, \
						   &__wbq_entry.wq_entry, \
						   state); \
		if (condition) \
			break; \
\
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			goto __out; \
		} \
\
		cmd; \
	} \
	finish_wait(__wq_head, &__wbq_entry.wq_entry); \
__out:	__ret; \
})

#define __wait_var_event(var, condition) \
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
			  schedule())

#define wait_var_event(var, condition) \
do { \
	might_sleep(); \
	if (condition) \
		break; \
	__wait_var_event(var, condition); \
} while (0)

#define __wait_var_event_killable(var, condition) \
	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
			  schedule())

#define wait_var_event_killable(var, condition) \
({ \
	int __ret = 0; \
	might_sleep(); \
	if (!(condition)) \
		__ret = __wait_var_event_killable(var, condition); \
	__ret; \
})

#define __wait_var_event_timeout(var, condition, timeout) \
	___wait_var_event(var, ___wait_cond_timeout1(condition), \
			  TASK_UNINTERRUPTIBLE, 0, timeout, \
			  __ret = schedule_timeout(__ret))

#define wait_var_event_timeout(var, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout1(condition)) \
		__ret = __wait_var_event_timeout(var, condition, timeout); \
	__ret; \
})
#else /* !HAVE_WAIT_VAR_EVENT */
/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
 * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
 */
# ifndef __wait_cond_timeout
# define ___wait_cond_timeout(condition) \
({ \
	bool __cond = (condition); \
	if (__cond && !__ret) \
		__ret = 1; \
	__cond || !__ret; \
})
# endif /* __wait_cond_timeout */

#endif /* ! HAVE_WAIT_VAR_EVENT */

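/*
 * Usage sketch (illustrative; `obj' and its refcount field are hypothetical):
 * wait_var_event()/wake_up_var() wait on an arbitrary address instead of a
 * dedicated waitqueue.
 *
 *	// waiter: sleep until the refcount drops to zero
 *	wait_var_event(&obj->refcount, atomic_read(&obj->refcount) == 0);
 *
 *	// release side: after changing the variable, wake the waiters
 *	if (atomic_dec_and_test(&obj->refcount))
 *		wake_up_var(&obj->refcount);
 */
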
/*
 * prepare_to_wait_event() does not support an exclusive
 * lifo wait.
 * However it will not relink the wait_queue_entry if
 * it is already linked.  So we link to the head of the
 * queue here, and it will stay there.
 */
static inline void prepare_to_wait_exclusive_head(
	wait_queue_head_t *waitq, wait_queue_entry_t *link)
{
	unsigned long flags;

	spin_lock_irqsave(&(waitq->lock), flags);
#ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
	if (list_empty(&link->entry))
#else
	if (list_empty(&link->task_list))
#endif
		__add_wait_queue_exclusive(waitq, link);
	spin_unlock_irqrestore(&((waitq)->lock), flags);
}

#ifndef ___wait_event
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout1() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
({ \
	__label__ __out; \
	wait_queue_entry_t __wq_entry; \
	long __ret = ret; /* explicit shadow */ \
\
	init_wait(&__wq_entry); \
	if (exclusive) \
		__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
	for (;;) { \
		long __int = prepare_to_wait_event(&wq_head, \
						   &__wq_entry, state); \
\
		if (condition) \
			break; \
\
		if (___wait_is_interruptible(state) && __int) { \
			__ret = __int; \
			goto __out; \
		} \
\
		cmd; \
	} \
	finish_wait(&wq_head, &__wq_entry); \
__out:	__ret; \
})
#endif /* !___wait_event */

#ifndef TASK_NOLOAD

#define TASK_IDLE TASK_INTERRUPTIBLE

#define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd) \
({ \
	wait_queue_entry_t __wq_entry; \
	unsigned long flags; \
	long __ret = ret; /* explicit shadow */ \
	sigset_t __old_blocked, __new_blocked; \
\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
	sigprocmask(0, &__new_blocked, &__old_blocked); \
	init_wait(&__wq_entry); \
	if (exclusive) \
		__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
	for (;;) { \
		prepare_to_wait_event(&wq_head, \
				      &__wq_entry, \
				      TASK_INTERRUPTIBLE); \
\
		if (condition) \
			break; \
		/* We have to do this here because some signals */ \
		/* are not blockable - ie from strace(1).       */ \
		/* In these cases we want to schedule_timeout() */ \
		/* again, because we don't want that to return  */ \
		/* -EINTR when the RPC actually succeeded.      */ \
		/* the recalc_sigpending() below will deliver the */ \
		/* signal properly.                             */ \
		if (signal_pending(current)) { \
			spin_lock_irqsave(&current->sighand->siglock, \
					  flags); \
			clear_tsk_thread_flag(current, TIF_SIGPENDING); \
			spin_unlock_irqrestore(&current->sighand->siglock, \
					       flags); \
		} \
		cmd; \
	} \
	finish_wait(&wq_head, &__wq_entry); \
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
	__ret; \
})

#define wait_event_idle(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event_idle(wq_head, condition, 0, 0, schedule()); \
} while (0)

#define wait_event_idle_exclusive(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event_idle(wq_head, condition, 1, 0, schedule()); \
} while (0)

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
			   1, timeout, \
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout1(condition)) \
		__ret = __wait_event_idle_exclusive_timeout( \
			wq_head, condition, timeout); \
	__ret; \
})

#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition, \
						timeout, cmd1, cmd2) \
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
			   1, timeout, \
			   cmd1; __ret = schedule_timeout(__ret); cmd2)

#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout, \
					      cmd1, cmd2) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout1(condition)) \
		__ret = __wait_event_idle_exclusive_timeout_cmd( \
			wq_head, condition, timeout, cmd1, cmd2); \
	__ret; \
})

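/*
 * Usage sketch (illustrative; my_waitq, queue_ready() and q are hypothetical):
 * cmd1 runs just before each sleep and cmd2 just after each wakeup, which
 * allows e.g. dropping a lock across the wait and retaking it afterwards.
 *
 *	spin_lock(&q->lock);
 *	rc = wait_event_idle_exclusive_timeout_cmd(my_waitq,
 *					queue_ready(q),
 *					timeout,
 *					spin_unlock(&q->lock),	// cmd1
 *					spin_lock(&q->lock));	// cmd2
 *	// q->lock is held again here; rc == 0 means the wait timed out
 */
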
#define __wait_event_idle_timeout(wq_head, condition, timeout) \
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
			   0, timeout, \
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout1(condition)) \
		__ret = __wait_event_idle_timeout(wq_head, condition, \
						  timeout); \
	__ret; \
})

#else /* TASK_IDLE */
#ifndef wait_event_idle
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0, \
			      schedule()); \
} while (0)
#endif

#ifndef wait_event_idle_exclusive
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to
 *			       system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, further processes
 * are not considered when this process is woken.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle_exclusive(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0, \
			      schedule()); \
} while (0)
#endif

#ifndef wait_event_idle_exclusive_timeout
#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout1(condition), \
		      TASK_IDLE, 1, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition
 *				       becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, further processes
 * are not considered when this process is woken.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout1(condition)) \
		__ret = __wait_event_idle_exclusive_timeout(wq_head, \
							    condition, \
							    timeout); \
	__ret; \
})
#endif

#ifndef wait_event_idle_exclusive_timeout_cmd
#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition, \
						timeout, cmd1, cmd2) \
	___wait_event(wq_head, ___wait_cond_timeout1(condition), \
		      TASK_IDLE, 1, timeout, \
		      cmd1; __ret = schedule_timeout(__ret); cmd2)

#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout, \
					      cmd1, cmd2) \
({ \
	long __ret = timeout; \
	if (!___wait_cond_timeout1(condition)) \
		__ret = __wait_event_idle_exclusive_timeout_cmd( \
			wq_head, condition, timeout, cmd1, cmd2); \
	__ret; \
})
#endif

#ifndef wait_event_idle_timeout

#define __wait_event_idle_timeout(wq_head, condition, timeout) \
	___wait_event(wq_head, ___wait_cond_timeout1(condition), \
		      TASK_IDLE, 0, timeout, \
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes
 *			     true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout1(condition)) \
		__ret = __wait_event_idle_timeout(wq_head, condition, \
						  timeout); \
	__ret; \
})
#endif

#endif /* TASK_IDLE */

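/*
 * Usage sketch (illustrative; my_waitq and ready are hypothetical): an idle
 * wait with a timeout.  The sleeping task does not contribute to the load
 * average.
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
 *	static int ready;
 *
 *	// waiter:
 *	long rc = wait_event_idle_timeout(my_waitq, ready, 10 * HZ);
 *	// rc == 0: timed out; rc >= 1: condition became true in time
 *
 *	// waker: update the variable first, then wake the queue
 *	ready = 1;
 *	wake_up(&my_waitq);
 */
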
/* ___wait_event_lifo is used for lifo exclusive 'idle' waits */
#ifdef TASK_NOLOAD

#define ___wait_event_lifo(wq_head, condition, ret, cmd) \
({ \
	wait_queue_entry_t __wq_entry; \
	long __ret = ret; /* explicit shadow */ \
\
	init_wait(&__wq_entry); \
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
	for (;;) { \
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry); \
		prepare_to_wait_event(&wq_head, &__wq_entry, TASK_IDLE); \
\
		if (condition) \
			break; \
\
		cmd; \
	} \
	finish_wait(&wq_head, &__wq_entry); \
	__ret; \
})
#else
#define ___wait_event_lifo(wq_head, condition, ret, cmd) \
({ \
	wait_queue_entry_t __wq_entry; \
	unsigned long flags; \
	long __ret = ret; /* explicit shadow */ \
	sigset_t __old_blocked, __new_blocked; \
\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
	sigprocmask(0, &__new_blocked, &__old_blocked); \
	init_wait(&__wq_entry); \
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
	for (;;) { \
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry); \
		prepare_to_wait_event(&wq_head, &__wq_entry, \
				      TASK_INTERRUPTIBLE); \
\
		if (condition) \
			break; \
		/* See justification in ___wait_event_idle() */ \
		if (signal_pending(current)) { \
			spin_lock_irqsave(&current->sighand->siglock, \
					  flags); \
			clear_tsk_thread_flag(current, TIF_SIGPENDING); \
			spin_unlock_irqrestore(&current->sighand->siglock, \
					       flags); \
		} \
		cmd; \
	} \
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
	finish_wait(&wq_head, &__wq_entry); \
	__ret; \
})
#endif

#define wait_event_idle_exclusive_lifo(wq_head, condition) \
do { \
	might_sleep(); \
	if (!(condition)) \
		___wait_event_lifo(wq_head, condition, 0, schedule()); \
} while (0)

#define __wait_event_idle_lifo_timeout(wq_head, condition, timeout) \
	___wait_event_lifo(wq_head, ___wait_cond_timeout1(condition), \
			   timeout, \
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_lifo_timeout(wq_head, condition, timeout) \
({ \
	long __ret = timeout; \
	might_sleep(); \
	if (!___wait_cond_timeout1(condition)) \
		__ret = __wait_event_idle_lifo_timeout(wq_head, \
						       condition, \
						       timeout); \
	__ret; \
})

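/*
 * Usage sketch (illustrative; svc_waitq, svc_queue, req and svc_timeout are
 * hypothetical): LIFO exclusive waits keep recently-run service threads
 * "hot" - each wake_up() releases the most recent waiter while the rest
 * stay queued.
 *
 *	// each service thread:
 *	rc = wait_event_idle_exclusive_lifo_timeout(svc_waitq,
 *					!list_empty(&svc_queue),
 *					svc_timeout);
 *
 *	// producer:
 *	list_add_tail(&req->list, &svc_queue);
 *	wake_up(&svc_waitq);
 */
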
/* l_wait_event_abortable() is a bit like wait_event_killable()
 * except there is a fixed set of signals which will abort:
 * LUSTRE_FATAL_SIGS
 */
#define LUSTRE_FATAL_SIGS \
	(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
	 sigmask(SIGQUIT) | sigmask(SIGALRM))

#define l_wait_event_abortable(wq, condition) \
({ \
	sigset_t __new_blocked, __old_blocked; \
	int __ret = 0; \
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS); \
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked); \
	__ret = wait_event_interruptible(wq, condition); \
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
	__ret; \
})

#define l_wait_event_abortable_exclusive(wq, condition) \
({ \
	sigset_t __new_blocked, __old_blocked; \
	int __ret = 0; \
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS); \
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked); \
	__ret = wait_event_interruptible_exclusive(wq, condition); \
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
	__ret; \
})

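/*
 * Usage sketch (illustrative; my_waitq, req, request_done() and
 * abort_request() are hypothetical): only the signals in LUSTRE_FATAL_SIGS
 * can interrupt the sleep; all other signals stay blocked for its duration.
 *
 *	rc = l_wait_event_abortable(my_waitq, request_done(req));
 *	if (rc == -ERESTARTSYS)
 *		// woken by SIGKILL/SIGINT/SIGTERM/SIGQUIT/SIGALRM
 *		abort_request(req);
 */
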
#ifndef HAVE_WAIT_WOKEN
#define WQ_FLAG_WOKEN 0x02
long wait_woken(wait_queue_entry_t *wait, unsigned int mode, long timeout);
int woken_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			int sync, void *key);
#endif /* ! HAVE_WAIT_WOKEN */

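/*
 * Usage sketch (illustrative): the wait_woken() pattern this fallback
 * supports.  The WQ_FLAG_WOKEN handshake closes the race between testing
 * the condition and going to sleep without explicit set_current_state()
 * barriers.
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!(condition))
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq_head, &wait);
 */
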
#endif /* __LIBCFS_LINUX_WAIT_BIT_H */