1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LIBCFS_LINUX_WAIT_BIT_H
3 #define __LIBCFS_LINUX_WAIT_BIT_H
5 /* Make sure we can see if we have TASK_NOLOAD */
6 #include <linux/sched.h>
/* Linux wait-bit related types and methods: */
/* Kernels with a separate <linux/wait_bit.h> (HAVE_WAIT_BIT_HEADER_H set by
 * configure) keep the wait-bit API there; older kernels keep it in
 * <linux/wait.h>, which we need in either case.
 */
#ifdef HAVE_WAIT_BIT_HEADER_H
#include <linux/wait_bit.h>
#endif
#include <linux/wait.h>

#ifndef HAVE_WAIT_QUEUE_ENTRY
/* newer kernels renamed wait_queue_t to wait_queue_entry_t; map the new
 * name back for kernels that only have the old one.
 */
#define wait_queue_entry_t wait_queue_t
#endif
19 #ifndef HAVE_WAIT_BIT_HEADER_H
20 struct wait_bit_queue_entry {
21 struct wait_bit_key key;
22 wait_queue_entry_t wq_entry;
25 #define ___wait_is_interruptible(state) \
26 (!__builtin_constant_p(state) || \
27 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE) \
29 #endif /* ! HAVE_WAIT_BIT_HEADER_H */
31 #ifndef HAVE_PREPARE_TO_WAIT_EVENT
32 extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
33 wait_queue_entry_t *wq_entry, int state);
/* ___wait_cond_timeout changed number of args in v3.12-rc1-78-g35a2af94c7ce
 * so let's define our own ___wait_cond_timeout1
 *
 * NOTE: expands in a context where a local "long __ret" (the remaining
 * timeout) is in scope; sets __ret to 1 when the condition became true
 * after the timeout already expired.
 */
#define ___wait_cond_timeout1(condition)				\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})
#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 *
 * @bit: the bit of the word being waited on
 * @word: the word being waited on, a kernel virtual address
 *
 * You can use this helper if bitflags are manipulated atomically rather than
 * non-atomically under a lock.
 */
static inline void clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit_unlock(bit, word);
	/* See wake_up_bit() for which memory barrier you need to use. */
	smp_mb__after_atomic();
	wake_up_bit(word, bit);
}
#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */
#ifndef HAVE_WAIT_VAR_EVENT
/* Compat front-ends for the wait_var_event*() family on kernels that
 * lack it; the out-of-line helpers declared here are provided by libcfs.
 */
extern void __init wait_bit_init(void);
extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);
/* Core wait loop: sleeps on the waitqueue returned by __var_waitqueue(var)
 * until @condition is true; @cmd performs the actual schedule step and
 * __ret (explicitly shadowed) carries the timeout/return value.
 */
#define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \
wait_queue_head_t *__wq_head = __var_waitqueue(var); \
struct wait_bit_queue_entry __wbq_entry; \
long __ret = ret; /* explicit shadow */ \
init_wait_var_entry(&__wbq_entry, var, \
exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
long __int = prepare_to_wait_event(__wq_head, \
&__wbq_entry.wq_entry, \
if (___wait_is_interruptible(state) && __int) { \
finish_wait(__wq_head, &__wbq_entry.wq_entry); \
#define __wait_var_event(var, condition) \
___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
#define wait_var_event(var, condition) \
__wait_var_event(var, condition); \
#define __wait_var_event_killable(var, condition) \
___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
#define wait_var_event_killable(var, condition) \
__ret = __wait_var_event_killable(var, condition); \
#define __wait_var_event_timeout(var, condition, timeout) \
___wait_var_event(var, ___wait_cond_timeout1(condition), \
TASK_UNINTERRUPTIBLE, 0, timeout, \
__ret = schedule_timeout(__ret))
/* NOTE(review): presumably mirrors the kernel wait_var_event_timeout()
 * return contract (0 on timeout, else remaining jiffies) — confirm
 * against include/linux/wait_bit.h.
 */
#define wait_var_event_timeout(var, condition, timeout) \
long __ret = timeout; \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_var_event_timeout(var, condition, timeout); \
#endif /* ! HAVE_WAIT_VAR_EVENT */
142 * prepare_to_wait_event() does not support an exclusive
144 * However it will not relink the wait_queue_entry if
145 * it is already linked. So we link to the head of the
146 * queue here, and it will stay there.
148 static inline void prepare_to_wait_exclusive_head(
149 wait_queue_head_t *waitq, wait_queue_entry_t *link)
153 spin_lock_irqsave(&(waitq->lock), flags);
154 #ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
155 if (list_empty(&link->entry))
157 if (list_empty(&link->task_list))
159 __add_wait_queue_exclusive(waitq, link);
160 spin_unlock_irqrestore(&((waitq)->lock), flags);
#ifndef ___wait_event
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout1() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_entry_t __wq_entry;	/* was missing the "_t" */	\
	long __ret = ret;	/* explicit shadow */			\
									\
	init_wait(&__wq_entry);						\
	if (exclusive)							\
		__wq_entry.flags = WQ_FLAG_EXCLUSIVE;			\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq_head,		\
						   &__wq_entry, state);	\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq_head, &__wq_entry);				\
__out:	__ret;								\
})
#endif /* !___wait_event */
/* Compat: map TASK_IDLE onto TASK_INTERRUPTIBLE for kernels that lack it.
 * The ___wait_event_idle() macro below then blocks all but
 * LUSTRE_FATAL_SIGS and clears stray TIF_SIGPENDING so the sleep still
 * behaves like an idle (no-load, signal-immune) wait.
 */
#define TASK_IDLE TASK_INTERRUPTIBLE
/*
 * Emulate a TASK_IDLE wait on kernels without it: sleep interruptibly
 * with everything but LUSTRE_FATAL_SIGS blocked, and quietly discard any
 * unblockable signal that sneaks in, so the wait does not contribute to
 * load yet cannot be broken by ordinary signals.
 */
#define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd)	\
({									\
	wait_queue_entry_t __wq_entry;					\
	unsigned long flags;						\
	long __ret = ret;	/* explicit shadow */			\
	sigset_t __old_blocked, __new_blocked;				\
									\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);			\
	sigprocmask(0, &__new_blocked, &__old_blocked);			\
	init_wait(&__wq_entry);						\
	if (exclusive)							\
		__wq_entry.flags = WQ_FLAG_EXCLUSIVE;			\
	for (;;) {							\
		prepare_to_wait_event(&wq_head,				\
				      &__wq_entry,			\
				      TASK_INTERRUPTIBLE);		\
									\
		if (condition)						\
			break;						\
		/* We have to do this here because some signals */	\
		/* are not blockable - ie from strace(1).       */	\
		/* In these cases we want to schedule_timeout() */	\
		/* again, because we don't want that to return  */	\
		/* -EINTR when the RPC actually succeeded.      */	\
		/* the recalc_sigpending() below will deliver the */	\
		/* signal properly.                             */	\
		if (signal_pending(current)) {				\
			spin_lock_irqsave(&current->sighand->siglock,	\
					  flags);			\
			clear_tsk_thread_flag(current, TIF_SIGPENDING);	\
			spin_unlock_irqrestore(&current->sighand->siglock,\
					       flags);			\
		}							\
		cmd;							\
	}								\
	finish_wait(&wq_head, &__wq_entry);				\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})
/* Front-ends for the emulated idle waits, all built on
 * ___wait_event_idle() above; the *_timeout variants carry the remaining
 * jiffies in __ret via ___wait_cond_timeout1().
 */
#define wait_event_idle(wq_head, condition) \
___wait_event_idle(wq_head, condition, 0, 0, schedule());\
#define wait_event_idle_exclusive(wq_head, condition) \
___wait_event_idle(wq_head, condition, 1, 0, schedule());\
#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)\
___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
__ret = schedule_timeout(__ret))
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
long __ret = timeout; \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_exclusive_timeout( \
wq_head, condition, timeout); \
#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition, \
timeout, cmd1, cmd2) \
___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
cmd1; __ret = schedule_timeout(__ret); cmd2)
#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
long __ret = timeout; \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_exclusive_timeout_cmd( \
wq_head, condition, timeout, cmd1, cmd2); \
#define __wait_event_idle_timeout(wq_head, condition, timeout) \
___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
__ret = schedule_timeout(__ret))
#define wait_event_idle_timeout(wq_head, condition, timeout) \
long __ret = timeout; \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_timeout(wq_head, condition, \
#else /* TASK_IDLE */
#ifndef wait_event_idle
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle(wq_head, condition) \
___wait_event(wq_head, condition, TASK_IDLE, 0, 0, \
#ifndef wait_event_idle_exclusive
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to
 * system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle_exclusive(wq_head, condition) \
___wait_event(wq_head, condition, TASK_IDLE, 1, 0, \
#ifndef wait_event_idle_exclusive_timeout
/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition
 * becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
 * set thus if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
long __ret = timeout; \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_exclusive_timeout(wq_head, \
#ifndef wait_event_idle_exclusive_timeout_cmd
/* Variant of wait_event_idle_exclusive_timeout() that additionally runs
 * @cmd1 before and @cmd2 after each schedule_timeout() step.
 */
#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition, \
timeout, cmd1, cmd2) \
___wait_event(wq_head, ___wait_cond_timeout1(condition), \
TASK_IDLE, 1, timeout, \
cmd1; __ret = schedule_timeout(__ret); cmd2)
#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
long __ret = timeout; \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_exclusive_timeout_cmd( \
wq_head, condition, timeout, cmd1, cmd2); \
#ifndef wait_event_idle_timeout
#define __wait_event_idle_timeout(wq_head, condition, timeout) \
___wait_event(wq_head, ___wait_cond_timeout1(condition), \
TASK_IDLE, 0, timeout, \
__ret = schedule_timeout(__ret))
/**
 * wait_event_idle_timeout - sleep without load until a condition becomes
 * true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout) \
long __ret = timeout; \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_timeout(wq_head, condition, \
#endif /* TASK_IDLE */
/* ___wait_event_lifo is used for lifo exclusive 'idle' waits */
/* Native-TASK_IDLE flavor (presumably guarded by #ifdef TASK_IDLE — the
 * guard is not visible here): sleeps in TASK_IDLE so, unlike the
 * fallback variant below, it needs no signal blocking.
 */
#define ___wait_event_lifo(wq_head, condition, ret, cmd) \
wait_queue_entry_t __wq_entry; \
long __ret = ret; /* explicit shadow */ \
init_wait(&__wq_entry); \
__wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
prepare_to_wait_exclusive_head(&wq_head, &__wq_entry); \
prepare_to_wait_event(&wq_head, &__wq_entry, TASK_IDLE);\
finish_wait(&wq_head, &__wq_entry); \
/*
 * Fallback ___wait_event_lifo() for kernels without TASK_IDLE: sleep
 * interruptibly with everything but LUSTRE_FATAL_SIGS blocked, silently
 * discarding unblockable signals (see justification in ___wait_event_idle).
 */
#define ___wait_event_lifo(wq_head, condition, ret, cmd)		\
({									\
	wait_queue_entry_t __wq_entry;					\
	unsigned long flags;						\
	long __ret = ret;	/* explicit shadow */			\
	sigset_t __old_blocked, __new_blocked;				\
									\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);			\
	sigprocmask(0, &__new_blocked, &__old_blocked);			\
	init_wait(&__wq_entry);						\
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE;				\
	for (;;) {							\
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);	\
		prepare_to_wait_event(&wq_head, &__wq_entry,		\
				      TASK_INTERRUPTIBLE);		\
									\
		if (condition)						\
			break;						\
		/* See justification in ___wait_event_idle */		\
		if (signal_pending(current)) {				\
			spin_lock_irqsave(&current->sighand->siglock,	\
					  flags);			\
			clear_tsk_thread_flag(current, TIF_SIGPENDING);	\
			spin_unlock_irqrestore(&current->sighand->siglock,\
					       flags);			\
		}							\
		cmd;							\
	}								\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	finish_wait(&wq_head, &__wq_entry);				\
	__ret;								\
})
/* LIFO-queued idle-exclusive waiters: entries are linked at the head of
 * the queue (see prepare_to_wait_exclusive_head() above).
 */
#define wait_event_idle_exclusive_lifo(wq_head, condition) \
___wait_event_lifo(wq_head, condition, 0, schedule()); \
#define __wait_event_idle_lifo_timeout(wq_head, condition, timeout) \
___wait_event_lifo(wq_head, ___wait_cond_timeout1(condition), \
__ret = schedule_timeout(__ret))
#define wait_event_idle_exclusive_lifo_timeout(wq_head, condition, timeout)\
long __ret = timeout; \
if (!___wait_cond_timeout1(condition)) \
__ret = __wait_event_idle_lifo_timeout(wq_head, \
/* l_wait_event_abortable() is a bit like wait_event_killable()
 * except there is a fixed set of signals which will abort:
 * LUSTRE_FATAL_SIGS
 */
#define LUSTRE_FATAL_SIGS \
(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
sigmask(SIGQUIT) | sigmask(SIGALRM))
/*
 * Wait for @condition with all signals except LUSTRE_FATAL_SIGS blocked;
 * the caller's signal mask is restored afterwards. Returns 0 when the
 * condition became true, or the wait_event_interruptible() error
 * (-ERESTARTSYS) if aborted by a fatal signal.
 */
#define l_wait_event_abortable(wq, condition)				\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	int __ret;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);		\
	__ret = wait_event_interruptible(wq, condition);		\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})
/*
 * Timeout variant of l_wait_event_abortable(): only LUSTRE_FATAL_SIGS may
 * interrupt the sleep. Returns the wait_event_interruptible_timeout()
 * result (remaining jiffies, 0 on timeout, or -ERESTARTSYS on a fatal
 * signal); __ret is long because it can carry a jiffies value.
 */
#define l_wait_event_abortable_timeout(wq, condition, timeout)		\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	long __ret;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);		\
	__ret = wait_event_interruptible_timeout(wq, condition, timeout);\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})
/*
 * Exclusive-waiter variant of l_wait_event_abortable(): queued with
 * WQ_FLAG_EXCLUSIVE via wait_event_interruptible_exclusive(), and only
 * LUSTRE_FATAL_SIGS may interrupt the sleep.
 */
#define l_wait_event_abortable_exclusive(wq, condition)			\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	int __ret;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);		\
	__ret = wait_event_interruptible_exclusive(wq, condition);	\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})
#endif /* __LIBCFS_LINUX_WAIT_BIT_H */