/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBCFS_LINUX_WAIT_BIT_H
#define __LIBCFS_LINUX_WAIT_BIT_H

/* Make sure we can see if we have TASK_NOLOAD */
#include <linux/sched.h>
/*
 * Linux wait-bit related types and methods:
 */
#ifdef HAVE_WAIT_BIT_HEADER_H
#include <linux/wait_bit.h>
#endif
#include <linux/wait.h>

#ifndef HAVE_WAIT_QUEUE_ENTRY
#define wait_queue_entry_t wait_queue_t
#endif

#ifndef HAVE_WAIT_BIT_HEADER_H
struct wait_bit_queue_entry {
	struct wait_bit_key	key;
	wait_queue_entry_t	wq_entry;
};

#define ___wait_is_interruptible(state)	\
	(!__builtin_constant_p(state) ||	\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\

#endif /* ! HAVE_WAIT_BIT_HEADER_H */

#ifndef HAVE_PREPARE_TO_WAIT_EVENT
extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
				  wait_queue_entry_t *wq_entry, int state);
#endif

/* ___wait_cond_timeout changed number of args in v3.12-rc1-78-g35a2af94c7ce
 * so let's define our own ___wait_cond_timeout1
 */
#define ___wait_cond_timeout1(condition)	\
({	\
	bool __cond = (condition);	\
	if (__cond && !__ret)	\
		__ret = 1;	\
	__cond || !__ret;	\
})
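
/*
 * Note: like the upstream helper it replaces, ___wait_cond_timeout1()
 * relies on a 'long __ret' variable already being in scope at the
 * expansion site.  A minimal, purely illustrative sketch (the waitq and
 * cond names are hypothetical, not part of this header):
 *
 *	long __ret = timeout;
 *	if (!___wait_cond_timeout1(cond))
 *		__ret = __wait_event_idle_timeout(waitq, cond, timeout);
 *	return __ret;
 */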

#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 *
 * @bit: the bit of the word being waited on
 * @word: the word being waited on, a kernel virtual address
 *
 * You can use this helper if bitflags are manipulated atomically rather than
 * non-atomically under a lock.
 */
static inline void clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit_unlock(bit, word);
	/* See wake_up_bit() for which memory barrier you need to use. */
	smp_mb__after_atomic();
	wake_up_bit(word, bit);
}
#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */
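
/*
 * Illustrative pairing only ('obj->flags' and MY_BIT_BUSY are hypothetical,
 * not defined by this header): the owner clears the bit and wakes waiters,
 * while a waiter sleeps in wait_on_bit() on the same word and bit.
 *
 *	waiter:	wait_on_bit(&obj->flags, MY_BIT_BUSY, TASK_UNINTERRUPTIBLE);
 *	owner:	clear_and_wake_up_bit(MY_BIT_BUSY, &obj->flags);
 */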

#ifndef HAVE_WAIT_VAR_EVENT
extern void __init wait_bit_init(void);
extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
				void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);

#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
({	\
	__label__ __out;	\
	wait_queue_head_t *__wq_head = __var_waitqueue(var);	\
	struct wait_bit_queue_entry __wbq_entry;	\
	long __ret = ret; /* explicit shadow */	\
	\
	init_wait_var_entry(&__wbq_entry, var,	\
			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
	for (;;) {	\
		long __int = prepare_to_wait_event(__wq_head,	\
						   &__wbq_entry.wq_entry, \
						   state);	\
		if (condition)	\
			break;	\
	\
		if (___wait_is_interruptible(state) && __int) {	\
			__ret = __int;	\
			goto __out;	\
		}	\
	\
		cmd;	\
	}	\
	finish_wait(__wq_head, &__wbq_entry.wq_entry);	\
__out:	__ret;	\
})

#define __wait_var_event(var, condition)	\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  schedule())

#define wait_var_event(var, condition)	\
do {	\
	might_sleep();	\
	if (condition)	\
		break;	\
	__wait_var_event(var, condition);	\
} while (0)

#define __wait_var_event_killable(var, condition)	\
	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,	\
			  schedule())

#define wait_var_event_killable(var, condition)	\
({	\
	int __ret = 0;	\
	might_sleep();	\
	if (!(condition))	\
		__ret = __wait_var_event_killable(var, condition);	\
	__ret;	\
})

#define __wait_var_event_timeout(var, condition, timeout)	\
	___wait_var_event(var, ___wait_cond_timeout1(condition),	\
			  TASK_UNINTERRUPTIBLE, 0, timeout,	\
			  __ret = schedule_timeout(__ret))

#define wait_var_event_timeout(var, condition, timeout)	\
({	\
	long __ret = timeout;	\
	might_sleep();	\
	if (!___wait_cond_timeout1(condition))	\
		__ret = __wait_var_event_timeout(var, condition, timeout); \
	__ret;	\
})
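
/*
 * Illustrative use of the wait_var_event()/wake_up_var() pair emulated
 * above ('obj->users' is a hypothetical field, not part of this header):
 *
 *	waiter:	wait_var_event(&obj->users, atomic_read(&obj->users) == 0);
 *	waker:	if (atomic_dec_and_test(&obj->users))
 *			wake_up_var(&obj->users);
 */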
#else /* !HAVE_WAIT_VAR_EVENT */
/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
 * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
 */
# ifndef __wait_cond_timeout
# define ___wait_cond_timeout(condition)	\
({	\
	bool __cond = (condition);	\
	if (__cond && !__ret)	\
		__ret = 1;	\
	__cond || !__ret;	\
})
# endif /* __wait_cond_timeout */

#endif /* ! HAVE_WAIT_VAR_EVENT */

/*
 * prepare_to_wait_event() does not support an exclusive
 * lifo wait.
 * However it will not relink the wait_queue_entry if
 * it is already linked.  So we link to the head of the
 * queue here, and it will stay there.
 */
static inline void prepare_to_wait_exclusive_head(
	wait_queue_head_t *waitq, wait_queue_entry_t *link)
{
	unsigned long flags;

	spin_lock_irqsave(&(waitq->lock), flags);
#ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
	if (list_empty(&link->entry))
#else
	if (list_empty(&link->task_list))
#endif
		__add_wait_queue_exclusive(waitq, link);
	spin_unlock_irqrestore(&((waitq)->lock), flags);
}

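/*
 * This helper is what gives ___wait_event_lifo() below its LIFO ordering:
 * each waiter adds itself at the head of the queue before sleeping, so the
 * most recently idle thread is woken first.  Illustrative caller (names
 * are hypothetical):
 *
 *	wait_queue_entry_t wq_entry;
 *
 *	init_wait(&wq_entry);
 *	wq_entry.flags = WQ_FLAG_EXCLUSIVE;
 *	prepare_to_wait_exclusive_head(&pool->waitq, &wq_entry);
 *	prepare_to_wait_event(&pool->waitq, &wq_entry, TASK_IDLE);
 */
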
#ifndef ___wait_event
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout1() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)	\
({	\
	__label__ __out;	\
	wait_queue_entry_t __wq_entry;	\
	long __ret = ret; /* explicit shadow */	\
	\
	init_wait(&__wq_entry);	\
	if (exclusive)	\
		__wq_entry.flags = WQ_FLAG_EXCLUSIVE;	\
	for (;;) {	\
		long __int = prepare_to_wait_event(&wq_head,	\
						   &__wq_entry, state);	\
	\
		if (condition)	\
			break;	\
	\
		if (___wait_is_interruptible(state) && __int) {	\
			__ret = __int;	\
			goto __out;	\
		}	\
	\
		cmd;	\
	}	\
	finish_wait(&wq_head, &__wq_entry);	\
__out:	__ret;	\
})
#endif

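/*
 * Illustrative sketch of the "explicit shadow" convention described above
 * (hypothetical caller, not an additional definition from this header):
 * the caller's 'long __ret' holds the remaining jiffies and is both read
 * and updated by the 'cmd' argument.
 *
 *	long __ret = timeout;
 *	if (!___wait_cond_timeout1(done))
 *		__ret = ___wait_event(waitq, ___wait_cond_timeout1(done),
 *				      TASK_IDLE, 0, timeout,
 *				      __ret = schedule_timeout(__ret));
 */
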
#ifndef TASK_NOLOAD

#define TASK_IDLE TASK_INTERRUPTIBLE

#define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd)	\
({	\
	wait_queue_entry_t __wq_entry;	\
	unsigned long flags;	\
	long __ret = ret; /* explicit shadow */	\
	sigset_t __old_blocked, __new_blocked;	\
	\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);	\
	sigprocmask(0, &__new_blocked, &__old_blocked);	\
	init_wait(&__wq_entry);	\
	if (exclusive)	\
		__wq_entry.flags = WQ_FLAG_EXCLUSIVE;	\
	for (;;) {	\
		prepare_to_wait_event(&wq_head,	\
				      &__wq_entry,	\
				      TASK_INTERRUPTIBLE);	\
	\
		if (condition)	\
			break;	\
		/* We have to do this here because some signals */	\
		/* are not blockable - ie from strace(1).       */	\
		/* In these cases we want to schedule_timeout() */	\
		/* again, because we don't want that to return  */	\
		/* -EINTR when the RPC actually succeeded.      */	\
		/* the recalc_sigpending() below will deliver the */	\
		/* signal properly.                             */	\
		if (signal_pending(current)) {	\
			spin_lock_irqsave(&current->sighand->siglock,	\
					  flags);	\
			clear_tsk_thread_flag(current, TIF_SIGPENDING);	\
			spin_unlock_irqrestore(&current->sighand->siglock,\
					       flags);	\
		}	\
		cmd;	\
	}	\
	finish_wait(&wq_head, &__wq_entry);	\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);	\
	__ret;	\
})

#define wait_event_idle(wq_head, condition)	\
do {	\
	might_sleep();	\
	if (!(condition))	\
		___wait_event_idle(wq_head, condition, 0, 0, schedule());\
} while (0)

#define wait_event_idle_exclusive(wq_head, condition)	\
do {	\
	might_sleep();	\
	if (!(condition))	\
		___wait_event_idle(wq_head, condition, 1, 0, schedule());\
} while (0)

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)\
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),	\
			   1, timeout,	\
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
({	\
	long __ret = timeout;	\
	might_sleep();	\
	if (!___wait_cond_timeout1(condition))	\
		__ret = __wait_event_idle_exclusive_timeout(	\
			wq_head, condition, timeout);	\
	__ret;	\
})

#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition,	\
						timeout, cmd1, cmd2)	\
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),	\
			   1, timeout,	\
			   cmd1; __ret = schedule_timeout(__ret); cmd2)

#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
					      cmd1, cmd2)	\
({	\
	long __ret = timeout;	\
	if (!___wait_cond_timeout1(condition))	\
		__ret = __wait_event_idle_exclusive_timeout_cmd(	\
			wq_head, condition, timeout, cmd1, cmd2);	\
	__ret;	\
})

#define __wait_event_idle_timeout(wq_head, condition, timeout)	\
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),	\
			   0, timeout,	\
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_timeout(wq_head, condition, timeout)	\
({	\
	long __ret = timeout;	\
	might_sleep();	\
	if (!___wait_cond_timeout1(condition))	\
		__ret = __wait_event_idle_timeout(wq_head, condition,	\
						  timeout);	\
	__ret;	\
})

#else /* TASK_IDLE */
#ifndef wait_event_idle
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle(wq_head, condition)	\
do {	\
	might_sleep();	\
	if (!(condition))	\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0,	\
			      schedule());	\
} while (0)
#endif
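
/*
 * Illustrative only (the 'sv' service structure and its fields are
 * hypothetical): a thread idles without adding to the load average until
 * new work is queued, and the producer wakes it after updating the state.
 *
 *	consumer:	wait_event_idle(sv->sv_waitq, !list_empty(&sv->sv_reqs));
 *	producer:	list_add_tail(&req->rq_list, &sv->sv_reqs);
 *			wake_up(&sv->sv_waitq);
 */
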
#ifndef wait_event_idle_exclusive
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to
 *			       system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle_exclusive(wq_head, condition)	\
do {	\
	might_sleep();	\
	if (!(condition))	\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0,	\
			      schedule());	\
} while (0)
#endif
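
/*
 * Illustrative contrast (hypothetical names): with several worker threads
 * blocked in wait_event_idle_exclusive() on the same queue, a single
 * wake_up() wakes only one of them, whereas with wait_event_idle() every
 * waiter would be woken and would re-check the condition.
 *
 *	worker:		wait_event_idle_exclusive(pool->waitq,
 *						  !list_empty(&pool->jobs));
 *	submitter:	wake_up(&pool->waitq);
 */
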
#ifndef wait_event_idle_exclusive_timeout
/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition
 *				       becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes wait on the same list, when this
 * process is woken further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
({	\
	long __ret = timeout;	\
	might_sleep();	\
	if (!___wait_cond_timeout1(condition))	\
		__ret = __wait_event_idle_exclusive_timeout(wq_head,	\
							    condition,	\
							    timeout);	\
	__ret;	\
})
#endif
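
/*
 * Illustrative return-value handling (the 'svc' structure is hypothetical;
 * cfs_time_seconds() is the usual libcfs seconds-to-jiffies helper):
 *
 *	long rc = wait_event_idle_exclusive_timeout(svc->waitq,
 *						    svc->stopped,
 *						    cfs_time_seconds(30));
 *	if (rc == 0)
 *		CERROR("service did not stop within 30s\n");
 *	(otherwise rc is the remaining jiffies, at least 1)
 */
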
#ifndef wait_event_idle_exclusive_timeout_cmd
#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition,	\
						timeout, cmd1, cmd2)	\
	___wait_event(wq_head, ___wait_cond_timeout1(condition),	\
		      TASK_IDLE, 1, timeout,	\
		      cmd1; __ret = schedule_timeout(__ret); cmd2)

#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
					      cmd1, cmd2)	\
({	\
	long __ret = timeout;	\
	if (!___wait_cond_timeout1(condition))	\
		__ret = __wait_event_idle_exclusive_timeout_cmd(	\
			wq_head, condition, timeout, cmd1, cmd2);	\
	__ret;	\
})
#endif

#ifndef wait_event_idle_timeout

#define __wait_event_idle_timeout(wq_head, condition, timeout)	\
	___wait_event(wq_head, ___wait_cond_timeout1(condition),	\
		      TASK_IDLE, 0, timeout,	\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes
 *			     true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)	\
({	\
	long __ret = timeout;	\
	might_sleep();	\
	if (!___wait_cond_timeout1(condition))	\
		__ret = __wait_event_idle_timeout(wq_head, condition,	\
						  timeout);	\
	__ret;	\
})
#endif
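
/*
 * Illustrative only ('conf' and its flag are hypothetical): note that
 * @timeout is in jiffies, so callers usually convert, e.g. with
 * msecs_to_jiffies() or the libcfs cfs_time_seconds() helper.
 *
 *	if (wait_event_idle_timeout(conf->waitq, conf->ready,
 *				    msecs_to_jiffies(5000)) == 0)
 *		rc = -ETIMEDOUT;
 */
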
#endif /* TASK_IDLE */

/* ___wait_event_lifo is used for lifo exclusive 'idle' waits */

#ifdef TASK_NOLOAD
#define ___wait_event_lifo(wq_head, condition, ret, cmd)	\
({	\
	wait_queue_entry_t __wq_entry;	\
	long __ret = ret; /* explicit shadow */	\
	\
	init_wait(&__wq_entry);	\
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE;	\
	for (;;) {	\
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);	\
		prepare_to_wait_event(&wq_head, &__wq_entry, TASK_IDLE);\
	\
		if (condition)	\
			break;	\
	\
		cmd;	\
	}	\
	finish_wait(&wq_head, &__wq_entry);	\
	__ret;	\
})
#else
#define ___wait_event_lifo(wq_head, condition, ret, cmd)	\
({	\
	wait_queue_entry_t __wq_entry;	\
	unsigned long flags;	\
	long __ret = ret; /* explicit shadow */	\
	sigset_t __old_blocked, __new_blocked;	\
	\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);	\
	sigprocmask(0, &__new_blocked, &__old_blocked);	\
	init_wait(&__wq_entry);	\
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE;	\
	for (;;) {	\
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);	\
		prepare_to_wait_event(&wq_head, &__wq_entry,	\
				      TASK_INTERRUPTIBLE);	\
	\
		if (condition)	\
			break;	\
		/* See justification in ___wait_event_idle */	\
		if (signal_pending(current)) {	\
			spin_lock_irqsave(&current->sighand->siglock,	\
					  flags);	\
			clear_tsk_thread_flag(current, TIF_SIGPENDING);	\
			spin_unlock_irqrestore(&current->sighand->siglock,\
					       flags);	\
		}	\
		cmd;	\
	}	\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);	\
	finish_wait(&wq_head, &__wq_entry);	\
	__ret;	\
})
#endif

#define wait_event_idle_exclusive_lifo(wq_head, condition)	\
do {	\
	might_sleep();	\
	if (!(condition))	\
		___wait_event_lifo(wq_head, condition, 0, schedule());	\
} while (0)

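/*
 * Illustrative only (the thread-pool names are hypothetical): LIFO ordering
 * keeps the most recently active worker warm in cache while long-idle
 * workers stay asleep.
 *
 *	worker:	wait_event_idle_exclusive_lifo(pool->waitq,
 *					       !list_empty(&pool->jobs) ||
 *					       pool->shutting_down);
 */
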
#define __wait_event_idle_lifo_timeout(wq_head, condition, timeout)	\
	___wait_event_lifo(wq_head, ___wait_cond_timeout1(condition),	\
			   timeout,	\
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_lifo_timeout(wq_head, condition, timeout)\
({	\
	long __ret = timeout;	\
	might_sleep();	\
	if (!___wait_cond_timeout1(condition))	\
		__ret = __wait_event_idle_lifo_timeout(wq_head,	\
						       condition,	\
						       timeout);	\
	__ret;	\
})

/* l_wait_event_abortable() is a bit like wait_event_killable()
 * except there is a fixed set of signals which will abort:
 * LUSTRE_FATAL_SIGS
 */
#define LUSTRE_FATAL_SIGS	\
	(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) |	\
	 sigmask(SIGQUIT) | sigmask(SIGALRM))

#define l_wait_event_abortable(wq, condition)	\
({	\
	sigset_t __new_blocked, __old_blocked;	\
	int __ret;	\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);	\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);	\
	__ret = wait_event_interruptible(wq, condition);	\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);	\
	__ret;	\
})

#define l_wait_event_abortable_timeout(wq, condition, timeout)	\
({	\
	sigset_t __new_blocked, __old_blocked;	\
	int __ret;	\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);	\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);	\
	__ret = wait_event_interruptible_timeout(wq, condition, timeout);\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);	\
	__ret;	\
})

#define l_wait_event_abortable_exclusive(wq, condition)	\
({	\
	sigset_t __new_blocked, __old_blocked;	\
	int __ret;	\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);	\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);	\
	__ret = wait_event_interruptible_exclusive(wq, condition);	\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);	\
	__ret;	\
})
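
/*
 * Illustrative use (the 'imp' structure is hypothetical): all signals
 * except LUSTRE_FATAL_SIGS are blocked for the duration of the wait, so
 * only those signals can interrupt it, in which case -ERESTARTSYS is
 * returned by the underlying wait_event_interruptible().
 *
 *	rc = l_wait_event_abortable(imp->imp_waitq, imp->imp_ready);
 *	if (rc < 0)
 *		return rc;
 */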

#endif /* __LIBCFS_LINUX_WAIT_BIT_H */