1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LIBCFS_LINUX_WAIT_BIT_H
3 #define __LIBCFS_LINUX_WAIT_BIT_H
5 /* Make sure we can see if we have TASK_NOLOAD */
6 #include <linux/sched.h>
/*
 * Linux wait-bit related types and methods:
 */
/* wait_bit helpers moved to their own header in v4.13; pull it in when the
 * configure check found it, otherwise <linux/wait.h> provides everything.
 */
#ifdef HAVE_WAIT_BIT_HEADER_H
#include <linux/wait_bit.h>
#endif
#include <linux/wait.h>
/* v4.13 renamed wait_queue_t to wait_queue_entry_t; map old to new name. */
#ifndef HAVE_WAIT_QUEUE_ENTRY
#define wait_queue_entry_t wait_queue_t
#endif
19 #ifndef HAVE_WAIT_BIT_HEADER_H
20 struct wait_bit_queue_entry {
21 struct wait_bit_key key;
22 wait_queue_entry_t wq_entry;
/* True when a sleep in @state can be woken by a signal: any non-constant
 * state (must be checked at runtime by the caller), or the constant
 * interruptible/killable states.  NOTE: the original ended with a stray
 * line-continuation backslash which spliced the following line into the
 * macro; it has been removed.
 */
#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
	 state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)
29 #endif /* ! HAVE_WAIT_BIT_HEADER_H */
31 #ifndef HAVE_PREPARE_TO_WAIT_EVENT
32 extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
33 wait_queue_entry_t *wq_entry, int state);
/* ___wait_cond_timeout changed number of args in v3.12-rc1-78-g35a2af94c7ce
 * so let's define our own ___wait_cond_timeout1
 *
 * Evaluates @condition once; if it is true but the in-scope __ret (a
 * remaining-jiffies count) already hit 0, bump __ret to 1 so callers can
 * still report success.  The result is true when waiting should stop
 * (condition met, or timeout expired).
 * NOTE: deliberately relies on a __ret variable in the enclosing scope.
 */
#define ___wait_cond_timeout1(condition)				\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})
#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 *
 * @bit: the bit of the word being waited on
 * @word: the word being waited on, a kernel virtual address
 *
 * You can use this helper if bitflags are manipulated atomically rather than
 * non-atomically under a lock.
 */
static inline void clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit_unlock(bit, word);
	/* See wake_up_bit() for which memory barrier you need to use. */
	smp_mb__after_atomic();
	wake_up_bit(word, bit);
}
#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */
67 #ifndef HAVE_WAIT_VAR_EVENT
68 extern void __init wait_bit_init(void);
69 extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
70 void *var, int flags);
71 extern void wake_up_var(void *var);
72 extern wait_queue_head_t *__var_waitqueue(void *p);
74 #define ___wait_var_event(var, condition, state, exclusive, ret, cmd) \
77 wait_queue_head_t *__wq_head = __var_waitqueue(var); \
78 struct wait_bit_queue_entry __wbq_entry; \
79 long __ret = ret; /* explicit shadow */ \
81 init_wait_var_entry(&__wbq_entry, var, \
82 exclusive ? WQ_FLAG_EXCLUSIVE : 0); \
84 long __int = prepare_to_wait_event(__wq_head, \
85 &__wbq_entry.wq_entry, \
90 if (___wait_is_interruptible(state) && __int) { \
97 finish_wait(__wq_head, &__wbq_entry.wq_entry); \
101 #define __wait_var_event(var, condition) \
102 ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
105 #define wait_var_event(var, condition) \
110 __wait_var_event(var, condition); \
113 #define __wait_var_event_killable(var, condition) \
114 ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \
117 #define wait_var_event_killable(var, condition) \
122 __ret = __wait_var_event_killable(var, condition); \
126 #define __wait_var_event_timeout(var, condition, timeout) \
127 ___wait_var_event(var, ___wait_cond_timeout1(condition), \
128 TASK_UNINTERRUPTIBLE, 0, timeout, \
129 __ret = schedule_timeout(__ret))
131 #define wait_var_event_timeout(var, condition, timeout) \
133 long __ret = timeout; \
135 if (!___wait_cond_timeout1(condition)) \
136 __ret = __wait_var_event_timeout(var, condition, timeout); \
139 #endif /* ! HAVE_WAIT_VAR_EVENT */
/*
 * prepare_to_wait_event() does not support an exclusive
 * (LIFO) wait.
 * However it will not relink the wait_queue_entry if
 * it is already linked. So we link to the head of the
 * queue here, and it will stay there.
 */
148 static inline void prepare_to_wait_exclusive_head(
149 wait_queue_head_t *waitq, wait_queue_entry_t *link)
153 spin_lock_irqsave(&(waitq->lock), flags);
154 #ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
155 if (list_empty(&link->entry))
157 if (list_empty(&link->task_list))
159 __add_wait_queue_exclusive(waitq, link);
160 spin_unlock_irqrestore(&((waitq)->lock), flags);
163 #ifndef ___wait_event
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout1() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */
176 #define ___wait_event(wq_head, condition, state, exclusive, ret, cmd) \
179 wait_queue_entry_ __wq_entry; \
180 long __ret = ret; /* explicit shadow */ \
182 init_wait(&__wq_entry); \
184 __wq_entry.flags = WQ_FLAG_EXCLUSIVE \
186 long __int = prepare_to_wait_event(&wq_head, \
187 &__wq_entry, state); \
192 if (___wait_is_interruptible(state) && __int) { \
199 finish_wait(&wq_head, &__wq_entry); \
206 #define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd) \
208 wait_queue_entry_t __wq_entry; \
209 unsigned long flags; \
210 long __ret = ret; /* explicit shadow */ \
211 sigset_t __old_blocked, __new_blocked; \
213 siginitset(&__new_blocked, LUSTRE_FATAL_SIGS); \
214 sigprocmask(0, &__new_blocked, &__old_blocked); \
215 init_wait(&__wq_entry); \
217 __wq_entry.flags = WQ_FLAG_EXCLUSIVE; \
219 prepare_to_wait_event(&wq_head, \
221 TASK_INTERRUPTIBLE); \
225 /* We have to do this here because some signals */ \
226 /* are not blockable - ie from strace(1). */ \
227 /* In these cases we want to schedule_timeout() */ \
228 /* again, because we don't want that to return */ \
229 /* -EINTR when the RPC actually succeeded. */ \
230 /* the recalc_sigpending() below will deliver the */ \
231 /* signal properly. */ \
232 if (signal_pending(current)) { \
233 spin_lock_irqsave(¤t->sighand->siglock, \
235 clear_tsk_thread_flag(current, TIF_SIGPENDING); \
236 spin_unlock_irqrestore(¤t->sighand->siglock,\
241 finish_wait(&wq_head, &__wq_entry); \
242 sigprocmask(SIG_SETMASK, &__old_blocked, NULL); \
246 #define wait_event_idle(wq_head, condition) \
250 ___wait_event_idle(wq_head, condition, 0, 0, schedule());\
253 #define wait_event_idle_exclusive(wq_head, condition) \
257 ___wait_event_idle(wq_head, condition, 1, 0, schedule());\
260 #define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)\
261 ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
263 __ret = schedule_timeout(__ret))
265 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
267 long __ret = timeout; \
269 if (!___wait_cond_timeout1(condition)) \
270 __ret = __wait_event_idle_exclusive_timeout( \
271 wq_head, condition, timeout); \
275 #define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition, \
276 timeout, cmd1, cmd2) \
277 ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
279 cmd1; __ret = schedule_timeout(__ret); cmd2)
281 #define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
284 long __ret = timeout; \
285 if (!___wait_cond_timeout1(condition)) \
286 __ret = __wait_event_idle_exclusive_timeout_cmd( \
287 wq_head, condition, timeout, cmd1, cmd2); \
291 #define __wait_event_idle_timeout(wq_head, condition, timeout) \
292 ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition), \
294 __ret = schedule_timeout(__ret))
296 #define wait_event_idle_timeout(wq_head, condition, timeout) \
298 long __ret = timeout; \
300 if (!___wait_cond_timeout1(condition)) \
301 __ret = __wait_event_idle_timeout(wq_head, condition, \
306 #else /* TASK_IDLE */
307 #ifndef wait_event_idle
309 * wait_event_idle - wait for a condition without contributing to system load
310 * @wq_head: the waitqueue to wait on
311 * @condition: a C expression for the event to wait for
313 * The process is put to sleep (TASK_IDLE) until the
314 * @condition evaluates to true.
315 * The @condition is checked each time the waitqueue @wq_head is woken up.
317 * wake_up() has to be called after changing any variable that could
318 * change the result of the wait condition.
321 #define wait_event_idle(wq_head, condition) \
325 ___wait_event(wq_head, condition, TASK_IDLE, 0, 0, \
329 #ifndef wait_event_idle_exclusive
331 * wait_event_idle_exclusive - wait for a condition without contributing to
333 * @wq_head: the waitqueue to wait on
334 * @condition: a C expression for the event to wait for
336 * The process is put to sleep (TASK_IDLE) until the
337 * @condition evaluates to true.
338 * The @condition is checked each time the waitqueue @wq_head is woken up.
340 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
341 * set thus if other processes wait on the same list, when this
342 * process is woken further processes are not considered.
344 * wake_up() has to be called after changing any variable that could
345 * change the result of the wait condition.
348 #define wait_event_idle_exclusive(wq_head, condition) \
352 ___wait_event(wq_head, condition, TASK_IDLE, 1, 0, \
356 #ifndef wait_event_idle_exclusive_timeout
358 * wait_event_idle_exclusive_timeout - sleep without load until a condition
359 * becomes true or a timeout elapses
360 * @wq_head: the waitqueue to wait on
361 * @condition: a C expression for the event to wait for
362 * @timeout: timeout, in jiffies
364 * The process is put to sleep (TASK_IDLE) until the
365 * @condition evaluates to true. The @condition is checked each time
366 * the waitqueue @wq_head is woken up.
368 * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
369 * set thus if other processes wait on the same list, when this
370 * process is woken further processes are not considered.
372 * wake_up() has to be called after changing any variable that could
373 * change the result of the wait condition.
376 * 0 if the @condition evaluated to %false after the @timeout elapsed,
377 * 1 if the @condition evaluated to %true after the @timeout elapsed,
378 * or the remaining jiffies (at least 1) if the @condition evaluated
379 * to %true before the @timeout elapsed.
381 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout) \
383 long __ret = timeout; \
385 if (!___wait_cond_timeout1(condition)) \
386 __ret = __wait_event_idle_exclusive_timeout(wq_head, \
392 #ifndef wait_event_idle_exclusive_timeout_cmd
393 #define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition, \
394 timeout, cmd1, cmd2) \
395 ___wait_event(wq_head, ___wait_cond_timeout1(condition), \
396 TASK_IDLE, 1, timeout, \
397 cmd1; __ret = schedule_timeout(__ret); cmd2)
399 #define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
402 long __ret = timeout; \
403 if (!___wait_cond_timeout1(condition)) \
404 __ret = __wait_event_idle_exclusive_timeout_cmd( \
405 wq_head, condition, timeout, cmd1, cmd2); \
410 #ifndef wait_event_idle_timeout
412 #define __wait_event_idle_timeout(wq_head, condition, timeout) \
413 ___wait_event(wq_head, ___wait_cond_timeout1(condition), \
414 TASK_IDLE, 0, timeout, \
415 __ret = schedule_timeout(__ret))
418 * wait_event_idle_timeout - sleep without load until a condition becomes
419 * true or a timeout elapses
420 * @wq_head: the waitqueue to wait on
421 * @condition: a C expression for the event to wait for
422 * @timeout: timeout, in jiffies
424 * The process is put to sleep (TASK_IDLE) until the
425 * @condition evaluates to true. The @condition is checked each time
426 * the waitqueue @wq_head is woken up.
428 * wake_up() has to be called after changing any variable that could
429 * change the result of the wait condition.
432 * 0 if the @condition evaluated to %false after the @timeout elapsed,
433 * 1 if the @condition evaluated to %true after the @timeout elapsed,
434 * or the remaining jiffies (at least 1) if the @condition evaluated
435 * to %true before the @timeout elapsed.
437 #define wait_event_idle_timeout(wq_head, condition, timeout) \
439 long __ret = timeout; \
441 if (!___wait_cond_timeout1(condition)) \
442 __ret = __wait_event_idle_timeout(wq_head, condition, \
447 #endif /* TASK_IDLE */
/* ___wait_event_lifo is used for lifo exclusive 'idle' waits */
#ifdef TASK_NOLOAD

#define ___wait_event_lifo(wq_head, condition, ret, cmd)		\
({									\
	wait_queue_entry_t __wq_entry;					\
	long __ret = ret;	/* explicit shadow */			\
									\
	init_wait(&__wq_entry);						\
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE;				\
	for (;;) {							\
		/* LIFO: link at the head, then let the (idle) wait run. */ \
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);	\
		prepare_to_wait_event(&wq_head, &__wq_entry, TASK_IDLE);\
									\
		if (condition)						\
			break;						\
									\
		cmd;							\
	}								\
	finish_wait(&wq_head, &__wq_entry);				\
	__ret;								\
})
#else
/* No TASK_NOLOAD/TASK_IDLE: emulate the idle wait by blocking fatal
 * signals and discarding any signal that still arrives.
 */
#define ___wait_event_lifo(wq_head, condition, ret, cmd)		\
({									\
	wait_queue_entry_t __wq_entry;					\
	unsigned long flags;						\
	long __ret = ret;	/* explicit shadow */			\
	sigset_t __old_blocked, __new_blocked;				\
									\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);			\
	sigprocmask(0, &__new_blocked, &__old_blocked);			\
	init_wait(&__wq_entry);						\
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE;				\
	for (;;) {							\
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);	\
		prepare_to_wait_event(&wq_head, &__wq_entry,		\
				      TASK_INTERRUPTIBLE);		\
									\
		if (condition)						\
			break;						\
		/* See justification in ___wait_event_idle */		\
		if (signal_pending(current)) {				\
			spin_lock_irqsave(&current->sighand->siglock,	\
					  flags);			\
			clear_tsk_thread_flag(current, TIF_SIGPENDING);	\
			spin_unlock_irqrestore(&current->sighand->siglock,\
					       flags);			\
		}							\
		cmd;							\
	}								\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	finish_wait(&wq_head, &__wq_entry);				\
	__ret;								\
})
#endif

#define wait_event_idle_exclusive_lifo(wq_head, condition)		\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event_lifo(wq_head, condition, 0, schedule());	\
} while (0)

#define __wait_event_idle_lifo_timeout(wq_head, condition, timeout)	\
	___wait_event_lifo(wq_head, ___wait_cond_timeout1(condition),	\
			   timeout,					\
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_lifo_timeout(wq_head, condition, timeout)\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_event_idle_lifo_timeout(wq_head,		\
						       condition,	\
						       timeout);	\
	__ret;								\
})
/* l_wait_event_abortable() is a bit like wait_event_killable()
 * except there is a fixed set of signals which will abort:
 * the LUSTRE_FATAL_SIGS mask below.
 */
#define LUSTRE_FATAL_SIGS					\
	(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) |	\
	 sigmask(SIGQUIT) | sigmask(SIGALRM))
/* Wait interruptibly for @condition, but only LUSTRE_FATAL_SIGS may abort
 * the wait (all other signals are blocked for its duration).
 * Returns 0 when the condition became true, -ERESTARTSYS if aborted.
 */
#define l_wait_event_abortable(wq, condition)				\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	int __ret;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);		\
	__ret = wait_event_interruptible(wq, condition);		\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})
/* As l_wait_event_abortable() with a jiffies @timeout: returns remaining
 * jiffies (>= 1) or 1 on success, 0 on timeout, -ERESTARTSYS if aborted
 * by one of LUSTRE_FATAL_SIGS.
 */
#define l_wait_event_abortable_timeout(wq, condition, timeout)		\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	long __ret;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);		\
	__ret = wait_event_interruptible_timeout(wq, condition, timeout);\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})
/* Exclusive-waiter variant of l_wait_event_abortable(); same signal
 * handling, but the task is queued WQ_FLAG_EXCLUSIVE by the underlying
 * wait_event_interruptible_exclusive().
 */
#define l_wait_event_abortable_exclusive(wq, condition)			\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	int __ret;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);		\
	__ret = wait_event_interruptible_exclusive(wq, condition);	\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})
#endif /* __LIBCFS_LINUX_WAIT_BIT_H */