LU-17705 ptlrpc: replace synchronize_rcu() with rcu_barrier()
libcfs/include/libcfs/linux/linux-wait.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBCFS_LINUX_WAIT_BIT_H
#define __LIBCFS_LINUX_WAIT_BIT_H

/* Make sure we can see if we have TASK_NOLOAD */
#include <linux/sched.h>
/*
 * Linux wait-bit related types and methods:
 */
#ifdef HAVE_WAIT_BIT_HEADER_H
#include <linux/wait_bit.h>
#endif
#include <linux/wait.h>

#ifndef HAVE_WAIT_QUEUE_ENTRY
#define wait_queue_entry_t wait_queue_t
#endif

#ifndef HAVE_PREPARE_TO_WAIT_EVENT
#define __add_wait_queue_entry_tail __add_wait_queue_tail
#endif

#ifndef HAVE_WAIT_BIT_HEADER_H
struct wait_bit_queue_entry {
	struct wait_bit_key	key;
	wait_queue_entry_t	wq_entry;
};

#define ___wait_is_interruptible(state)					\
	(!__builtin_constant_p(state) ||				\
		state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

#endif /* ! HAVE_WAIT_BIT_HEADER_H */

#ifndef HAVE_PREPARE_TO_WAIT_EVENT
extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
				  wait_queue_entry_t *wq_entry, int state);
#endif

/* ___wait_cond_timeout changed its number of arguments in
 * v3.12-rc1-78-g35a2af94c7ce, so define our own ___wait_cond_timeout1
 */

#define ___wait_cond_timeout1(condition)				\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})

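/* Sketch of the ___wait_cond_timeout1() contract (illustrative only;
 * "my_flag" is a hypothetical variable).  It must be expanded in a
 * scope that declares a long __ret holding the remaining jiffies; it
 * evaluates true when the condition holds or the timeout has expired,
 * and bumps __ret to 1 if the condition became true just as __ret
 * reached 0, so success is still distinguishable from a timeout:
 *
 *	long __ret = timeout;
 *	while (!___wait_cond_timeout1(my_flag != 0))
 *		__ret = schedule_timeout(__ret);
 *	// __ret == 0: timed out; __ret >= 1: condition was met
 */
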
#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 *
 * @bit: the bit of the word being waited on
 * @word: the word being waited on, a kernel virtual address
 *
 * You can use this helper if bitflags are manipulated atomically rather than
 * non-atomically under a lock.
 */
static inline void clear_and_wake_up_bit(int bit, void *word)
{
	clear_bit_unlock(bit, word);
	/* See wake_up_bit() for which memory barrier you need to use. */
	smp_mb__after_atomic();
	wake_up_bit(word, bit);
}
#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */

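/* Usage sketch (illustrative only; "md_flags" and MD_DOING_IO are
 * hypothetical names).  The waiter sleeps on the bit while the owner
 * holds it; the owner releases the bit and wakes all waiters in one
 * call, with the barrier ordering the clear before the wakeup so the
 * waiter cannot miss it:
 *
 *	// waiter
 *	wait_on_bit(&md_flags, MD_DOING_IO, TASK_UNINTERRUPTIBLE);
 *
 *	// bit owner, once the I/O completes
 *	clear_and_wake_up_bit(MD_DOING_IO, &md_flags);
 */
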
#ifndef HAVE_WAIT_VAR_EVENT
extern void __init wait_bit_init(void);
extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
				void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);

#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_head_t *__wq_head = __var_waitqueue(var);		\
	struct wait_bit_queue_entry __wbq_entry;			\
	long __ret = ret; /* explicit shadow */				\
									\
	init_wait_var_entry(&__wbq_entry, var,				\
			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);		\
	for (;;) {							\
		long __int = prepare_to_wait_event(__wq_head,		\
						   &__wbq_entry.wq_entry, \
						   state);		\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(__wq_head, &__wbq_entry.wq_entry);			\
__out:	__ret;								\
})

#define __wait_var_event(var, condition)				\
	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
			  schedule())

#define wait_var_event(var, condition)					\
do {									\
	might_sleep();							\
	if (condition)							\
		break;							\
	__wait_var_event(var, condition);				\
} while (0)

#define __wait_var_event_killable(var, condition)			\
	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,		\
			  schedule())

#define wait_var_event_killable(var, condition)				\
({									\
	int __ret = 0;							\
	might_sleep();							\
	if (!(condition))						\
		__ret = __wait_var_event_killable(var, condition);	\
	__ret;								\
})

#define __wait_var_event_timeout(var, condition, timeout)		\
	___wait_var_event(var, ___wait_cond_timeout1(condition),	\
			  TASK_UNINTERRUPTIBLE, 0, timeout,		\
			  __ret = schedule_timeout(__ret))

#define wait_var_event_timeout(var, condition, timeout)			\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_var_event_timeout(var, condition, timeout); \
	__ret;								\
})
#else /* !HAVE_WAIT_VAR_EVENT */
/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
 * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
 */
# ifndef __wait_cond_timeout
# define ___wait_cond_timeout(condition)				\
({									\
	bool __cond = (condition);					\
	if (__cond && !__ret)						\
		__ret = 1;						\
	__cond || !__ret;						\
})
# endif /* __wait_cond_timeout */

#endif /* ! HAVE_WAIT_VAR_EVENT */

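/* Usage sketch for the wait_var_event() family (illustrative only;
 * "obj" and its refcount field are hypothetical).  The variable's
 * address is the rendezvous: waiters hash it to a waitqueue and wakers
 * must call wake_up_var() on the same address after changing the value:
 *
 *	// waiter: sleep until the last reference is dropped
 *	wait_var_event(&obj->ref, atomic_read(&obj->ref) == 0);
 *
 *	// releaser
 *	if (atomic_dec_and_test(&obj->ref))
 *		wake_up_var(&obj->ref);
 *
 * wait_var_event_timeout() follows the ___wait_cond_timeout1()
 * contract above: it returns the remaining jiffies (at least 1) on
 * success and 0 on timeout.
 */
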
/*
 * prepare_to_wait_event() does not support an exclusive
 * LIFO wait.
 * However, it will not relink the wait_queue_entry if
 * it is already linked.  So we link it to the head of the
 * queue here, and it will stay there, giving LIFO order.
 */
static inline void prepare_to_wait_exclusive_head(
	wait_queue_head_t *waitq, wait_queue_entry_t *link)
{
	unsigned long flags;

	spin_lock_irqsave(&waitq->lock, flags);
#ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
	if (list_empty(&link->entry))
#else
	if (list_empty(&link->task_list))
#endif
		__add_wait_queue_exclusive(waitq, link);
	spin_unlock_irqrestore(&waitq->lock, flags);
}

#ifndef ___wait_event
/*
 * The macro ___wait_event() below has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout1() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)	\
({									\
	__label__ __out;						\
	wait_queue_entry_t __wq_entry;					\
	long __ret = ret;	/* explicit shadow */			\
									\
	init_wait(&__wq_entry);						\
	if (exclusive)							\
		__wq_entry.flags = WQ_FLAG_EXCLUSIVE;			\
	for (;;) {							\
		long __int = prepare_to_wait_event(&wq_head,		\
						  &__wq_entry, state);	\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			goto __out;					\
		}							\
									\
		cmd;							\
	}								\
	finish_wait(&wq_head, &__wq_entry);				\
__out:	__ret;								\
})
#endif

#ifndef TASK_NOLOAD

#define TASK_IDLE TASK_INTERRUPTIBLE

#define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd)	\
({									\
	wait_queue_entry_t __wq_entry;					\
	unsigned long flags;						\
	long __ret = ret;	/* explicit shadow */			\
	sigset_t __old_blocked, __new_blocked;				\
									\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);			\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);	\
	init_wait(&__wq_entry);						\
	if (exclusive)							\
		__wq_entry.flags = WQ_FLAG_EXCLUSIVE;			\
	for (;;) {							\
		prepare_to_wait_event(&wq_head,				\
				   &__wq_entry,				\
				   TASK_INTERRUPTIBLE);			\
									\
		if (condition)						\
			break;						\
		/* Some signals are not blockable, e.g. those     */	\
		/* injected by strace(1).  In that case we want   */	\
		/* schedule_timeout() to be called again rather   */	\
		/* than returning -EINTR when the RPC actually    */	\
		/* succeeded, so clear TIF_SIGPENDING here.  The  */	\
		/* signal stays queued and a later                */	\
		/* recalc_sigpending() will deliver it properly.  */	\
		if (signal_pending(current)) {				\
			spin_lock_irqsave(&current->sighand->siglock,	\
					  flags);			\
			clear_tsk_thread_flag(current, TIF_SIGPENDING);	\
			spin_unlock_irqrestore(&current->sighand->siglock,\
					       flags);			\
		}							\
		cmd;							\
	}								\
	finish_wait(&wq_head, &__wq_entry);				\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})

#define wait_event_idle(wq_head, condition)				\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event_idle(wq_head, condition, 0, 0, schedule());\
} while (0)

#define wait_event_idle_exclusive(wq_head, condition)			\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event_idle(wq_head, condition, 1, 0, schedule());\
} while (0)

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)\
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),	\
			   1, timeout,					\
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_event_idle_exclusive_timeout(		\
			wq_head, condition, timeout);			\
	__ret;								\
})

#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition,	\
						timeout, cmd1, cmd2)	\
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),	\
			   1, timeout,					\
			   cmd1; __ret = schedule_timeout(__ret); cmd2)

#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
					      cmd1, cmd2)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_event_idle_exclusive_timeout_cmd(	\
			wq_head, condition, timeout, cmd1, cmd2);	\
	__ret;								\
})

#define __wait_event_idle_timeout(wq_head, condition, timeout)		\
	___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),	\
			   0, timeout,					\
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_timeout(wq_head, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_event_idle_timeout(wq_head, condition,	\
						  timeout);		\
	__ret;								\
})

#else /* TASK_NOLOAD */
#ifndef wait_event_idle
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle(wq_head, condition)				\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event(wq_head, condition, TASK_IDLE, 0, 0,	\
			      schedule());				\
} while (0)
#endif
#ifndef wait_event_idle_exclusive
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to
 *		 system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the same list, a wake-up
 * stops at this process and further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle_exclusive(wq_head, condition)			\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event(wq_head, condition, TASK_IDLE, 1, 0,	\
			      schedule());				\
} while (0)
#endif
#ifndef wait_event_idle_exclusive_timeout
/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition
 *			 becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with a WQ_FLAG_EXCLUSIVE flag
 * set, so if other processes are waiting on the same list, a wake-up
 * stops at this process and further processes are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)	\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_event_idle_exclusive_timeout(wq_head,	\
							    condition,	\
							    timeout);	\
	__ret;								\
})
#endif
#ifndef wait_event_idle_exclusive_timeout_cmd
#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition,	\
						timeout, cmd1, cmd2)	\
	___wait_event(wq_head, ___wait_cond_timeout1(condition),	\
		      TASK_IDLE, 1, timeout,				\
		      cmd1; __ret = schedule_timeout(__ret); cmd2)

#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
					      cmd1, cmd2)		\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_event_idle_exclusive_timeout_cmd(	\
			wq_head, condition, timeout, cmd1, cmd2);	\
	__ret;								\
})
#endif

#ifndef wait_event_idle_timeout

#define __wait_event_idle_timeout(wq_head, condition, timeout)		\
	___wait_event(wq_head, ___wait_cond_timeout1(condition),	\
		      TASK_IDLE, 0, timeout,				\
		      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes
 *			     true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)		\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_event_idle_timeout(wq_head, condition,	\
						  timeout);		\
	__ret;								\
})
#endif
#endif /* TASK_NOLOAD */

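/* Usage sketch (illustrative only; "svc" and svc_has_work() are
 * hypothetical).  An idle wait sleeps without contributing to the
 * load average, which suits long-lived service threads:
 *
 *	long rc = wait_event_idle_timeout(svc->srv_waitq,
 *					  svc_has_work(svc),
 *					  cfs_time_seconds(30));
 *	if (rc == 0)
 *		return -ETIMEDOUT;	// still no work after 30s
 *	// otherwise work arrived with 'rc' jiffies to spare
 */
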
/* ___wait_event_lifo is used for LIFO exclusive 'idle' waits */
#ifdef TASK_NOLOAD

#define ___wait_event_lifo(wq_head, condition, ret, cmd)		\
({									\
	wait_queue_entry_t	__wq_entry;				\
	long __ret = ret;	/* explicit shadow */			\
									\
	init_wait(&__wq_entry);						\
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE;				\
	for (;;) {							\
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);	\
		prepare_to_wait_event(&wq_head, &__wq_entry, TASK_IDLE);\
									\
		if (condition)						\
			break;						\
									\
		cmd;							\
	}								\
	finish_wait(&wq_head, &__wq_entry);				\
	__ret;								\
})
#else
#define ___wait_event_lifo(wq_head, condition, ret, cmd)		\
({									\
	wait_queue_entry_t __wq_entry;					\
	unsigned long flags;						\
	long __ret = ret;	/* explicit shadow */			\
	sigset_t __old_blocked, __new_blocked;				\
									\
	siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);			\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);	\
	init_wait(&__wq_entry);						\
	__wq_entry.flags = WQ_FLAG_EXCLUSIVE;				\
	for (;;) {							\
		prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);	\
		prepare_to_wait_event(&wq_head, &__wq_entry,		\
				      TASK_INTERRUPTIBLE);		\
									\
		if (condition)						\
			break;						\
		/* See justification in ___wait_event_idle */		\
		if (signal_pending(current)) {				\
			spin_lock_irqsave(&current->sighand->siglock,	\
					  flags);			\
			clear_tsk_thread_flag(current, TIF_SIGPENDING);	\
			spin_unlock_irqrestore(&current->sighand->siglock,\
					       flags);			\
		}							\
		cmd;							\
	}								\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	finish_wait(&wq_head, &__wq_entry);				\
	__ret;								\
})
#endif

#define wait_event_idle_exclusive_lifo(wq_head, condition)		\
do {									\
	might_sleep();							\
	if (!(condition))						\
		___wait_event_lifo(wq_head, condition, 0, schedule());	\
} while (0)

#define __wait_event_idle_lifo_timeout(wq_head, condition, timeout)	\
	___wait_event_lifo(wq_head, ___wait_cond_timeout1(condition),	\
			   timeout,					\
			   __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_lifo_timeout(wq_head, condition, timeout)\
({									\
	long __ret = timeout;						\
	might_sleep();							\
	if (!___wait_cond_timeout1(condition))				\
		__ret = __wait_event_idle_lifo_timeout(wq_head,		\
						       condition,	\
						       timeout);	\
	__ret;								\
})

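/* Usage sketch (illustrative only; "pool" is hypothetical).  Because
 * prepare_to_wait_exclusive_head() links waiters at the head of the
 * queue, wake_up() picks the most recently idled thread: a busy pool
 * keeps a small set of cache-warm threads running while surplus
 * threads sleep undisturbed:
 *
 *	wait_event_idle_exclusive_lifo(pool->waitq,
 *				       !list_empty(&pool->requests));
 */
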
/* l_wait_event_abortable() is a bit like wait_event_killable()
 * except there is a fixed set of signals which will abort:
 * LUSTRE_FATAL_SIGS
 */
#define LUSTRE_FATAL_SIGS					 \
	(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
	 sigmask(SIGQUIT) | sigmask(SIGALRM))

#define l_wait_event_abortable(wq, condition)				\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	int __ret = 0;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);	\
	__ret = wait_event_interruptible(wq, condition);		\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})

#define l_wait_event_abortable_timeout(wq, condition, timeout)		\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	int __ret = 0;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);	\
	__ret = wait_event_interruptible_timeout(wq, condition, timeout);\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})

#define l_wait_event_abortable_exclusive(wq, condition)			\
({									\
	sigset_t __new_blocked, __old_blocked;				\
	int __ret = 0;							\
	siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);		\
	sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);	\
	__ret = wait_event_interruptible_exclusive(wq, condition);	\
	sigprocmask(SIG_SETMASK, &__old_blocked, NULL);			\
	__ret;								\
})

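/* Usage sketch (illustrative only; "obd" and its fields are
 * hypothetical).  Only LUSTRE_FATAL_SIGS can interrupt the wait; all
 * other signals are blocked for its duration:
 *
 *	rc = l_wait_event_abortable(obd->obd_waitq,
 *				    obd->obd_ready != 0);
 *	if (rc < 0)
 *		// aborted by a fatal signal (-ERESTARTSYS)
 */
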
#ifndef HAVE_WAIT_WOKEN
#define WQ_FLAG_WOKEN		0x02
long wait_woken(wait_queue_entry_t *wait, unsigned int mode, long timeout);
int woken_wake_function(wait_queue_entry_t *wait, unsigned int mode,
			int sync, void *key);
#endif /* HAVE_WAIT_WOKEN */
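
/* The wait_woken()/woken_wake_function() pair closes the race between
 * testing a condition and going to sleep, without holding the
 * waitqueue lock around the test.  Canonical pattern (sketch only;
 * "cond" is hypothetical):
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(&wq_head, &wait);
 *	while (!cond)
 *		timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
 *	remove_wait_queue(&wq_head, &wait);
 */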

#endif /* __LIBCFS_LINUX_WAIT_BIT_H */