LU-12362 ptlrpc: use wait_woken() in ptlrpcd()
libcfs/include/libcfs/linux/linux-wait.h (fs/lustre-release.git)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LIBCFS_LINUX_WAIT_BIT_H
#define __LIBCFS_LINUX_WAIT_BIT_H

/* Make sure we can see if we have TASK_NOLOAD */
#include <linux/sched.h>
/*
 * Linux wait-bit related types and methods:
 */
#ifdef HAVE_WAIT_BIT_HEADER_H
#include <linux/wait_bit.h>
#endif
#include <linux/wait.h>

#ifndef HAVE_WAIT_QUEUE_ENTRY
#define wait_queue_entry_t wait_queue_t
#endif

#ifndef HAVE_WAIT_BIT_HEADER_H
struct wait_bit_queue_entry {
        struct wait_bit_key     key;
        wait_queue_entry_t      wq_entry;
};

#define ___wait_is_interruptible(state)                                         \
        (!__builtin_constant_p(state) ||                                        \
                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)

#endif /* ! HAVE_WAIT_BIT_HEADER_H */

#ifndef HAVE_PREPARE_TO_WAIT_EVENT
extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
                                  wait_queue_entry_t *wq_entry, int state);
#endif

/* ___wait_cond_timeout changed its number of arguments in
 * v3.12-rc1-78-g35a2af94c7ce, so let's define our own
 * ___wait_cond_timeout1
 */

#define ___wait_cond_timeout1(condition)                                \
({                                                                      \
        bool __cond = (condition);                                      \
        if (__cond && !__ret)                                           \
                __ret = 1;                                              \
        __cond || !__ret;                                               \
})
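
/*
 * A sketch of how the *_timeout macros below consume this helper
 * (illustrative only): __ret starts as the caller's timeout and is
 * refreshed by schedule_timeout(), so ___wait_cond_timeout1() is true
 * when either the condition holds (forcing __ret to at least 1, which
 * keeps "condition met" distinguishable from "timed out") or __ret has
 * dropped to zero.
 *
 *      long __ret = timeout;
 *      ...
 *      if (___wait_cond_timeout1(condition))   // true: stop waiting
 *              ...
 */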

#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 *
 * @bit: the bit of the word being waited on
 * @word: the word being waited on, a kernel virtual address
 *
 * You can use this helper if bitflags are manipulated atomically rather than
 * non-atomically under a lock.
 */
static inline void clear_and_wake_up_bit(int bit, void *word)
{
        clear_bit_unlock(bit, word);
        /* See wake_up_bit() for which memory barrier you need to use. */
        smp_mb__after_atomic();
        wake_up_bit(word, bit);
}
#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */
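
/*
 * Minimal usage sketch (illustrative only; my_flags, an unsigned long,
 * and MY_BUSY_BIT are hypothetical): one thread sleeps until the bit
 * clears, while the owner clears it and wakes all waiters in one call.
 *
 *      // waiter
 *      wait_on_bit(&my_flags, MY_BUSY_BIT, TASK_UNINTERRUPTIBLE);
 *
 *      // owner, when finished with the resource
 *      clear_and_wake_up_bit(MY_BUSY_BIT, &my_flags);
 */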

#ifndef HAVE_WAIT_VAR_EVENT
extern void __init wait_bit_init(void);
extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
                                void *var, int flags);
extern void wake_up_var(void *var);
extern wait_queue_head_t *__var_waitqueue(void *p);

#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)   \
({                                                                      \
        __label__ __out;                                                \
        wait_queue_head_t *__wq_head = __var_waitqueue(var);            \
        struct wait_bit_queue_entry __wbq_entry;                        \
        long __ret = ret; /* explicit shadow */                         \
                                                                        \
        init_wait_var_entry(&__wbq_entry, var,                          \
                            exclusive ? WQ_FLAG_EXCLUSIVE : 0);         \
        for (;;) {                                                      \
                long __int = prepare_to_wait_event(__wq_head,           \
                                                   &__wbq_entry.wq_entry, \
                                                   state);              \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                if (___wait_is_interruptible(state) && __int) {         \
                        __ret = __int;                                  \
                        goto __out;                                     \
                }                                                       \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(__wq_head, &__wbq_entry.wq_entry);                  \
__out:  __ret;                                                          \
})

#define __wait_var_event(var, condition)                                \
        ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,   \
                          schedule())

#define wait_var_event(var, condition)                                  \
do {                                                                    \
        might_sleep();                                                  \
        if (condition)                                                  \
                break;                                                  \
        __wait_var_event(var, condition);                               \
} while (0)

#define __wait_var_event_killable(var, condition)                       \
        ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,          \
                          schedule())

#define wait_var_event_killable(var, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_var_event_killable(var, condition);      \
        __ret;                                                          \
})

#define __wait_var_event_timeout(var, condition, timeout)               \
        ___wait_var_event(var, ___wait_cond_timeout1(condition),        \
                          TASK_UNINTERRUPTIBLE, 0, timeout,             \
                          __ret = schedule_timeout(__ret))

#define wait_var_event_timeout(var, condition, timeout)                 \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_var_event_timeout(var, condition, timeout); \
        __ret;                                                          \
})
#else /* !HAVE_WAIT_VAR_EVENT */
/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
 * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
 */
# ifndef __wait_cond_timeout
# define ___wait_cond_timeout(condition)                                \
({                                                                      \
        bool __cond = (condition);                                      \
        if (__cond && !__ret)                                           \
                __ret = 1;                                              \
        __cond || !__ret;                                               \
})
# endif /* __wait_cond_timeout */

#endif /* ! HAVE_WAIT_VAR_EVENT */
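
/*
 * Usage sketch for the wait_var_event() family (illustrative only;
 * `pending` is a hypothetical atomic_t):
 *
 *      // waiter: sleep until pending reaches zero, for at most 5 seconds
 *      long rem = wait_var_event_timeout(&pending,
 *                                        atomic_read(&pending) == 0,
 *                                        5 * HZ);
 *      // rem == 0 means the timeout expired with the condition false
 *
 *      // completion side: wake_up_var() must follow the state change
 *      if (atomic_dec_and_test(&pending))
 *              wake_up_var(&pending);
 */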

/*
 * prepare_to_wait_event() does not support an exclusive
 * LIFO wait.
 * However it will not relink the wait_queue_entry if
 * it is already linked.  So we link it to the head of the
 * queue here, and it will stay there.
 */
static inline void prepare_to_wait_exclusive_head(
        wait_queue_head_t *waitq, wait_queue_entry_t *link)
{
        unsigned long flags;

        spin_lock_irqsave(&(waitq->lock), flags);
#ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
        if (list_empty(&link->entry))
#else
        if (list_empty(&link->task_list))
#endif
                __add_wait_queue_exclusive(waitq, link);
        spin_unlock_irqrestore(&(waitq->lock), flags);
}
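
/*
 * Effect on wake order (illustrative): if thread A parks first and
 * thread B second, each pairing this helper with prepare_to_wait_event()
 * as ___wait_event_lifo() does below, B sits at the head of the queue,
 * so a single wake_up() resumes B (LIFO) rather than A (FIFO):
 *
 *      prepare_to_wait_exclusive_head(&waitq, &entry);
 *      prepare_to_wait_event(&waitq, &entry, TASK_IDLE);
 */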

#ifndef ___wait_event
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout1() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)   \
({                                                                      \
        __label__ __out;                                                \
        wait_queue_entry_t __wq_entry;                                  \
        long __ret = ret;       /* explicit shadow */                   \
                                                                        \
        init_wait(&__wq_entry);                                         \
        if (exclusive)                                                  \
                __wq_entry.flags = WQ_FLAG_EXCLUSIVE;                   \
        for (;;) {                                                      \
                long __int = prepare_to_wait_event(&wq_head,            \
                                                  &__wq_entry, state);  \
                                                                        \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                if (___wait_is_interruptible(state) && __int) {         \
                        __ret = __int;                                  \
                        goto __out;                                     \
                }                                                       \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq_head, &__wq_entry);                             \
__out:  __ret;                                                          \
})
#endif

#ifndef TASK_NOLOAD

#define TASK_IDLE TASK_INTERRUPTIBLE

#define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd)     \
({                                                                      \
        wait_queue_entry_t __wq_entry;                                  \
        unsigned long flags;                                            \
        long __ret = ret;       /* explicit shadow */                   \
        sigset_t __old_blocked, __new_blocked;                          \
                                                                        \
        siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);                  \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        init_wait(&__wq_entry);                                         \
        if (exclusive)                                                  \
                __wq_entry.flags = WQ_FLAG_EXCLUSIVE;                   \
        for (;;) {                                                      \
                prepare_to_wait_event(&wq_head,                         \
                                   &__wq_entry,                         \
                                   TASK_INTERRUPTIBLE);                 \
                                                                        \
                if (condition)                                          \
                        break;                                          \
                /* We have to do this here because some signals */      \
                /* are not blockable - e.g. those sent by       */      \
                /* strace(1). In those cases we want to call    */      \
                /* schedule_timeout() again, so the wait does   */      \
                /* not return -EINTR when the RPC actually      */      \
                /* succeeded. The recalc_sigpending() done by   */      \
                /* the final sigprocmask() call below re-raises */      \
                /* TIF_SIGPENDING, so the signal is still       */      \
                /* delivered properly.                          */      \
                if (signal_pending(current)) {                          \
                        spin_lock_irqsave(&current->sighand->siglock,   \
                                          flags);                       \
                        clear_tsk_thread_flag(current, TIF_SIGPENDING); \
                        spin_unlock_irqrestore(&current->sighand->siglock,\
                                               flags);                  \
                }                                                       \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq_head, &__wq_entry);                             \
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        __ret;                                                          \
})

#define wait_event_idle(wq_head, condition)                             \
do {                                                                    \
        might_sleep();                                                  \
        if (!(condition))                                               \
                ___wait_event_idle(wq_head, condition, 0, 0, schedule());\
} while (0)

#define wait_event_idle_exclusive(wq_head, condition)                   \
do {                                                                    \
        might_sleep();                                                  \
        if (!(condition))                                               \
                ___wait_event_idle(wq_head, condition, 1, 0, schedule());\
} while (0)

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)\
        ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),   \
                           1, timeout,                                  \
                           __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)  \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_exclusive_timeout(            \
                        wq_head, condition, timeout);                   \
        __ret;                                                          \
})

#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition,     \
                                                timeout, cmd1, cmd2)    \
        ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),   \
                           1, timeout,                                  \
                           cmd1; __ret = schedule_timeout(__ret); cmd2)

#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
                                              cmd1, cmd2)               \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_exclusive_timeout_cmd(        \
                        wq_head, condition, timeout, cmd1, cmd2);       \
        __ret;                                                          \
})

#define __wait_event_idle_timeout(wq_head, condition, timeout)          \
        ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),   \
                           0, timeout,                                  \
                           __ret = schedule_timeout(__ret))

#define wait_event_idle_timeout(wq_head, condition, timeout)            \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_timeout(wq_head, condition,   \
                                                  timeout);             \
        __ret;                                                          \
})

#else /* TASK_NOLOAD */
#ifndef wait_event_idle
/**
 * wait_event_idle - wait for a condition without contributing to system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle(wq_head, condition)                             \
do {                                                                    \
        might_sleep();                                                  \
        if (!(condition))                                               \
                ___wait_event(wq_head, condition, TASK_IDLE, 0, 0,      \
                              schedule());                              \
} while (0)
#endif
#ifndef wait_event_idle_exclusive
/**
 * wait_event_idle_exclusive - wait for a condition without contributing to
 *               system load
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true.
 * The @condition is checked each time the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken, further exclusive waiters on the
 * same list are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 */
#define wait_event_idle_exclusive(wq_head, condition)                   \
do {                                                                    \
        might_sleep();                                                  \
        if (!(condition))                                               \
                ___wait_event(wq_head, condition, TASK_IDLE, 1, 0,      \
                              schedule());                              \
} while (0)
#endif
#ifndef wait_event_idle_exclusive_timeout
/**
 * wait_event_idle_exclusive_timeout - sleep without load until a condition
 *                       becomes true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * The process is put on the wait queue with the WQ_FLAG_EXCLUSIVE flag
 * set, so when this process is woken, further exclusive waiters on the
 * same list are not considered.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)  \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_exclusive_timeout(wq_head,    \
                                                            condition,  \
                                                            timeout);   \
        __ret;                                                          \
})
#endif
#ifndef wait_event_idle_exclusive_timeout_cmd
#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition,     \
                                                timeout, cmd1, cmd2)    \
        ___wait_event(wq_head, ___wait_cond_timeout1(condition),        \
                      TASK_IDLE, 1, timeout,                            \
                      cmd1; __ret = schedule_timeout(__ret); cmd2)

#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
                                              cmd1, cmd2)               \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_exclusive_timeout_cmd(        \
                        wq_head, condition, timeout, cmd1, cmd2);       \
        __ret;                                                          \
})
#endif

#ifndef wait_event_idle_timeout

#define __wait_event_idle_timeout(wq_head, condition, timeout)          \
        ___wait_event(wq_head, ___wait_cond_timeout1(condition),        \
                      TASK_IDLE, 0, timeout,                            \
                      __ret = schedule_timeout(__ret))

/**
 * wait_event_idle_timeout - sleep without load until a condition becomes
 *                           true or a timeout elapses
 * @wq_head: the waitqueue to wait on
 * @condition: a C expression for the event to wait for
 * @timeout: timeout, in jiffies
 *
 * The process is put to sleep (TASK_IDLE) until the
 * @condition evaluates to true. The @condition is checked each time
 * the waitqueue @wq_head is woken up.
 *
 * wake_up() has to be called after changing any variable that could
 * change the result of the wait condition.
 *
 * Returns:
 * 0 if the @condition evaluated to %false after the @timeout elapsed,
 * 1 if the @condition evaluated to %true after the @timeout elapsed,
 * or the remaining jiffies (at least 1) if the @condition evaluated
 * to %true before the @timeout elapsed.
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)            \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_timeout(wq_head, condition,   \
                                                  timeout);             \
        __ret;                                                          \
})
#endif
#endif /* TASK_NOLOAD */
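
/*
 * Usage sketch (illustrative; svc_waitq and svc_has_work() are
 * hypothetical):
 *
 *      long rc = wait_event_idle_timeout(svc_waitq, svc_has_work(),
 *                                        30 * HZ);
 *      if (rc == 0)
 *              ; // timed out, condition still false after 30s
 *      else
 *              ; // condition true, rc is the remaining jiffies (>= 1)
 */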

/* ___wait_event_lifo is used for LIFO exclusive 'idle' waits */
#ifdef TASK_NOLOAD

#define ___wait_event_lifo(wq_head, condition, ret, cmd)                \
({                                                                      \
        wait_queue_entry_t __wq_entry;                                  \
        long __ret = ret;       /* explicit shadow */                   \
                                                                        \
        init_wait(&__wq_entry);                                         \
        __wq_entry.flags = WQ_FLAG_EXCLUSIVE;                           \
        for (;;) {                                                      \
                prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);  \
                prepare_to_wait_event(&wq_head, &__wq_entry, TASK_IDLE);\
                                                                        \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq_head, &__wq_entry);                             \
        __ret;                                                          \
})
#else
#define ___wait_event_lifo(wq_head, condition, ret, cmd)                \
({                                                                      \
        wait_queue_entry_t __wq_entry;                                  \
        unsigned long flags;                                            \
        long __ret = ret;       /* explicit shadow */                   \
        sigset_t __old_blocked, __new_blocked;                          \
                                                                        \
        siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);                  \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        init_wait(&__wq_entry);                                         \
        __wq_entry.flags = WQ_FLAG_EXCLUSIVE;                           \
        for (;;) {                                                      \
                prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);  \
                prepare_to_wait_event(&wq_head, &__wq_entry,            \
                                      TASK_INTERRUPTIBLE);              \
                                                                        \
                if (condition)                                          \
                        break;                                          \
                /* See the justification in ___wait_event_idle() */     \
                if (signal_pending(current)) {                          \
                        spin_lock_irqsave(&current->sighand->siglock,   \
                                          flags);                       \
                        clear_tsk_thread_flag(current, TIF_SIGPENDING); \
                        spin_unlock_irqrestore(&current->sighand->siglock,\
                                               flags);                  \
                }                                                       \
                cmd;                                                    \
        }                                                               \
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        finish_wait(&wq_head, &__wq_entry);                             \
        __ret;                                                          \
})
#endif

#define wait_event_idle_exclusive_lifo(wq_head, condition)              \
do {                                                                    \
        might_sleep();                                                  \
        if (!(condition))                                               \
                ___wait_event_lifo(wq_head, condition, 0, schedule());  \
} while (0)

#define __wait_event_idle_lifo_timeout(wq_head, condition, timeout)     \
        ___wait_event_lifo(wq_head, ___wait_cond_timeout1(condition),   \
                           timeout,                                     \
                           __ret = schedule_timeout(__ret))

#define wait_event_idle_exclusive_lifo_timeout(wq_head, condition, timeout)\
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_lifo_timeout(wq_head,         \
                                                       condition,       \
                                                       timeout);        \
        __ret;                                                          \
})
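
/*
 * Usage sketch (illustrative; svc->srv_waitq and svc_has_work() are
 * hypothetical): idle service threads park LIFO, so the most recently
 * active thread, with the warmest caches, is woken first.
 *
 *      wait_event_idle_exclusive_lifo(svc->srv_waitq, svc_has_work(svc));
 */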

/* l_wait_event_abortable() is a bit like wait_event_killable(),
 * except that only the fixed set of signals in LUSTRE_FATAL_SIGS
 * will abort the wait.
 */
#define LUSTRE_FATAL_SIGS                                        \
        (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
         sigmask(SIGQUIT) | sigmask(SIGALRM))

#define l_wait_event_abortable(wq, condition)                           \
({                                                                      \
        sigset_t __new_blocked, __old_blocked;                          \
        int __ret = 0;                                                  \
        siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);               \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        __ret = wait_event_interruptible(wq, condition);                \
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        __ret;                                                          \
})
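
/*
 * Usage sketch (illustrative; req->rq_waitq and reply_arrived() are
 * hypothetical): wait for an RPC reply, aborting only on one of
 * LUSTRE_FATAL_SIGS; rc is -ERESTARTSYS if such a signal arrived.
 *
 *      rc = l_wait_event_abortable(req->rq_waitq, reply_arrived(req));
 */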

#define l_wait_event_abortable_timeout(wq, condition, timeout)          \
({                                                                      \
        sigset_t __new_blocked, __old_blocked;                          \
        int __ret = 0;                                                  \
        siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);               \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        __ret = wait_event_interruptible_timeout(wq, condition, timeout);\
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        __ret;                                                          \
})

#define l_wait_event_abortable_exclusive(wq, condition)                 \
({                                                                      \
        sigset_t __new_blocked, __old_blocked;                          \
        int __ret = 0;                                                  \
        siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);               \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        __ret = wait_event_interruptible_exclusive(wq, condition);      \
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        __ret;                                                          \
})

#ifndef HAVE_WAIT_WOKEN
#define WQ_FLAG_WOKEN           0x02
long wait_woken(wait_queue_entry_t *wait, unsigned int mode, long timeout);
int woken_wake_function(wait_queue_entry_t *wait, unsigned int mode,
                        int sync, void *key);
#endif /* HAVE_WAIT_WOKEN */
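
/*
 * Typical wait_woken() pattern, as in the ptlrpcd() change named above
 * (sketch only; waitq and work_ready() are hypothetical).
 * DEFINE_WAIT_FUNC() attaches woken_wake_function(), so a wakeup sets
 * WQ_FLAG_WOKEN and wait_woken() cannot miss a wakeup that races with
 * the condition check:
 *
 *      DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *      add_wait_queue(&waitq, &wait);
 *      while (!work_ready())
 *              wait_woken(&wait, TASK_IDLE, MAX_SCHEDULE_TIMEOUT);
 *      remove_wait_queue(&waitq, &wait);
 */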

#endif /* __LIBCFS_LINUX_WAIT_BIT_H */