Whamcloud - gitweb
LU-19098 hsm: don't print progname twice with lhsmtool
[fs/lustre-release.git] / libcfs / include / libcfs / linux / linux-wait.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2
3 #ifndef __LIBCFS_LINUX_WAIT_BIT_H
4 #define __LIBCFS_LINUX_WAIT_BIT_H
5
6 /* Make sure we can see if we have TASK_NOLOAD */
7 #include <linux/sched.h>
8 /*
9  * Linux wait-bit related types and methods:
10  */
11 #ifdef HAVE_WAIT_BIT_HEADER_H
12 #include <linux/wait_bit.h>
13 #endif
14 #include <linux/wait.h>
15
16 #ifndef HAVE_WAIT_QUEUE_ENTRY
17 #define wait_queue_entry_t wait_queue_t
18 #endif
19
20 #ifndef HAVE_PREPARE_TO_WAIT_EVENT
21 #define __add_wait_queue_entry_tail __add_wait_queue_tail
22 #endif
23
24 #ifndef HAVE_WAIT_BIT_HEADER_H
/* Compat definition for kernels without <linux/wait_bit.h>: couples the
 * bit/word key being waited on with the embedded waitqueue entry.
 */
struct wait_bit_queue_entry {
        struct wait_bit_key     key;
        wait_queue_entry_t      wq_entry;
};
29
/* Compat for kernels without <linux/wait_bit.h>: a sleep @state counts as
 * interruptible when it is not a compile-time constant (be conservative)
 * or when it explicitly permits signal delivery.
 *
 * The definition deliberately does NOT end with a line continuation: the
 * previous version had a trailing '\' that silently pulled the following
 * (blank) line into the macro and would have swallowed the #endif had the
 * blank line ever been removed.
 */
#define ___wait_is_interruptible(state)                                 \
        (!__builtin_constant_p(state) ||                                \
                state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)
34 #endif /* ! HAVE_WAIT_BIT_HEADER_H */
35
36 #ifndef HAVE_PREPARE_TO_WAIT_EVENT
37 extern long prepare_to_wait_event(wait_queue_head_t *wq_head,
38                                   wait_queue_entry_t *wq_entry, int state);
39 #endif
40
41 /* ___wait_cond_timeout changed number of args in v3.12-rc1-78-g35a2af94c7ce
42  * so let's define our own ___wait_cond_timeout1
43  */
44
/* Evaluate @condition and fold the result into the caller's __ret timeout
 * counter (__ret is intentionally captured from the expanding scope, not a
 * parameter).  If the condition became true just as __ret reached 0, force
 * __ret to 1 so "condition met at the last moment" is distinguishable from
 * a plain timeout.  The expression is true when the wait should stop:
 * condition satisfied or time expired.
 */
#define ___wait_cond_timeout1(condition)                                \
({                                                                      \
        bool __cond = (condition);                                      \
        if (__cond && !__ret)                                           \
                __ret = 1;                                              \
        __cond || !__ret;                                               \
})
52
#ifndef HAVE_CLEAR_AND_WAKE_UP_BIT
/**
 * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
 *
 * @bit: the bit of the word being waited on
 * @word: the word being waited on, a kernel virtual address
 *
 * You can use this helper if bitflags are manipulated atomically rather than
 * non-atomically under a lock.
 *
 * Compat copy of the upstream helper for kernels that lack it.
 */
static inline void clear_and_wake_up_bit(int bit, void *word)
{
        clear_bit_unlock(bit, word);
        /* See wake_up_bit() for which memory barrier you need to use. */
        smp_mb__after_atomic();
        wake_up_bit(word, bit);
}
#endif /* ! HAVE_CLEAR_AND_WAKE_UP_BIT */
71
72 #ifndef HAVE_WAIT_VAR_EVENT
73 extern void __init wait_bit_init(void);
74 extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry,
75                                 void *var, int flags);
76 extern void wake_up_var(void *var);
77 extern wait_queue_head_t *__var_waitqueue(void *p);
78
/* Core of the wait_var_event*() family: sleep on the waitqueue hashed from
 * @var (via __var_waitqueue()) until @condition is true.
 * @state is the task sleep state, @exclusive selects WQ_FLAG_EXCLUSIVE
 * queueing, @ret seeds the shadowed __ret result variable and @cmd runs
 * between condition checks (e.g. schedule()).  When an interruptible sleep
 * is cut short by a signal, the error from prepare_to_wait_event() is
 * returned instead and finish_wait() is skipped via the __out label.
 */
#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)   \
({                                                                      \
        __label__ __out;                                                \
        wait_queue_head_t *__wq_head = __var_waitqueue(var);            \
        struct wait_bit_queue_entry __wbq_entry;                        \
        long __ret = ret; /* explicit shadow */                         \
                                                                        \
        init_wait_var_entry(&__wbq_entry, var,                          \
                            exclusive ? WQ_FLAG_EXCLUSIVE : 0);         \
        for (;;) {                                                      \
                long __int = prepare_to_wait_event(__wq_head,           \
                                                   &__wbq_entry.wq_entry, \
                                                   state);              \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                if (___wait_is_interruptible(state) && __int) {         \
                        __ret = __int;                                  \
                        goto __out;                                     \
                }                                                       \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(__wq_head, &__wbq_entry.wq_entry);                  \
__out:  __ret;                                                          \
})
105
/* Unconditional uninterruptible wait for @condition on @var's queue. */
#define __wait_var_event(var, condition)                                \
        ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,   \
                          schedule())

/* Sleep uninterruptibly until @condition is true; wake_up_var(var) must
 * be called after any change that could affect the condition.
 */
#define wait_var_event(var, condition)                                  \
do {                                                                    \
        might_sleep();                                                  \
        if (condition)                                                  \
                break;                                                  \
        __wait_var_event(var, condition);                               \
} while (0)

/* As __wait_var_event() but sleeps in TASK_KILLABLE state. */
#define __wait_var_event_killable(var, condition)                       \
        ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,          \
                          schedule())

/* Killable wait: returns 0 once the condition is met, or the negative
 * error from the interrupted sleep if a fatal signal arrived first.
 */
#define wait_var_event_killable(var, condition)                         \
({                                                                      \
        int __ret = 0;                                                  \
        might_sleep();                                                  \
        if (!(condition))                                               \
                __ret = __wait_var_event_killable(var, condition);      \
        __ret;                                                          \
})

/* Timeout helper: __ret (shadowed in ___wait_var_event) carries the
 * remaining jiffies across schedule_timeout() calls.
 */
#define __wait_var_event_timeout(var, condition, timeout)               \
        ___wait_var_event(var, ___wait_cond_timeout1(condition),        \
                          TASK_UNINTERRUPTIBLE, 0, timeout,             \
                          __ret = schedule_timeout(__ret))

/* Wait up to @timeout jiffies.  Returns the remaining jiffies (at least
 * 1) if the condition came true, 0 on timeout; see ___wait_cond_timeout1()
 * for the last-moment-success case.
 */
#define wait_var_event_timeout(var, condition, timeout)                 \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_var_event_timeout(var, condition, timeout); \
        __ret;                                                          \
})
144 #else /* !HAVE_WAIT_VAR_EVENT */
/* linux-3.10.0-1062.el7 defines wait_var_event_timeout() using
 * __wait_cond_timeout(), but doesn't define __wait_cond_timeout !!!
 * NOTE(review): the guard below tests __wait_cond_timeout (two leading
 * underscores) while the macro actually defined is ___wait_cond_timeout
 * (three) - confirm this mismatch is intentional before touching it.
 */
# ifndef __wait_cond_timeout
# define ___wait_cond_timeout(condition)                                \
({                                                                      \
        bool __cond = (condition);                                      \
        if (__cond && !__ret)                                           \
                __ret = 1;                                              \
        __cond || !__ret;                                               \
})
# endif /* __wait_cond_timeout */
157
158 #endif /* ! HAVE_WAIT_VAR_EVENT */
159
/*
 * prepare_to_wait_event() does not support an exclusive
 * lifo wait.
 * However it will not relink the wait_queue_entry if
 * it is already linked.  So we link to the head of the
 * queue here, and it will stay there.
 */
static inline void prepare_to_wait_exclusive_head(
        wait_queue_head_t *waitq, wait_queue_entry_t *link)
{
        unsigned long flags;

        spin_lock_irqsave(&(waitq->lock), flags);
        /* the kernel renamed the list member task_list -> entry;
         * HAVE_WAIT_QUEUE_ENTRY_LIST selects the right name
         */
#ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
        if (list_empty(&link->entry))
#else
        if (list_empty(&link->task_list))
#endif
                __add_wait_queue_exclusive(waitq, link);
        spin_unlock_irqrestore(&((waitq)->lock), flags);
}
181
#ifndef ___wait_event
/*
 * The below macro ___wait_event() has an explicit shadow of the __ret
 * variable when used from the wait_event_*() macros.
 *
 * This is so that both can use the ___wait_cond_timeout1() construct
 * to wrap the condition.
 *
 * The type inconsistency of the wait_event_*() __ret variable is also
 * on purpose; we use long where we can return timeout values and int
 * otherwise.
 */

#define ___wait_event(wq_head, condition, state, exclusive, ret, cmd)   \
({                                                                      \
        __label__ __out;                                                \
        wait_queue_entry_t __wq_entry;                                  \
        long __ret = ret;       /* explicit shadow */                   \
                                                                        \
        init_wait(&__wq_entry);                                         \
        if (exclusive)                                                  \
                __wq_entry.flags = WQ_FLAG_EXCLUSIVE;                   \
        for (;;) {                                                      \
                /* non-zero when a pending signal interrupted an        \
                 * interruptible/killable sleep                         \
                 */                                                     \
                long __int = prepare_to_wait_event(&wq_head,            \
                                                  &__wq_entry, state);  \
                                                                        \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                if (___wait_is_interruptible(state) && __int) {         \
                        __ret = __int;                                  \
                        goto __out;                                     \
                }                                                       \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq_head, &__wq_entry);                             \
__out:  __ret;                                                          \
})
#endif
222
223 #ifndef TASK_NOLOAD
224
#define TASK_IDLE TASK_INTERRUPTIBLE

/* Emulation of a TASK_IDLE wait for kernels lacking TASK_NOLOAD: sleep in
 * TASK_INTERRUPTIBLE with LUSTRE_FATAL_SIGS blocked, so the task neither
 * contributes to the load average nor is aborted by the usual fatal
 * signals.
 *
 * @wq_head:   waitqueue to sleep on
 * @condition: C expression for the event to wait for
 * @exclusive: non-zero to queue with WQ_FLAG_EXCLUSIVE
 * @ret:       initial value of the shadowed __ret result variable
 * @cmd:       command run between condition checks (e.g. schedule())
 */
#define ___wait_event_idle(wq_head, condition, exclusive, ret, cmd)     \
({                                                                      \
        wait_queue_entry_t __wq_entry;                                  \
        unsigned long flags;                                            \
        long __ret = ret;       /* explicit shadow */                   \
        sigset_t __old_blocked, __new_blocked;                          \
                                                                        \
        siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);                  \
        /* SIG_BLOCK spelled out rather than the magic literal 0 */     \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        init_wait(&__wq_entry);                                         \
        if (exclusive)                                                  \
                __wq_entry.flags = WQ_FLAG_EXCLUSIVE;                   \
        for (;;) {                                                      \
                prepare_to_wait_event(&wq_head,                         \
                                   &__wq_entry,                         \
                                   TASK_INTERRUPTIBLE);                 \
                                                                        \
                if (condition)                                          \
                        break;                                          \
                /* We have to do this here because some signals */      \
                /* are not blockable - ie from strace(1).       */      \
                /* In these cases we want to schedule_timeout() */      \
                /* again, because we don't want that to return  */      \
                /* -EINTR when the RPC actually succeeded.      */      \
                /* The pending state is presumably re-evaluated */      \
                /* when the old mask is restored below - TODO   */      \
                /* confirm (original comment cited a            */      \
                /* recalc_sigpending() that is not visible here). */    \
                if (signal_pending(current)) {                          \
                        spin_lock_irqsave(&current->sighand->siglock,   \
                                          flags);                       \
                        clear_tsk_thread_flag(current, TIF_SIGPENDING); \
                        spin_unlock_irqrestore(&current->sighand->siglock,\
                                               flags);                  \
                }                                                       \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq_head, &__wq_entry);                             \
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        __ret;                                                          \
})
266
/* Sleep without contributing to load average until @condition is true. */
#define wait_event_idle(wq_head, condition)                             \
do {                                                                    \
        might_sleep();                                                  \
        if (!(condition))                                               \
                ___wait_event_idle(wq_head, condition, 0, 0, schedule());\
} while (0)

/* As wait_event_idle() but queued with WQ_FLAG_EXCLUSIVE. */
#define wait_event_idle_exclusive(wq_head, condition)                   \
do {                                                                    \
        might_sleep();                                                  \
        if (!(condition))                                               \
                ___wait_event_idle(wq_head, condition, 1, 0, schedule());\
} while (0)

#define __wait_event_idle_exclusive_timeout(wq_head, condition, timeout)\
        ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),   \
                           1, timeout,                                  \
                           __ret = schedule_timeout(__ret))

/* Exclusive idle wait with timeout.  Returns 0 on timeout, otherwise the
 * remaining jiffies (at least 1) when the condition came true.
 */
#define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)  \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_exclusive_timeout(            \
                        wq_head, condition, timeout);                   \
        __ret;                                                          \
})

#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition,     \
                                                timeout, cmd1, cmd2)    \
        ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),   \
                           1, timeout,                                  \
                           cmd1; __ret = schedule_timeout(__ret); cmd2)

/* As wait_event_idle_exclusive_timeout() but runs @cmd1 before and @cmd2
 * after each sleep.
 * NOTE(review): unlike the sibling macros, no might_sleep() here -
 * possibly because @cmd1 may drop a lock first; confirm before adding.
 */
#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
                                              cmd1, cmd2)               \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_exclusive_timeout_cmd(        \
                        wq_head, condition, timeout, cmd1, cmd2);       \
        __ret;                                                          \
})

#define __wait_event_idle_timeout(wq_head, condition, timeout)          \
        ___wait_event_idle(wq_head, ___wait_cond_timeout1(condition),   \
                           0, timeout,                                  \
                           __ret = schedule_timeout(__ret))

/* Non-exclusive idle wait with timeout; same return convention as
 * wait_event_idle_exclusive_timeout().
 */
#define wait_event_idle_timeout(wq_head, condition, timeout)            \
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_timeout(wq_head, condition,   \
                                                  timeout);             \
        __ret;                                                          \
})
326
327 #else /* TASK_IDLE */
328 #ifndef wait_event_idle
329 /**
330  * wait_event_idle - wait for a condition without contributing to system load
331  * @wq_head: the waitqueue to wait on
332  * @condition: a C expression for the event to wait for
333  *
334  * The process is put to sleep (TASK_IDLE) until the
335  * @condition evaluates to true.
336  * The @condition is checked each time the waitqueue @wq_head is woken up.
337  *
338  * wake_up() has to be called after changing any variable that could
339  * change the result of the wait condition.
340  *
341  */
342 #define wait_event_idle(wq_head, condition)                             \
343 do {                                                                    \
344         might_sleep();                                                  \
345         if (!(condition))                                               \
346                 ___wait_event(wq_head, condition, TASK_IDLE, 0, 0,      \
347                               schedule());                              \
348 } while (0)
349 #endif
350 #ifndef wait_event_idle_exclusive
351 /**
352  * wait_event_idle_exclusive - wait for a condition without contributing to
353  *               system load
354  * @wq_head: the waitqueue to wait on
355  * @condition: a C expression for the event to wait for
356  *
357  * The process is put to sleep (TASK_IDLE) until the
358  * @condition evaluates to true.
359  * The @condition is checked each time the waitqueue @wq_head is woken up.
360  *
361  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
362  * set thus if other processes wait on the same list, when this
363  * process is woken further processes are not considered.
364  *
365  * wake_up() has to be called after changing any variable that could
366  * change the result of the wait condition.
367  *
368  */
369 #define wait_event_idle_exclusive(wq_head, condition)                   \
370 do {                                                                    \
371         might_sleep();                                                  \
372         if (!(condition))                                               \
373                 ___wait_event(wq_head, condition, TASK_IDLE, 1, 0,      \
374                               schedule());                              \
375 } while (0)
376 #endif
377 #ifndef wait_event_idle_exclusive_timeout
378 /**
379  * wait_event_idle_exclusive_timeout - sleep without load until a condition
380  *                       becomes true or a timeout elapses
381  * @wq_head: the waitqueue to wait on
382  * @condition: a C expression for the event to wait for
383  * @timeout: timeout, in jiffies
384  *
385  * The process is put to sleep (TASK_IDLE) until the
386  * @condition evaluates to true. The @condition is checked each time
387  * the waitqueue @wq_head is woken up.
388  *
389  * The process is put on the wait queue with an WQ_FLAG_EXCLUSIVE flag
390  * set thus if other processes wait on the same list, when this
391  * process is woken further processes are not considered.
392  *
393  * wake_up() has to be called after changing any variable that could
394  * change the result of the wait condition.
395  *
396  * Returns:
397  * 0 if the @condition evaluated to %false after the @timeout elapsed,
398  * 1 if the @condition evaluated to %true after the @timeout elapsed,
399  * or the remaining jiffies (at least 1) if the @condition evaluated
400  * to %true before the @timeout elapsed.
401  */
402 #define wait_event_idle_exclusive_timeout(wq_head, condition, timeout)  \
403 ({                                                                      \
404         long __ret = timeout;                                           \
405         might_sleep();                                                  \
406         if (!___wait_cond_timeout1(condition))                          \
407                 __ret = __wait_event_idle_exclusive_timeout(wq_head,    \
408                                                             condition,  \
409                                                             timeout);   \
410         __ret;                                                          \
411 })
412 #endif
#ifndef wait_event_idle_exclusive_timeout_cmd
#define __wait_event_idle_exclusive_timeout_cmd(wq_head, condition,     \
                                                timeout, cmd1, cmd2)    \
        ___wait_event(wq_head, ___wait_cond_timeout1(condition),        \
                      TASK_IDLE, 1, timeout,                            \
                      cmd1; __ret = schedule_timeout(__ret); cmd2)

/* Exclusive TASK_IDLE wait with timeout, running @cmd1 before and @cmd2
 * after each sleep.  Returns 0 on timeout, otherwise the remaining
 * jiffies (at least 1) when the condition came true.
 * NOTE(review): unlike the sibling macros, no might_sleep() here -
 * possibly because @cmd1 may drop a lock first; confirm before adding.
 */
#define wait_event_idle_exclusive_timeout_cmd(wq_head, condition, timeout,\
                                              cmd1, cmd2)               \
({                                                                      \
        long __ret = timeout;                                           \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_exclusive_timeout_cmd(        \
                        wq_head, condition, timeout, cmd1, cmd2);       \
        __ret;                                                          \
})
#endif
430
431 #ifndef wait_event_idle_timeout
432
433 #define __wait_event_idle_timeout(wq_head, condition, timeout)          \
434         ___wait_event(wq_head, ___wait_cond_timeout1(condition),        \
435                       TASK_IDLE, 0, timeout,                            \
436                       __ret = schedule_timeout(__ret))
437
438 /**
439  * wait_event_idle_timeout - sleep without load until a condition becomes
440  *                           true or a timeout elapses
441  * @wq_head: the waitqueue to wait on
442  * @condition: a C expression for the event to wait for
443  * @timeout: timeout, in jiffies
444  *
445  * The process is put to sleep (TASK_IDLE) until the
446  * @condition evaluates to true. The @condition is checked each time
447  * the waitqueue @wq_head is woken up.
448  *
449  * wake_up() has to be called after changing any variable that could
450  * change the result of the wait condition.
451  *
452  * Returns:
453  * 0 if the @condition evaluated to %false after the @timeout elapsed,
454  * 1 if the @condition evaluated to %true after the @timeout elapsed,
455  * or the remaining jiffies (at least 1) if the @condition evaluated
456  * to %true before the @timeout elapsed.
457  */
458 #define wait_event_idle_timeout(wq_head, condition, timeout)            \
459 ({                                                                      \
460         long __ret = timeout;                                           \
461         might_sleep();                                                  \
462         if (!___wait_cond_timeout1(condition))                          \
463                 __ret = __wait_event_idle_timeout(wq_head, condition,   \
464                                                   timeout);             \
465         __ret;                                                          \
466 })
467 #endif
468 #endif /* TASK_IDLE */
469
470 /* ___wait_event_lifo is used for lifo exclusive 'idle' waits */
471 #ifdef TASK_NOLOAD
472
/* LIFO exclusive 'idle' wait (TASK_NOLOAD available).  The entry is first
 * linked at the HEAD of the queue by prepare_to_wait_exclusive_head();
 * prepare_to_wait_event() will not relink an already-linked entry, so it
 * only sets the TASK_IDLE sleep state (see the comment above
 * prepare_to_wait_exclusive_head()).
 */
#define ___wait_event_lifo(wq_head, condition, ret, cmd)                \
({                                                                      \
        wait_queue_entry_t       __wq_entry;                            \
        long __ret = ret;       /* explicit shadow */                   \
                                                                        \
        init_wait(&__wq_entry);                                         \
        __wq_entry.flags =  WQ_FLAG_EXCLUSIVE;                          \
        for (;;) {                                                      \
                prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);  \
                prepare_to_wait_event(&wq_head, &__wq_entry, TASK_IDLE);\
                                                                        \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                cmd;                                                    \
        }                                                               \
        finish_wait(&wq_head, &__wq_entry);                             \
        __ret;                                                          \
})
492 #else
/* Fallback LIFO exclusive 'idle' wait for kernels without TASK_NOLOAD:
 * as in ___wait_event_idle(), sleep TASK_INTERRUPTIBLE with
 * LUSTRE_FATAL_SIGS blocked and discard unblockable pending signals so
 * @cmd does not return -EINTR spuriously.  The entry is linked at the
 * head of the queue first (prepare_to_wait_event() will not relink it).
 */
#define ___wait_event_lifo(wq_head, condition, ret, cmd)                \
({                                                                      \
        wait_queue_entry_t __wq_entry;                                  \
        unsigned long flags;                                            \
        long __ret = ret;       /* explicit shadow */                   \
        sigset_t __old_blocked, __new_blocked;                          \
                                                                        \
        siginitset(&__new_blocked, LUSTRE_FATAL_SIGS);                  \
        /* SIG_BLOCK spelled out rather than the magic literal 0 */     \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        init_wait(&__wq_entry);                                         \
        __wq_entry.flags = WQ_FLAG_EXCLUSIVE;                           \
        for (;;) {                                                      \
                prepare_to_wait_exclusive_head(&wq_head, &__wq_entry);  \
                prepare_to_wait_event(&wq_head, &__wq_entry,            \
                                      TASK_INTERRUPTIBLE);              \
                                                                        \
                if (condition)                                          \
                        break;                                          \
                /* See justification in ___wait_event_idle */           \
                if (signal_pending(current)) {                          \
                        spin_lock_irqsave(&current->sighand->siglock,   \
                                          flags);                       \
                        clear_tsk_thread_flag(current, TIF_SIGPENDING); \
                        spin_unlock_irqrestore(&current->sighand->siglock,\
                                               flags);                  \
                }                                                       \
                cmd;                                                    \
        }                                                               \
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        finish_wait(&wq_head, &__wq_entry);                             \
        __ret;                                                          \
})
525 #endif
526
/* Exclusive LIFO idle wait until @condition is true. */
#define wait_event_idle_exclusive_lifo(wq_head, condition)              \
do {                                                                    \
        might_sleep();                                                  \
        if (!(condition))                                               \
                ___wait_event_lifo(wq_head, condition, 0, schedule());  \
} while (0)

#define __wait_event_idle_lifo_timeout(wq_head, condition, timeout)     \
        ___wait_event_lifo(wq_head, ___wait_cond_timeout1(condition),   \
                           timeout,                                     \
                           __ret = schedule_timeout(__ret))

/* Exclusive LIFO idle wait with timeout.  Returns 0 on timeout, otherwise
 * the remaining jiffies (at least 1) when the condition came true.
 */
#define wait_event_idle_exclusive_lifo_timeout(wq_head, condition, timeout)\
({                                                                      \
        long __ret = timeout;                                           \
        might_sleep();                                                  \
        if (!___wait_cond_timeout1(condition))                          \
                __ret = __wait_event_idle_lifo_timeout(wq_head,         \
                                                       condition,       \
                                                       timeout);        \
        __ret;                                                          \
})
549
/* l_wait_event_abortable() is a bit like wait_event_killable()
 * except there is a fixed set of signals which will abort:
 * LUSTRE_FATAL_SIGS
 */
/* the set of signals that abort an "abortable" wait */
#define LUSTRE_FATAL_SIGS                                        \
        (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
         sigmask(SIGQUIT) | sigmask(SIGALRM))
557
/* Interruptible wait where only LUSTRE_FATAL_SIGS may abort: all other
 * signals are blocked for the duration (siginitsetinv() inverts the fatal
 * set) and the previous mask is restored afterwards.  Returns 0 when the
 * condition is met or the error from the interrupted sleep.
 */
#define l_wait_event_abortable(wq, condition)                           \
({                                                                      \
        sigset_t __new_blocked, __old_blocked;                          \
        int __ret = 0;                                                  \
        siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);               \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        __ret = wait_event_interruptible(wq, condition);                \
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        __ret;                                                          \
})
568
/* As l_wait_event_abortable() but queued with WQ_FLAG_EXCLUSIVE via
 * wait_event_interruptible_exclusive().
 */
#define l_wait_event_abortable_exclusive(wq, condition)                 \
({                                                                      \
        sigset_t __new_blocked, __old_blocked;                          \
        int __ret = 0;                                                  \
        siginitsetinv(&__new_blocked, LUSTRE_FATAL_SIGS);               \
        sigprocmask(SIG_BLOCK, &__new_blocked, &__old_blocked);         \
        __ret = wait_event_interruptible_exclusive(wq, condition);      \
        sigprocmask(SIG_SETMASK, &__old_blocked, NULL);                 \
        __ret;                                                          \
})
579
580 #ifndef HAVE_WAIT_WOKEN
581 #define WQ_FLAG_WOKEN           0x02
582 long wait_woken(wait_queue_entry_t *wait, unsigned int mode, long timeout);
583 int woken_wake_function(wait_queue_entry_t *wait, unsigned int mode,
584                         int sync, void *key);
585 #endif /* HAVE_WAIT_WOKEN */
586
#endif /* __LIBCFS_LINUX_WAIT_BIT_H */