[fs/lustre-release.git] libcfs/libcfs/linux/linux-wait.c
/*
 * The implementation of the wait_bit*() and related waiting APIs:
 */
#include <linux/hash.h>
#include <linux/sched.h>
#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#endif
#include <libcfs/linux/linux-wait.h>

#ifndef HAVE_PREPARE_TO_WAIT_EVENT

/* Older kernels name the tail-insert helper __add_wait_queue_tail() */
#define __add_wait_queue_entry_tail __add_wait_queue_tail

long prepare_to_wait_event(wait_queue_head_t *wq_head,
			   wait_queue_entry_t *wq_entry, int state)
{
	unsigned long flags;
	long ret = 0;

	spin_lock_irqsave(&wq_head->lock, flags);
	if (unlikely(signal_pending_state(state, current))) {
		/*
		 * Exclusive waiter must not fail if it was selected by wakeup,
		 * it should "consume" the condition we were waiting for.
		 *
		 * The caller will recheck the condition and return success if
		 * we were already woken up, we can not miss the event because
		 * wakeup locks/unlocks the same wq_head->lock.
		 *
		 * But we need to ensure that set-condition + wakeup after that
		 * can't see us, it should wake up another exclusive waiter if
		 * we fail.
		 */
		list_del_init(&wq_entry->task_list);
		ret = -ERESTARTSYS;
	} else {
		if (list_empty(&wq_entry->task_list)) {
			if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
				__add_wait_queue_entry_tail(wq_head, wq_entry);
			else
				__add_wait_queue(wq_head, wq_entry);
		}
		set_current_state(state);
	}
	spin_unlock_irqrestore(&wq_head->lock, flags);

	return ret;
}
EXPORT_SYMBOL(prepare_to_wait_event);
#endif /* !HAVE_PREPARE_TO_WAIT_EVENT */
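
/*
 * Illustration only, not part of the original file: a minimal sketch of the
 * open-coded wait loop that prepare_to_wait_event() is meant to back, i.e.
 * roughly what a wait_event_interruptible()-style macro expands to.  The
 * names example_wq, example_cond and example_wait_for_cond() are assumptions
 * made for this sketch, and the condition check is deliberately simplistic.
 */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_cond;

static long example_wait_for_cond(void)
{
	DEFINE_WAIT(wait);
	long ret = 0;

	for (;;) {
		/* Queue ourselves and set TASK_INTERRUPTIBLE atomically;
		 * a non-zero return means a signal is already pending. */
		ret = prepare_to_wait_event(&example_wq, &wait,
					    TASK_INTERRUPTIBLE);
		if (example_cond)
			break;
		if (ret)
			break;	/* -ERESTARTSYS: interrupted by a signal */
		schedule();
	}
	finish_wait(&example_wq, &wait);

	return example_cond ? 0 : ret;
}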

#ifndef HAVE_WAIT_VAR_EVENT

#define WAIT_TABLE_BITS 8
#define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)

/*
 * Shared, hashed table of wait queue heads: every variable waited on via
 * wait_var_event() maps to one of these buckets, so unrelated variables may
 * end up sharing a queue head.
 */
static wait_queue_head_t bit_wait_table[WAIT_TABLE_SIZE] __cacheline_aligned;

wait_queue_head_t *__var_waitqueue(void *p)
{
	return bit_wait_table + hash_ptr(p, WAIT_TABLE_BITS);
}
EXPORT_SYMBOL(__var_waitqueue);

/*
 * Wake callback for entries queued by the wait_var_event() path: because
 * buckets in bit_wait_table are shared, only wake the waiter whose key
 * matches the variable actually being woken.
 */
static int
var_wake_function(wait_queue_entry_t *wq_entry, unsigned int mode,
		  int sync, void *arg)
{
	struct wait_bit_key *key = arg;
	struct wait_bit_queue_entry *wbq_entry =
		container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);

	if (wbq_entry->key.flags != key->flags ||
	    wbq_entry->key.bit_nr != key->bit_nr)
		return 0;

	return autoremove_wake_function(wq_entry, mode, sync, key);
}

/*
 * Prepare an entry for waiting on a variable: the key stores the variable's
 * address in .flags and uses bit_nr == -1 to distinguish it from bit waits.
 */
void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var,
			 int flags)
{
	*wbq_entry = (struct wait_bit_queue_entry){
		.key = {
			.flags	= (var),
			.bit_nr = -1,
		},
		.wq_entry = {
			.private = current,
			.func = var_wake_function,
#ifdef HAVE_WAIT_QUEUE_ENTRY_LIST
			.entry = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
#else
			.task_list = LIST_HEAD_INIT(wbq_entry->wq_entry.task_list),
#endif
		},
	};
}
EXPORT_SYMBOL(init_wait_var_entry);

void wake_up_var(void *var)
{
	__wake_up_bit(__var_waitqueue(var), var, -1);
}
EXPORT_SYMBOL(wake_up_var);
#endif /* ! HAVE_WAIT_VAR_EVENT */
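
/*
 * Illustration only, not part of the original file: a minimal sketch of how
 * the helpers above combine into the wait_var_event()/wake_up_var() pattern.
 * example_done, example_wait_for_done() and example_complete() are
 * assumptions made for this sketch; real callers also need appropriate
 * memory ordering or locking around the variable they sleep on.
 */
static int example_done;

/* Waiter: sleep until example_done becomes non-zero. */
static void example_wait_for_done(void)
{
	struct wait_bit_queue_entry wbq;
	wait_queue_head_t *wq_head = __var_waitqueue(&example_done);

	/* Key the entry to &example_done so var_wake_function() only wakes
	 * us for wake_up_var(&example_done), not for other variables that
	 * happen to hash to the same bit_wait_table bucket. */
	init_wait_var_entry(&wbq, &example_done, 0);
	for (;;) {
		prepare_to_wait_event(wq_head, &wbq.wq_entry,
				      TASK_UNINTERRUPTIBLE);
		if (example_done)
			break;
		schedule();
	}
	finish_wait(wq_head, &wbq.wq_entry);
}

/* Waker: update the variable first, then wake anyone sleeping on it. */
static void example_complete(void)
{
	example_done = 1;
	wake_up_var(&example_done);
}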