/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre_lib.h
 *
 * Basic Lustre library routines.
 */

#ifndef _LUSTRE_LIB_H
#define _LUSTRE_LIB_H

#ifdef HAVE_SCHED_HEADERS
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#endif

#include <libcfs/libcfs.h>
#include <uapi/linux/lustre/lustre_idl.h>
#include <uapi/linux/lustre/lustre_ver.h>
#include <uapi/linux/lustre/lustre_cfg.h>

struct ptlrpc_request;

#include <lustre_ha.h>
#include <lustre_net.h>

#define LI_POISON 0x5a5a5a5a
#if BITS_PER_LONG > 32
# define LL_POISON 0x5a5a5a5a5a5a5a5aL
#else
# define LL_POISON 0x5a5a5a5aL
#endif
#define LP_POISON ((void *)LL_POISON)

#ifdef HAVE_SERVER_SUPPORT
int rev_import_init(struct obd_export *exp);
int target_handle_connect(struct ptlrpc_request *req);
int target_handle_disconnect(struct ptlrpc_request *req);
void target_destroy_export(struct obd_export *exp);
void target_committed_to_req(struct ptlrpc_request *req);
void target_cancel_recovery_timer(struct obd_device *obd);
void target_stop_recovery_thread(struct obd_device *obd);
void target_cleanup_recovery(struct obd_device *obd);
int target_queue_recovery_request(struct ptlrpc_request *req,
				  struct obd_device *obd);
int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
		   struct l_wait_info *lwi);
#endif

int target_pack_pool_reply(struct ptlrpc_request *req);
int do_set_info_async(struct obd_import *imp,
		      int opcode, int version,
		      size_t keylen, void *key,
		      size_t vallen, void *val,
		      struct ptlrpc_request_set *set);

void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);

/**
 * l_wait_event is a flexible sleeping function, permitting simple caller
 * configuration of interrupt and timeout sensitivity along with actions to
 * be performed in the event of either exception.
 *
 * The first form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
 *					     intr_handler, callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending. It
 * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
 * 'condition' becomes true, it optionally calls the specified 'intr_handler'
 * if not NULL, and returns -EINTR.
 *
 * If a non-zero timeout is specified, signals are ignored until the timeout
 * has expired. At this time, if 'timeout_handler' is not NULL it is called.
 * If it returns FALSE, l_wait_event() continues to wait as described above
 * with signals enabled. Otherwise it returns -ETIMEDOUT.
 *
 * LWI_INTR(intr_handler, callback_data) is shorthand for
 * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
 *
 * The second form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This form is the same as the first except that it COMPLETELY IGNORES
 * SIGNALS. The caller must therefore beware that if 'timeout' is zero, or if
 * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
 * can unblock the current process is 'condition' becoming TRUE.
 *
 * Another form of usage is:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
 *					         timeout_handler);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This is the same as the previous case, but the condition is checked once
 * every 'interval' jiffies (if non-zero).
 *
 * Subtle synchronization point: this macro does *not* necessarily take the
 * wait-queue spin-lock before returning, and, hence, the following idiom is
 * safe ONLY when the caller provides some external locking:
 *
 *	l_wait_event(&obj->wq, ....);			(1)
 *
 *	wake_up(&obj->wq):				(2)
 *		spin_lock(&q->lock);			(2.1)
 *		__wake_up_common(q, ...);		(2.2)
 *		spin_unlock(&q->lock);			(2.3)
 *
 *	OBD_FREE_PTR(obj);				(3)
 *
 * As l_wait_event() may "short-cut" execution and return without taking the
 * wait-queue spin-lock, some additional synchronization is necessary to
 * guarantee that step (3) can begin only after (2.3) finishes.
 *
 * XXX nikita: some ptlrpc daemon threads have races of that sort.
 */
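
/*
 * Illustrative sketch only (caller-side code; 'obj', its lock and fields are
 * assumptions, not part of this header): one way to provide the external
 * locking described above is to have the waker publish the condition and
 * issue the wake_up() under a lock that is also taken before the object is
 * freed, so step (3) cannot start until step (2.3) has finished:
 *
 *	spin_lock(&obj->obj_lock);
 *	obj->obj_done = 1;			(makes 'condition' true)
 *	wake_up(&obj->obj_waitq);
 *	spin_unlock(&obj->obj_lock);
 *
 * and on the freeing side, after l_wait_event() has returned:
 *
 *	spin_lock(&obj->obj_lock);		(waits out any waker in (2))
 *	spin_unlock(&obj->obj_lock);
 *	OBD_FREE_PTR(obj);
 */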

static inline int back_to_sleep(void *arg)
{
	return 0;
}

#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))

struct l_wait_info {
	long	lwi_timeout;	/* relative, in jiffies */
	long	lwi_interval;
	int	lwi_allow_intr;
	int  (*lwi_on_timeout)(void *);
	void (*lwi_on_signal)(void *);
	void  *lwi_cb_data;
};

/* NB: LWI_TIMEOUT ignores signals completely */
#define LWI_TIMEOUT(time, cb, data)			\
((struct l_wait_info) {					\
	.lwi_timeout	= time,				\
	.lwi_on_timeout	= cb,				\
	.lwi_cb_data	= data,				\
	.lwi_allow_intr	= 0				\
})

#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)	\
((struct l_wait_info) {					\
	.lwi_timeout	= time,				\
	.lwi_on_timeout	= cb,				\
	.lwi_cb_data	= data,				\
	.lwi_interval	= interval,			\
	.lwi_allow_intr	= 0				\
})

#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)	\
((struct l_wait_info) {					\
	.lwi_timeout	= time,				\
	.lwi_on_timeout	= time_cb,			\
	.lwi_on_signal	= sig_cb,			\
	.lwi_cb_data	= data,				\
	.lwi_allow_intr	= 0				\
})

#define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data)	\
((struct l_wait_info) {					\
	.lwi_timeout	= time,				\
	.lwi_on_timeout	= time_cb,			\
	.lwi_on_signal	= sig_cb,			\
	.lwi_cb_data	= data,				\
	.lwi_allow_intr	= 1				\
})

#define LWI_INTR(cb, data)  LWI_TIMEOUT_INTR(0, NULL, cb, data)

#define LUSTRE_FATAL_SIGS					\
	(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
	 sigmask(SIGQUIT) | sigmask(SIGALRM))
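
/*
 * Illustrative sketch only ('obj', its fields and 'my_timeout_cb' are
 * assumptions, not part of this header): typical first-form usage waits for
 * a flag, ignoring signals until the timeout expires and honouring
 * LUSTRE_FATAL_SIGS afterwards:
 *
 *	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
 *						  my_timeout_cb,
 *						  LWI_ON_SIGNAL_NOOP, obj);
 *	int rc = l_wait_event(obj->obj_waitq, obj->obj_ready, &lwi);
 *	if (rc == -ETIMEDOUT)
 *		CERROR("timed out waiting for obj readiness\n");
 *
 * The second form, LWI_TIMEOUT(), behaves the same way except that signals
 * are ignored completely, as described above.
 */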

#if !defined(HAVE___ADD_WAIT_QUEUE_EXCLUSIVE) && !defined(HAVE_WAIT_QUEUE_ENTRY)
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
					      wait_queue_t *wait)
{
	wait->flags |= WQ_FLAG_EXCLUSIVE;
	__add_wait_queue(q, wait);
}
#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */

/**
 * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
 * waiting threads, which is not always desirable because all threads will
 * be woken up again and again, even if the user only needs a few of them
 * to be active most of the time. This is not good for performance because
 * the cache can be polluted by different threads.
 *
 * A LIFO list can resolve this problem because we always wake up the most
 * recently active thread by default.
 *
 * NB: please don't call non-exclusive & exclusive wait on the same
 * waitq if add_wait_queue_exclusive_head is used.
 */
#define add_wait_queue_exclusive_head(waitq, link)		\
{								\
	unsigned long flags;					\
								\
	spin_lock_irqsave(&((waitq)->lock), flags);		\
	__add_wait_queue_exclusive(waitq, link);		\
	spin_unlock_irqrestore(&((waitq)->lock), flags);	\
}
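
/*
 * Illustrative sketch only ('svc', its fields and the one-second timeout are
 * assumptions, not part of this header): because exclusive waiters are queued
 * at the head, idle service threads parked like this are woken LIFO, so the
 * most recently active (cache-hot) thread picks up the next request:
 *
 *	struct l_wait_info lwi = LWI_TIMEOUT(cfs_time_seconds(1), NULL, NULL);
 *	int rc = l_wait_event_exclusive_head(svc->srv_waitq,
 *					     !list_empty(&svc->srv_req_queue),
 *					     &lwi);
 */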

/**
 * wait for @condition to become true, but no longer than the timeout
 * specified by @info.
 */
#define __l_wait_event(wq, condition, info, ret, l_add_wait)		\
do {									\
	wait_queue_entry_t __wait;					\
	long __timeout = info->lwi_timeout;				\
	sigset_t __blocked;						\
	int __allow_intr = info->lwi_allow_intr;			\
									\
	ret = 0;							\
	if (condition)							\
		break;							\
									\
	init_waitqueue_entry(&__wait, current);				\
	l_add_wait(&wq, &__wait);					\
									\
	/* Block all signals (just the non-fatal ones if no timeout). */ \
	if (info->lwi_on_signal != NULL &&				\
	    (__timeout == 0 || __allow_intr))				\
		__blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);	\
	else								\
		__blocked = cfs_block_sigsinv(0);			\
									\
	for (;;) {							\
		set_current_state(TASK_INTERRUPTIBLE);			\
									\
		/* To guarantee that the condition check is done after */ \
		/* setting the thread state to TASK_INTERRUPTIBLE, a */ \
		/* memory barrier is needed; otherwise out-of-order */	\
		/* execution may cause a race. Consider the following */ \
		/* real execution order: */				\
		/* 1. Thread1 checks condition on CPU1, gets false. */	\
		/* 2. Thread2 sets condition on CPU2. */		\
		/* 3. Thread2 calls wake_up() on CPU2 to wake the */	\
		/*    threads with state TASK_INTERRUPTIBLE | */	\
		/*    TASK_UNINTERRUPTIBLE, but Thread1's state is */	\
		/*    still TASK_RUNNING at that time. */		\
		/* 4. Thread1 sets its state to TASK_INTERRUPTIBLE on */ \
		/*    CPU1, then schedules. */				\
		/* If the '__timeout' variable is zero, Thread1 will */	\
		/* have no chance to check the condition again. */	\
		/* Generally, the window between the out-of-order step1 */ \
		/* and step4 is tiny, so step2 and step3 rarely fall */	\
		/* inside it; to some degree that explains why we */	\
		/* seldom hit the related trouble. But the race really */ \
		/* exists, especially since step1 and step4 can be */	\
		/* interrupted. Hence the barrier below, which prevents */ \
		/* Thread1's out-of-order execution. */			\
		smp_mb();						\
		if (condition)						\
			break;						\
									\
		if (__timeout == 0) {					\
			schedule();					\
		} else {						\
			long interval = info->lwi_interval ?		\
					min_t(long, info->lwi_interval,	\
					      __timeout) : __timeout;	\
			long remaining = schedule_timeout(interval);	\
									\
			__timeout -= interval - remaining;		\
			if (__timeout == 0) {				\
				if (info->lwi_on_timeout == NULL ||	\
				    info->lwi_on_timeout(info->lwi_cb_data)) { \
					ret = -ETIMEDOUT;		\
					break;				\
				}					\
				/* Take signals after the timeout expires. */ \
				if (info->lwi_on_signal != NULL)	\
					(void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
			}						\
		}							\
									\
		if (condition)						\
			break;						\
		if (signal_pending(current)) {				\
			if (info->lwi_on_signal != NULL &&		\
			    (__timeout == 0 || __allow_intr)) {		\
				if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
					info->lwi_on_signal(info->lwi_cb_data); \
				ret = -EINTR;				\
				break;					\
			}						\
			/* We have to do this here because some signals */ \
			/* are not blockable - i.e. from strace(1). */	\
			/* In these cases we want to schedule_timeout() */ \
			/* again, because we don't want that to return */ \
			/* -EINTR when the RPC actually succeeded. */	\
			/* The recalc_sigpending() below will deliver */ \
			/* the signal properly. */			\
			cfs_clear_sigpending();				\
		}							\
	}								\
									\
	cfs_restore_sigs(__blocked);					\
									\
	set_current_state(TASK_RUNNING);				\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define l_wait_event(wq, condition, info)			\
({								\
	int __ret;						\
	struct l_wait_info *__info = (info);			\
								\
	__l_wait_event(wq, condition, __info,			\
		       __ret, add_wait_queue);			\
	__ret;							\
})

#define l_wait_event_exclusive(wq, condition, info)		\
({								\
	int __ret;						\
	struct l_wait_info *__info = (info);			\
								\
	__l_wait_event(wq, condition, __info,			\
		       __ret, add_wait_queue_exclusive);	\
	__ret;							\
})

#define l_wait_event_exclusive_head(wq, condition, info)	\
({								\
	int __ret;						\
	struct l_wait_info *__info = (info);			\
								\
	__l_wait_event(wq, condition, __info,			\
		       __ret, add_wait_queue_exclusive_head);	\
	__ret;							\
})

#define l_wait_condition(wq, condition)				\
({								\
	struct l_wait_info lwi = { 0 };				\
	l_wait_event(wq, condition, &lwi);			\
})

#define l_wait_condition_exclusive(wq, condition)		\
({								\
	struct l_wait_info lwi = { 0 };				\
	l_wait_event_exclusive(wq, condition, &lwi);		\
})

#define l_wait_condition_exclusive_head(wq, condition)		\
({								\
	struct l_wait_info lwi = { 0 };				\
	l_wait_event_exclusive_head(wq, condition, &lwi);	\
})
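
/*
 * Illustrative sketch only (the 'obj' fields are assumptions): the
 * l_wait_condition*() wrappers are the simplest form - no timeout and no
 * signal handling, so the ONLY thing that can unblock the caller is the
 * condition becoming true:
 *
 *	l_wait_condition(obj->obj_waitq, obj->obj_ready);
 */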

#endif /* _LUSTRE_LIB_H */