/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre_lib.h
 *
 * Basic Lustre library routines.
 */
#ifndef _LUSTRE_LIB_H
#define _LUSTRE_LIB_H

#include <linux/signal.h>
#include <libcfs/libcfs.h>
#include <lustre/lustre_idl.h>
#include <lustre_ver.h>
#include <lustre_cfg.h>

/* target.c */
struct ptlrpc_request;
struct obd_export;
struct lu_target;
struct l_wait_info;
#include <lustre_ha.h>
#include <lustre_net.h>
#ifdef HAVE_SERVER_SUPPORT
void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
			  int error);
int target_handle_connect(struct ptlrpc_request *req);
int target_handle_disconnect(struct ptlrpc_request *req);
void target_destroy_export(struct obd_export *exp);
int target_handle_ping(struct ptlrpc_request *req);
void target_committed_to_req(struct ptlrpc_request *req);
void target_cancel_recovery_timer(struct obd_device *obd);
void target_stop_recovery_thread(struct obd_device *obd);
void target_cleanup_recovery(struct obd_device *obd);
int target_queue_recovery_request(struct ptlrpc_request *req,
				  struct obd_device *obd);
int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
		   struct l_wait_info *lwi);
#endif
int target_pack_pool_reply(struct ptlrpc_request *req);
int do_set_info_async(struct obd_import *imp,
		      int opcode, int version,
		      obd_count keylen, void *key,
		      obd_count vallen, void *val,
		      struct ptlrpc_request_set *set);

void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
/*
 * l_wait_event is a flexible sleeping function, permitting simple caller
 * configuration of interrupt and timeout sensitivity along with actions to
 * be performed in the event of either exception.
 *
 * The first form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
 *                                           intr_handler, callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending.  It
 * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
 * 'condition' becomes true, it optionally calls the specified 'intr_handler'
 * if not NULL, and returns -EINTR.
 *
 * If a non-zero timeout is specified, signals are ignored until the timeout
 * has expired.  At this time, if 'timeout_handler' is not NULL it is called.
 * If it returns FALSE l_wait_event() continues to wait as described above with
 * signals enabled.  Otherwise it returns -ETIMEDOUT.
 *
 * LWI_INTR(intr_handler, callback_data) is shorthand for
 * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
 *
 * The second form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This form is the same as the first except that it COMPLETELY IGNORES
 * SIGNALS.  The caller must therefore beware that if 'timeout' is zero, or if
 * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
 * can unblock the current process is 'condition' becoming TRUE.
 *
 * Another form of usage is:
 * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
 *                                               timeout_handler);
 * rc = l_wait_event(waitq, condition, &lwi);
 * This is the same as the previous case, but 'condition' is checked once every
 * 'interval' jiffies (if non-zero).  A worked example of these forms follows
 * this comment.
 *
 * Subtle synchronization point: this macro does *not* necessarily take the
 * wait-queue spin-lock before returning, and hence the following idiom is
 * safe ONLY when the caller provides some external locking:
 *
 *             Thread1                          Thread2
 *
 *   l_wait_event(&obj->wq, ....);                                       (1)
 *
 *                                    wake_up(&obj->wq):                 (2)
 *                                         spin_lock(&q->lock);          (2.1)
 *                                         __wake_up_common(q, ...);     (2.2)
 *                                         spin_unlock(&q->lock, flags); (2.3)
 *
 *   OBD_FREE_PTR(obj);                                                  (3)
 *
 * As l_wait_event() may "short-cut" execution and return without taking the
 * wait-queue spin-lock, some additional synchronization is necessary to
 * guarantee that step (3) can begin only after (2.3) finishes.
 *
 * XXX nikita: some ptlrpc daemon threads have races of that sort.
 */
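/*
 * Worked example of the first form (an illustrative sketch only; the names
 * 'ping_timeout', 'ping_interrupted', 'obj', 'obj->wq' and 'obj->done' are
 * hypothetical, not APIs defined by this header).  Returning TRUE from the
 * timeout handler makes the wait fail with -ETIMEDOUT; returning FALSE
 * resumes it with signals enabled, as documented above:
 *
 *	static int ping_timeout(void *data)
 *	{
 *		CERROR("ping timed out\n");
 *		return 1;
 *	}
 *
 *	static void ping_interrupted(void *data)
 *	{
 *		CDEBUG(D_NET, "ping interrupted by a fatal signal\n");
 *	}
 *
 *	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(10),
 *						  ping_timeout,
 *						  ping_interrupted, NULL);
 *	int rc = l_wait_event(obj->wq, obj->done, &lwi);
 *
 * As for the race diagrammed above, one sufficient form of external locking
 * is for Thread2 to set 'condition' and call wake_up() while holding a lock
 * on 'obj', and for Thread1 to take and release that same lock after
 * l_wait_event() returns and before OBD_FREE_PTR(obj); Thread1 then cannot
 * reach step (3) until Thread2 has completed (2.3).
 */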
static inline int back_to_sleep(void *arg)
{
	return 0;
}

#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))
struct l_wait_info {
	cfs_duration_t lwi_timeout;
	cfs_duration_t lwi_interval;
	int            lwi_allow_intr;
	int  (*lwi_on_timeout)(void *);
	void (*lwi_on_signal)(void *);
	void  *lwi_cb_data;
};
/* NB: LWI_TIMEOUT ignores signals completely */
#define LWI_TIMEOUT(time, cb, data)		\
((struct l_wait_info) {				\
	.lwi_timeout    = time,			\
	.lwi_on_timeout = cb,			\
	.lwi_cb_data    = data,			\
	.lwi_interval   = 0,			\
	.lwi_allow_intr = 0			\
})
#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)	\
((struct l_wait_info) {				\
	.lwi_timeout    = time,			\
	.lwi_on_timeout = cb,			\
	.lwi_cb_data    = data,			\
	.lwi_interval   = interval,		\
	.lwi_allow_intr = 0			\
})
#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)	\
((struct l_wait_info) {				\
	.lwi_timeout    = time,			\
	.lwi_on_timeout = time_cb,		\
	.lwi_on_signal  = sig_cb,		\
	.lwi_cb_data    = data,			\
	.lwi_interval   = 0,			\
	.lwi_allow_intr = 0			\
})
#define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data)	\
((struct l_wait_info) {				\
	.lwi_timeout    = time,			\
	.lwi_on_timeout = time_cb,		\
	.lwi_on_signal  = sig_cb,		\
	.lwi_cb_data    = data,			\
	.lwi_interval   = 0,			\
	.lwi_allow_intr = 1			\
})
#define LWI_INTR(cb, data)  LWI_TIMEOUT_INTR(0, NULL, cb, data)
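/*
 * For example (an illustrative sketch; 'obj' and its fields are
 * hypothetical): a wait with no timeout that should remain killable but
 * needs no signal callback can pass LWI_ON_SIGNAL_NOOP, which
 * __l_wait_event() checks for explicitly before invoking lwi_on_signal:
 *
 *	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
 *	int rc = l_wait_event(obj->wq, obj->ready, &lwi);
 *
 * Here rc is 0 once 'obj->ready' is set, or -EINTR if one of
 * LUSTRE_FATAL_SIGS (defined below) arrives first.
 */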
#define LUSTRE_FATAL_SIGS					\
	(sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
	 sigmask(SIGQUIT) | sigmask(SIGALRM))
/**
 * wait for @condition to become true, but no longer than timeout, specified
 * by @info.
 */
#define __l_wait_event(wq, condition, info, ret, l_add_wait)		\
do {									\
	wait_queue_t __wait;						\
	cfs_duration_t __timeout = info->lwi_timeout;			\
	sigset_t __blocked;						\
	int __allow_intr = info->lwi_allow_intr;			\
									\
	ret = 0;							\
	if (condition)							\
		break;							\
									\
	init_waitqueue_entry_current(&__wait);				\
	l_add_wait(&wq, &__wait);					\
									\
	/* Block all signals (just the non-fatal ones if no timeout). */ \
	if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr)) \
		__blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);	\
	else								\
		__blocked = cfs_block_sigsinv(0);			\
									\
	for (;;) {							\
		unsigned __wstate;					\
									\
		__wstate = info->lwi_on_signal != NULL &&		\
			   (__timeout == 0 || __allow_intr) ?		\
			   TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;	\
									\
		set_current_state(TASK_INTERRUPTIBLE);			\
									\
		if (condition)						\
			break;						\
									\
		if (__timeout == 0) {					\
			waitq_wait(&__wait, __wstate);			\
		} else {						\
			cfs_duration_t interval = info->lwi_interval ?	\
					min_t(cfs_duration_t,		\
					      info->lwi_interval,	\
					      __timeout) : __timeout;	\
			cfs_duration_t remaining = waitq_timedwait(&__wait, \
						   __wstate, interval);	\
									\
			__timeout = cfs_time_sub(__timeout,		\
				    cfs_time_sub(interval, remaining));	\
			if (__timeout == 0) {				\
				if (info->lwi_on_timeout == NULL ||	\
				    info->lwi_on_timeout(info->lwi_cb_data)) { \
					ret = -ETIMEDOUT;		\
					break;				\
				}					\
				/* Take signals after the timeout expires. */ \
				if (info->lwi_on_signal != NULL)	\
					(void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS); \
			}						\
		}							\
									\
		if (condition)						\
			break;						\
		if (cfs_signal_pending()) {				\
			if (info->lwi_on_signal != NULL &&		\
			    (__timeout == 0 || __allow_intr)) {		\
				if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
					info->lwi_on_signal(info->lwi_cb_data); \
				ret = -EINTR;				\
				break;					\
			}						\
			/* We have to do this here because some signals */ \
			/* are not blockable - ie from strace(1).       */ \
			/* In these cases we want to schedule_timeout() */ \
			/* again, because we don't want that to return  */ \
			/* -EINTR when the RPC actually succeeded.      */ \
			/* the recalc_sigpending() below will deliver the */ \
			/* signal properly.                             */ \
			cfs_clear_sigpending();				\
		}							\
	}								\
									\
	cfs_restore_sigs(__blocked);					\
									\
	set_current_state(TASK_RUNNING);				\
	remove_wait_queue(&wq, &__wait);				\
} while (0)
#define l_wait_event(wq, condition, info)		\
({							\
	int                 __ret;			\
	struct l_wait_info *__info = (info);		\
							\
	__l_wait_event(wq, condition, __info,		\
		       __ret, add_wait_queue);		\
	__ret;						\
})
#define l_wait_event_exclusive(wq, condition, info)	\
({							\
	int                 __ret;			\
	struct l_wait_info *__info = (info);		\
							\
	__l_wait_event(wq, condition, __info,		\
		       __ret, add_wait_queue_exclusive); \
	__ret;						\
})
#define l_wait_event_exclusive_head(wq, condition, info)	\
({							\
	int                 __ret;			\
	struct l_wait_info *__info = (info);		\
							\
	__l_wait_event(wq, condition, __info,		\
		       __ret, add_wait_queue_exclusive_head); \
	__ret;						\
})
#define l_wait_condition(wq, condition)			\
({							\
	struct l_wait_info lwi = { 0 };			\
	l_wait_event(wq, condition, &lwi);		\
})
#define l_wait_condition_exclusive(wq, condition)	\
({							\
	struct l_wait_info lwi = { 0 };			\
	l_wait_event_exclusive(wq, condition, &lwi);	\
})
#define l_wait_condition_exclusive_head(wq, condition)	\
({							\
	struct l_wait_info lwi = { 0 };			\
	l_wait_event_exclusive_head(wq, condition, &lwi); \
})
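/*
 * The l_wait_condition*() wrappers are the degenerate case: a zeroed
 * l_wait_info has no timeout, no interval and no signal handler, so (per
 * the second usage form documented above) ONLY 'condition' becoming TRUE
 * can end the sleep.  An illustrative sketch ('obj' is hypothetical):
 *
 *	l_wait_condition(obj->wq, obj->stopped);
 *
 * is equivalent to:
 *
 *	struct l_wait_info lwi = { 0 };
 *	l_wait_event(obj->wq, obj->stopped, &lwi);
 */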
#endif /* _LUSTRE_LIB_H */