/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre_lib.h
 *
 * Basic Lustre library routines.
 */

#ifndef _LUSTRE_LIB_H
#define _LUSTRE_LIB_H
#include <libcfs/libcfs.h>
#include <lustre/lustre_idl.h>
#include <lustre_ver.h>
#include <lustre_cfg.h>
#if defined(__linux__)
#include <linux/lustre_lib.h>
#else
#error Unsupported operating system.
#endif
struct ptlrpc_request;

#include <lustre_ha.h>
#include <lustre_net.h>
#ifdef HAVE_SERVER_SUPPORT
void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
			  int error);
int target_handle_connect(struct ptlrpc_request *req);
int target_handle_disconnect(struct ptlrpc_request *req);
void target_destroy_export(struct obd_export *exp);
int target_handle_ping(struct ptlrpc_request *req);
void target_committed_to_req(struct ptlrpc_request *req);
void target_cancel_recovery_timer(struct obd_device *obd);
void target_stop_recovery_thread(struct obd_device *obd);
void target_cleanup_recovery(struct obd_device *obd);
int target_queue_recovery_request(struct ptlrpc_request *req,
				  struct obd_device *obd);
int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
		   struct l_wait_info *lwi);
#endif
int target_pack_pool_reply(struct ptlrpc_request *req);
int do_set_info_async(struct obd_import *imp,
		      int opcode, int version,
		      obd_count keylen, void *key,
		      obd_count vallen, void *val,
		      struct ptlrpc_request_set *set);
#define OBD_RECOVERY_MAX_TIME (obd_timeout * 18) /* b13079 */
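/* For example, assuming the common default obd_timeout of 100 seconds, this
 * caps recovery at 1800 s (30 minutes); the limit scales with the tunable. */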
void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
struct client_obd *client_conn2cli(struct lustre_handle *conn);
struct obd_client_handle {
	struct lustre_handle	 och_fh;
	struct lu_fid		 och_fid;
	struct md_open_data	*och_mod;
	struct lustre_handle	 och_lease_handle; /* open lock for lease */
	__u32			 och_magic;
	fmode_t			 och_flags;
};

#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
/* Until such time as we get_info the per-stripe maximum from the OST,
 * we define this to be 2T - 4k, which is the ext3 maxbytes. */
#define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
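/* Worked out: 0x1fffffff000 = 2^41 - 2^12 = 2199023255552 - 4096 bytes,
 * i.e. exactly 2TiB minus one 4k block. */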
/* Special values for remove LOV EA from disk */
#define LOVEA_DELETE_VALUES(size, count, offset) ((size) == 0 && (count) == 0 && \
						  (offset) == (typeof(offset))(-1))

#define LMVEA_DELETE_VALUES(count, offset) ((count) == 0 && \
					    (offset) == (typeof(offset))(-1))
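/*
 * Illustrative check (the lmm_* fields below are hypothetical names): EA
 * fields carrying these sentinel values mean "remove the EA" rather than
 * "store this striping":
 *
 *	if (LOVEA_DELETE_VALUES(lmm->lmm_stripe_size, lmm->lmm_stripe_count,
 *				lmm->lmm_stripe_offset))
 *		... delete the LOV EA from disk instead of updating it ...
 */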
/* #define POISON_BULK 0 */
/*
 * l_wait_event is a flexible sleeping function, permitting simple caller
 * configuration of interrupt and timeout sensitivity along with actions to
 * be performed in the event of either exception.
 *
 * The first form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
 *                                           intr_handler, callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending. It
 * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
 * 'condition' becomes true, it optionally calls the specified 'intr_handler'
 * if not NULL, and returns -EINTR.
 *
 * If a non-zero timeout is specified, signals are ignored until the timeout
 * has expired. At this time, if 'timeout_handler' is not NULL it is called.
 * If it returns FALSE l_wait_event() continues to wait as described above with
 * signals enabled. Otherwise it returns -ETIMEDOUT.
 *
 * LWI_INTR(intr_handler, callback_data) is shorthand for
 * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
 *
 * The second form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This form is the same as the first except that it COMPLETELY IGNORES
 * SIGNALS. The caller must therefore beware that if 'timeout' is zero, or if
 * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
 * can unblock the current process is 'condition' becoming TRUE.
 *
 * Another form of usage is:
 * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
 *                                               timeout_handler, callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 * This is the same as the previous case, but 'condition' is checked once every
 * 'interval' jiffies (if non-zero).
 *
 * Subtle synchronization point: this macro does *not* necessarily take the
 * wait-queue spin-lock before returning, and, hence, the following idiom is
 * safe ONLY when the caller provides some external locking:
 *
 *     Thread1                            Thread2
 *
 *     l_wait_event(&obj->wq, ....);                                       (1)
 *
 *                                    wake_up(&obj->wq):                   (2)
 *                                        spin_lock(&q->lock);             (2.1)
 *                                        __wake_up_common(q, ...);        (2.2)
 *                                        spin_unlock(&q->lock);           (2.3)
 *
 *     OBD_FREE_PTR(obj);                                                  (3)
 *
 * As l_wait_event() may "short-cut" execution and return without taking the
 * wait-queue spin-lock, some additional synchronization is necessary to
 * guarantee that step (3) can begin only after (2.3) finishes.
 *
 * XXX nikita: some ptlrpc daemon threads have races of that sort.
 */
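/*
 * A minimal usage sketch (illustrative only: 'req_wq', 'req_done' and
 * 'my_timeout_cb' are hypothetical names, and the timeout value assumes
 * jiffies obtained via cfs_time_seconds()):
 *
 *	static int my_timeout_cb(void *data)
 *	{
 *		return 0;	// not done: keep waiting, but with fatal
 *				// signals enabled from now on
 *	}
 *
 *	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(30),
 *						  my_timeout_cb,
 *						  LWI_ON_SIGNAL_NOOP, NULL);
 *	rc = l_wait_event(req_wq, req_done != 0, &lwi);
 *
 * rc == 0 means 'req_done' became true; -EINTR means a fatal signal arrived
 * after the 30-second timeout re-enabled signals. -ETIMEDOUT is never seen
 * here because my_timeout_cb() returns FALSE.
 */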
static inline int back_to_sleep(void *arg)
{
	return 0;
}
#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))
struct l_wait_info {
	cfs_duration_t lwi_timeout;
	cfs_duration_t lwi_interval;
	int            lwi_allow_intr;
	int  (*lwi_on_timeout)(void *);
	void (*lwi_on_signal)(void *);
	void  *lwi_cb_data;
};
/* NB: LWI_TIMEOUT ignores signals completely */
#define LWI_TIMEOUT(time, cb, data)             \
((struct l_wait_info) {                         \
	.lwi_timeout    = time,                 \
	.lwi_on_timeout = cb,                   \
	.lwi_cb_data    = data,                 \
	.lwi_interval   = 0,                    \
	.lwi_allow_intr = 0                     \
})

#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)  \
((struct l_wait_info) {                                 \
	.lwi_timeout    = time,                         \
	.lwi_on_timeout = cb,                           \
	.lwi_cb_data    = data,                         \
	.lwi_interval   = interval,                     \
	.lwi_allow_intr = 0                             \
})

#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)   \
((struct l_wait_info) {                                 \
	.lwi_timeout    = time,                         \
	.lwi_on_timeout = time_cb,                      \
	.lwi_on_signal  = sig_cb,                       \
	.lwi_cb_data    = data,                         \
	.lwi_interval   = 0,                            \
	.lwi_allow_intr = 0                             \
})

#define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data)       \
((struct l_wait_info) {                                         \
	.lwi_timeout    = time,                                 \
	.lwi_on_timeout = time_cb,                              \
	.lwi_on_signal  = sig_cb,                               \
	.lwi_cb_data    = data,                                 \
	.lwi_interval   = 0,                                    \
	.lwi_allow_intr = 1                                     \
})

#define LWI_INTR(cb, data)  LWI_TIMEOUT_INTR(0, NULL, cb, data)
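/*
 * A minimal sketch of how the initializers above differ (timeout values
 * assume jiffies via cfs_time_seconds(); 'sig_cb' and 'data' are
 * hypothetical):
 *
 *	// Sleep until 'cond', ignoring signals entirely; -ETIMEDOUT after 10s.
 *	struct l_wait_info lwi1 = LWI_TIMEOUT(cfs_time_seconds(10), NULL, NULL);
 *
 *	// As above, but fatal signals may interrupt the wait from the start.
 *	struct l_wait_info lwi2 = LWI_TIMEOUT_INTR_ALL(cfs_time_seconds(10),
 *						       NULL, sig_cb, data);
 */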
#ifdef __KERNEL__

/**
 * wait for @condition to become true, but no longer than timeout, specified
 * by @info.
 */
#define __l_wait_event(wq, condition, info, ret, l_add_wait)                   \
do {                                                                           \
	wait_queue_t __wait;                                                   \
	cfs_duration_t __timeout = info->lwi_timeout;                          \
	sigset_t __blocked;                                                    \
	int __allow_intr = info->lwi_allow_intr;                               \
										\
	ret = 0;                                                               \
	if (condition)                                                         \
		break;                                                         \
										\
	init_waitqueue_entry_current(&__wait);                                 \
	l_add_wait(&wq, &__wait);                                              \
										\
	/* Block all signals (just the non-fatal ones if no timeout). */       \
	if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr))   \
		__blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);              \
	else                                                                   \
		__blocked = cfs_block_sigsinv(0);                              \
										\
	for (;;) {                                                             \
		unsigned __wstate;                                             \
										\
		__wstate = info->lwi_on_signal != NULL &&                      \
			   (__timeout == 0 || __allow_intr) ?                  \
			TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;             \
										\
		set_current_state(TASK_INTERRUPTIBLE);                         \
										\
		if (condition)                                                 \
			break;                                                 \
										\
		if (__timeout == 0) {                                          \
			waitq_wait(&__wait, __wstate);                         \
		} else {                                                       \
			cfs_duration_t interval = info->lwi_interval ?         \
					     min_t(cfs_duration_t,             \
						   info->lwi_interval,         \
						   __timeout) : __timeout;     \
			cfs_duration_t remaining = waitq_timedwait(&__wait,    \
						   __wstate, interval);        \
			__timeout = cfs_time_sub(__timeout,                    \
					    cfs_time_sub(interval, remaining));\
			if (__timeout == 0) {                                  \
				if (info->lwi_on_timeout == NULL ||            \
				    info->lwi_on_timeout(info->lwi_cb_data)) { \
					ret = -ETIMEDOUT;                      \
					break;                                 \
				}                                              \
				/* Take signals after the timeout expires. */  \
				if (info->lwi_on_signal != NULL)               \
				    (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
			}                                                      \
		}                                                              \
										\
		set_current_state(TASK_RUNNING);                               \
										\
		if (condition)                                                 \
			break;                                                 \
		if (cfs_signal_pending()) {                                    \
			if (info->lwi_on_signal != NULL &&                     \
			    (__timeout == 0 || __allow_intr)) {                \
				if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
					info->lwi_on_signal(info->lwi_cb_data);\
				ret = -EINTR;                                  \
				break;                                         \
			}                                                      \
			/* We have to do this here because some signals */     \
			/* are not blockable - ie from strace(1).       */     \
			/* In these cases we want to schedule_timeout() */     \
			/* again, because we don't want that to return  */     \
			/* -EINTR when the RPC actually succeeded.      */     \
			/* the recalc_sigpending() below will deliver the */   \
			/* signal properly.                             */     \
			cfs_clear_sigpending();                                \
		}                                                              \
	}                                                                      \
										\
	cfs_restore_sigs(__blocked);                                           \
										\
	set_current_state(TASK_RUNNING);                                       \
	remove_wait_queue(&wq, &__wait);                                       \
} while (0)
#else /* !__KERNEL__ */
#define __l_wait_event(wq, condition, info, ret, l_add_wait)           \
do {                                                                   \
	long __timeout = info->lwi_timeout;                            \
	long __now;                                                    \
	long __then = 0;                                               \
	int  __timed_out = 0;                                          \
	int  __interval = obd_timeout;                                 \
									\
	ret = 0;                                                       \
	if (condition)                                                 \
		break;                                                 \
	if (__timeout != 0)                                            \
		__then = time(NULL);                                   \
									\
	if (__timeout && __timeout < __interval)                       \
		__interval = __timeout;                                \
	if (info->lwi_interval && info->lwi_interval < __interval)     \
		__interval = info->lwi_interval;                       \
									\
	while (!(condition)) {                                         \
		liblustre_wait_event(__interval);                      \
		if (condition)                                         \
			break;                                         \
		if (!__timed_out && info->lwi_timeout != 0) {          \
			__now = time(NULL);                            \
			__timeout -= __now - __then;                   \
			__then = __now;                                \
			if (__timeout > 0)                             \
				continue;                              \
			__timeout = 0;                                 \
			__timed_out = 1;                               \
			if (info->lwi_on_timeout == NULL ||            \
			    info->lwi_on_timeout(info->lwi_cb_data)) { \
				ret = -ETIMEDOUT;                      \
				break;                                 \
			}                                              \
		}                                                      \
	}                                                              \
} while (0)
#endif /* __KERNEL__ */
#define l_wait_event(wq, condition, info)                       \
({                                                              \
	int                 __ret;                              \
	struct l_wait_info *__info = (info);                    \
								\
	__l_wait_event(wq, condition, __info,                   \
		       __ret, add_wait_queue);                  \
	__ret;                                                  \
})

#define l_wait_event_exclusive(wq, condition, info)             \
({                                                              \
	int                 __ret;                              \
	struct l_wait_info *__info = (info);                    \
								\
	__l_wait_event(wq, condition, __info,                   \
		       __ret, add_wait_queue_exclusive);        \
	__ret;                                                  \
})

#define l_wait_event_exclusive_head(wq, condition, info)        \
({                                                              \
	int                 __ret;                              \
	struct l_wait_info *__info = (info);                    \
								\
	__l_wait_event(wq, condition, __info,                   \
		       __ret, add_wait_queue_exclusive_head);   \
	__ret;                                                  \
})
#define l_wait_condition(wq, condition)                         \
({                                                              \
	struct l_wait_info lwi = { 0 };                         \
	l_wait_event(wq, condition, &lwi);                      \
})

#define l_wait_condition_exclusive(wq, condition)               \
({                                                              \
	struct l_wait_info lwi = { 0 };                         \
	l_wait_event_exclusive(wq, condition, &lwi);            \
})

#define l_wait_condition_exclusive_head(wq, condition)          \
({                                                              \
	struct l_wait_info lwi = { 0 };                         \
	l_wait_event_exclusive_head(wq, condition, &lwi);       \
})
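/*
 * The l_wait_condition() family is the degenerate case: a zeroed l_wait_info
 * has no timeout and no signal handler, so the ONLY way to wake up is for
 * 'condition' to become true. A hypothetical example:
 *
 *	l_wait_condition(thread->t_ctl_waitq, thread_is_stopped(thread));
 */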
#ifdef __KERNEL__
#define LIBLUSTRE_CLIENT (0)
#else
#define LIBLUSTRE_CLIENT (1)
#endif

#endif /* _LUSTRE_LIB_H */