/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2013, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre_lib.h
 *
 * Basic Lustre library routines.
 */

#ifndef _LUSTRE_LIB_H
#define _LUSTRE_LIB_H
#include <libcfs/libcfs.h>
#include <lustre/lustre_idl.h>
#include <lustre_ver.h>
#include <lustre_cfg.h>

#if defined(__linux__)
#include <linux/lustre_lib.h>
#elif defined(__APPLE__)
#include <darwin/lustre_lib.h>
#elif defined(__WINNT__)
#include <winnt/lustre_lib.h>
#else
#error Unsupported operating system.
#endif
struct ptlrpc_request;

#include <lustre_ha.h>
#include <lustre_net.h>
#ifdef HAVE_SERVER_SUPPORT
void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
                          int error);
int target_handle_connect(struct ptlrpc_request *req);
int target_handle_disconnect(struct ptlrpc_request *req);
void target_destroy_export(struct obd_export *exp);
int target_handle_ping(struct ptlrpc_request *req);
void target_committed_to_req(struct ptlrpc_request *req);
void target_cancel_recovery_timer(struct obd_device *obd);
void target_stop_recovery_thread(struct obd_device *obd);
void target_cleanup_recovery(struct obd_device *obd);
int target_queue_recovery_request(struct ptlrpc_request *req,
                                  struct obd_device *obd);
int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
                   struct l_wait_info *lwi);
#endif /* HAVE_SERVER_SUPPORT */
int target_pack_pool_reply(struct ptlrpc_request *req);
int do_set_info_async(struct obd_import *imp,
                      int opcode, int version,
                      obd_count keylen, void *key,
                      obd_count vallen, void *val,
                      struct ptlrpc_request_set *set);

#define OBD_RECOVERY_MAX_TIME (obd_timeout * 18) /* b13079 */

void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
struct client_obd *client_conn2cli(struct lustre_handle *conn);

struct obd_client_handle {
        struct lustre_handle     och_fh;
        struct lu_fid            och_fid;
        struct md_open_data     *och_mod;
        struct lustre_handle     och_lease_handle; /* open lock for lease */
};
#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed

void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);

/* Until such time as we get_info the per-stripe maximum from the OST,
 * we define this to be 2T - 4k, which is the ext3 maxbytes. */
#define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
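
/*
 * For reference, the arithmetic behind the constant above:
 * 2 TiB - 4 KiB = 2^41 - 2^12 = 0x20000000000 - 0x1000 = 0x1fffffff000.
 */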

/* Special values for removing a LOV EA from disk */
#define LOVEA_DELETE_VALUES(size, count, offset) ((size) == 0 &&  \
                                                  (count) == 0 && \
                                                  (offset) == (typeof(offset))(-1))

#define LMVEA_DELETE_VALUES(count, offset) ((count) == 0 && \
                                            (offset) == (typeof(offset))(-1))
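
/*
 * For example (an illustrative check, not a definition from this header):
 * a struct lov_user_md 'lum' asking for the striping EA to be removed,
 * rather than set, would satisfy
 *
 *      LOVEA_DELETE_VALUES(lum->lmm_stripe_size, lum->lmm_stripe_count,
 *                          lum->lmm_stripe_offset)
 *
 * i.e. stripe size 0, stripe count 0 and stripe offset -1.
 */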

/* #define POISON_BULK 0 */

/**
 * l_wait_event is a flexible sleeping function, permitting simple caller
 * configuration of interrupt and timeout sensitivity along with actions to
 * be performed in the event of either exception.
 *
 * The first form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
 *                                           intr_handler, callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending.  It
 * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
 * 'condition' becomes true, it optionally calls the specified 'intr_handler'
 * if not NULL, and returns -EINTR.
 *
 * If a non-zero timeout is specified, signals are ignored until the timeout
 * has expired.  At this time, if 'timeout_handler' is not NULL it is called.
 * If it returns FALSE l_wait_event() continues to wait as described above with
 * signals enabled.  Otherwise it returns -ETIMEDOUT.
 *
 * LWI_INTR(intr_handler, callback_data) is shorthand for
 * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
 *
 * The second form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This form is the same as the first except that it COMPLETELY IGNORES
 * SIGNALS.  The caller must therefore beware that if 'timeout' is zero, or if
 * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
 * can unblock the current process is 'condition' becoming TRUE.
 *
 * Another form of usage is:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
 *                                               timeout_handler,
 *                                               callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This is the same as the previous case, but the condition is checked once
 * every 'interval' jiffies (if non-zero).
 *
 * Subtle synchronization point: this macro does *not* necessarily take the
 * wait-queue spin-lock before returning, and, hence, the following idiom is
 * safe ONLY when the caller provides some external locking:
 *
 *     l_wait_event(&obj->wq, ....);                                      (1)
 *
 *     wake_up(&obj->wq):                                                 (2)
 *         spin_lock(&q->lock);                                           (2.1)
 *         __wake_up_common(q, ...);                                      (2.2)
 *         spin_unlock(&q->lock, flags);                                  (2.3)
 *
 *     OBD_FREE_PTR(obj);                                                 (3)
 *
 * As l_wait_event() may "short-cut" execution and return without taking the
 * wait-queue spin-lock, some additional synchronization is necessary to
 * guarantee that step (3) can begin only after (2.3) finishes.
 *
 * XXX nikita: some ptlrpc daemon threads have races of that sort.
 */
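
/*
 * One possible idiom for that external locking (a sketch only; 'obj_lock'
 * and 'obj->done' are hypothetical names, not part of this header): the
 * waker updates the condition and calls wake_up() under a lock, and the
 * code that frees the object takes the same lock once before freeing, so
 * the free cannot start while the wake-up is still in progress:
 *
 *      Waker:
 *              spin_lock(&obj_lock);
 *              obj->done = 1;
 *              wake_up(&obj->wq);
 *              spin_unlock(&obj_lock);
 *
 *      Waiter:
 *              l_wait_event(&obj->wq, obj->done, &lwi);
 *              spin_lock(&obj_lock);
 *              spin_unlock(&obj_lock);
 *              OBD_FREE_PTR(obj);
 */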

static inline int back_to_sleep(void *arg)
{
        return 0;
}

#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))

struct l_wait_info {
        cfs_duration_t lwi_timeout;
        cfs_duration_t lwi_interval;
        int            lwi_allow_intr;
        int  (*lwi_on_timeout)(void *);
        void (*lwi_on_signal)(void *);
        void  *lwi_cb_data;
};

/* NB: LWI_TIMEOUT ignores signals completely */
#define LWI_TIMEOUT(time, cb, data)             \
((struct l_wait_info) {                         \
        .lwi_timeout    = time,                 \
        .lwi_on_timeout = cb,                   \
        .lwi_cb_data    = data,                 \
        .lwi_allow_intr = 0                     \
})

#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)  \
((struct l_wait_info) {                                 \
        .lwi_timeout    = time,                         \
        .lwi_on_timeout = cb,                           \
        .lwi_cb_data    = data,                         \
        .lwi_interval   = interval,                     \
        .lwi_allow_intr = 0                             \
})

#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)   \
((struct l_wait_info) {                                 \
        .lwi_timeout    = time,                         \
        .lwi_on_timeout = time_cb,                      \
        .lwi_on_signal  = sig_cb,                       \
        .lwi_cb_data    = data,                         \
        .lwi_allow_intr = 0                             \
})

#define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data)       \
((struct l_wait_info) {                                         \
        .lwi_timeout    = time,                                 \
        .lwi_on_timeout = time_cb,                              \
        .lwi_on_signal  = sig_cb,                               \
        .lwi_cb_data    = data,                                 \
        .lwi_allow_intr = 1                                     \
})

#define LWI_INTR(cb, data)  LWI_TIMEOUT_INTR(0, NULL, cb, data)
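
/*
 * A minimal usage sketch of the macros above (illustrative only; 'req_waitq',
 * 'req_done', 'my_timeout_cb' and 'my_intr_cb' are hypothetical names):
 *
 *      struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(obd_timeout),
 *                                                my_timeout_cb, my_intr_cb,
 *                                                req);
 *      rc = l_wait_event(req_waitq, req_done(req), &lwi);
 *
 * rc == 0 means 'req_done(req)' became true; rc == -ETIMEDOUT means the
 * timeout expired and my_timeout_cb() returned non-zero; rc == -EINTR means
 * a fatal signal arrived and my_intr_cb() was called (pass LWI_ON_SIGNAL_NOOP
 * as the signal callback to get -EINTR without any callback being invoked).
 */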

#ifdef __KERNEL__

/*
 * wait for @condition to become true, but no longer than timeout, specified
 * by @info.
 */
#define __l_wait_event(wq, condition, info, ret, l_add_wait)                   \
do {                                                                           \
        wait_queue_t __wait;                                                   \
        cfs_duration_t __timeout = info->lwi_timeout;                          \
        sigset_t __blocked;                                                    \
        int __allow_intr = info->lwi_allow_intr;                               \
                                                                               \
        ret = 0;                                                               \
        if (condition)                                                         \
                break;                                                         \
                                                                               \
        init_waitqueue_entry_current(&__wait);                                 \
        l_add_wait(&wq, &__wait);                                              \
                                                                               \
        /* Block all signals (just the non-fatal ones if no timeout). */       \
        if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr))   \
                __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);              \
        else                                                                   \
                __blocked = cfs_block_sigsinv(0);                              \
                                                                               \
        for (;;) {                                                             \
                unsigned int __wstate;                                         \
                                                                               \
                __wstate = info->lwi_on_signal != NULL &&                      \
                           (__timeout == 0 || __allow_intr) ?                  \
                           TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;          \
                                                                               \
                set_current_state(TASK_INTERRUPTIBLE);                         \
                                                                               \
                if (condition)                                                 \
                        break;                                                 \
                                                                               \
                if (__timeout == 0) {                                          \
                        waitq_wait(&__wait, __wstate);                         \
                } else {                                                       \
                        cfs_duration_t interval = info->lwi_interval ?         \
                                        min_t(cfs_duration_t,                  \
                                              info->lwi_interval, __timeout) : \
                                        __timeout;                             \
                        cfs_duration_t remaining = waitq_timedwait(&__wait,    \
                                                                   __wstate,   \
                                                                   interval);  \
                        __timeout = cfs_time_sub(__timeout,                    \
                                        cfs_time_sub(interval, remaining));    \
                        if (__timeout == 0) {                                  \
                                if (info->lwi_on_timeout == NULL ||            \
                                    info->lwi_on_timeout(info->lwi_cb_data)) { \
                                        ret = -ETIMEDOUT;                      \
                                        break;                                 \
                                }                                              \
                                /* Take signals after the timeout expires. */  \
                                if (info->lwi_on_signal != NULL)               \
                                        (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
                        }                                                      \
                }                                                              \
                                                                               \
                if (condition)                                                 \
                        break;                                                 \
                if (cfs_signal_pending()) {                                    \
                        if (info->lwi_on_signal != NULL &&                     \
                            (__timeout == 0 || __allow_intr)) {                \
                                if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
                                        info->lwi_on_signal(info->lwi_cb_data);\
                                ret = -EINTR;                                  \
                                break;                                         \
                        }                                                      \
                        /* We have to do this here because some signals */     \
                        /* are not blockable - ie from strace(1). */           \
                        /* In these cases we want to schedule_timeout() */     \
                        /* again, because we don't want that to return */      \
                        /* -EINTR when the RPC actually succeeded. */          \
                        /* the recalc_sigpending() below will deliver the */   \
                        /* signal properly. */                                 \
                        cfs_clear_sigpending();                                \
                }                                                              \
        }                                                                      \
                                                                               \
        cfs_restore_sigs(__blocked);                                           \
                                                                               \
        set_current_state(TASK_RUNNING);                                       \
        remove_wait_queue(&wq, &__wait);                                       \
} while (0)

#else /* !__KERNEL__ */

#define __l_wait_event(wq, condition, info, ret, l_add_wait)           \
do {                                                                    \
        long __timeout = info->lwi_timeout;                             \
        long __now;                                                     \
        long __then = 0;                                                \
        int  __timed_out = 0;                                           \
        int  __interval = obd_timeout;                                  \
                                                                        \
        ret = 0;                                                        \
        if (condition)                                                  \
                break;                                                  \
                                                                        \
        if (__timeout != 0)                                             \
                __then = time(NULL);                                    \
        if (__timeout && __timeout < __interval)                        \
                __interval = __timeout;                                 \
        if (info->lwi_interval && info->lwi_interval < __interval)      \
                __interval = info->lwi_interval;                        \
                                                                        \
        while (!(condition)) {                                          \
                liblustre_wait_event(__interval);                       \
                if (condition)                                          \
                        break;                                          \
                                                                        \
                if (!__timed_out && info->lwi_timeout != 0) {           \
                        __now = time(NULL);                             \
                        __timeout -= __now - __then;                    \
                        __then = __now;                                 \
                                                                        \
                        if (__timeout > 0)                              \
                                continue;                               \
                                                                        \
                        __timeout = 0;                                  \
                        __timed_out = 1;                                \
                        if (info->lwi_on_timeout == NULL ||             \
                            info->lwi_on_timeout(info->lwi_cb_data)) {  \
                                ret = -ETIMEDOUT;                       \
                                break;                                  \
                        }                                               \
                }                                                       \
        }                                                               \
} while (0)

#endif /* __KERNEL__ */

#define l_wait_event(wq, condition, info)                       \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info,                   \
                       __ret, add_wait_queue);                  \
        __ret;                                                  \
})

#define l_wait_event_exclusive(wq, condition, info)             \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info,                   \
                       __ret, add_wait_queue_exclusive);        \
        __ret;                                                  \
})

#define l_wait_event_exclusive_head(wq, condition, info)        \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info,                   \
                       __ret, add_wait_queue_exclusive_head);   \
        __ret;                                                  \
})

#define l_wait_condition(wq, condition)                         \
({                                                              \
        struct l_wait_info lwi = { 0 };                         \
        l_wait_event(wq, condition, &lwi);                      \
})

#define l_wait_condition_exclusive(wq, condition)               \
({                                                              \
        struct l_wait_info lwi = { 0 };                         \
        l_wait_event_exclusive(wq, condition, &lwi);            \
})

#define l_wait_condition_exclusive_head(wq, condition)          \
({                                                              \
        struct l_wait_info lwi = { 0 };                         \
        l_wait_event_exclusive_head(wq, condition, &lwi);       \
})
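
/*
 * l_wait_condition() is the simplest case: a zeroed l_wait_info, i.e. it
 * effectively ignores signals and has no timeout, so only the condition
 * becoming true can unblock the caller.  A sketch (hypothetical 'obj' with
 * an atomic reference count and an embedded wait queue):
 *
 *      l_wait_condition(obj->obj_waitq, atomic_read(&obj->obj_refcount) == 0);
 *
 * which is equivalent to:
 *
 *      struct l_wait_info lwi = { 0 };
 *      l_wait_event(obj->obj_waitq, atomic_read(&obj->obj_refcount) == 0, &lwi);
 */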

#ifdef __KERNEL__
#define LIBLUSTRE_CLIENT (0)
#else
#define LIBLUSTRE_CLIENT (1)
#endif

#endif /* _LUSTRE_LIB_H */