1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001 Cluster File Systems, Inc. <braam@clusterfs.com>
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 * Basic Lustre library routines.
28 #include <libcfs/kp30.h>
29 #include <lustre/lustre_idl.h>
30 #include <lustre_cfg.h>
31 #if defined(__linux__)
32 #include <linux/lustre_lib.h>
33 #elif defined(__APPLE__)
34 #include <darwin/lustre_lib.h>
35 #elif defined(__WINNT__)
36 #include <winnt/lustre_lib.h>
38 #error Unsupported operating system.
42 unsigned int ll_rand(void); /* returns a random 32-bit integer */
43 void ll_srand(unsigned int, unsigned int); /* seed the generator */
46 struct ptlrpc_request;
50 #include <lustre_ha.h>
51 #include <lustre_net.h>
54 int target_handle_connect(struct ptlrpc_request *req);
55 int target_handle_disconnect(struct ptlrpc_request *req);
56 void target_destroy_export(struct obd_export *exp);
57 int target_handle_reconnect(struct lustre_handle *conn, struct obd_export *exp,
58 struct obd_uuid *cluuid);
59 int target_handle_ping(struct ptlrpc_request *req);
60 void target_committed_to_req(struct ptlrpc_request *req);
62 #ifdef HAVE_QUOTA_SUPPORT
63 /* quotacheck callback, dqacq/dqrel callback handler */
64 int target_handle_qc_callback(struct ptlrpc_request *req);
65 int target_handle_dqacq_callback(struct ptlrpc_request *req);
67 #define target_handle_dqacq_callback(req) ldlm_callback_reply(req, -ENOTSUPP)
68 #define target_handle_qc_callback(req) (0)
71 void target_cancel_recovery_timer(struct obd_device *obd);
73 #define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 / 2) /* *waves hands* */
74 void target_start_recovery_timer(struct obd_device *obd);
75 int target_start_recovery_thread(struct obd_device *obd,
76 svc_handler_t handler);
77 void target_stop_recovery_thread(struct obd_device *obd);
78 void target_cleanup_recovery(struct obd_device *obd);
79 int target_queue_recovery_request(struct ptlrpc_request *req,
80 struct obd_device *obd);
81 int target_queue_final_reply(struct ptlrpc_request *req, int rc);
82 void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
86 int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg* lcfg);
87 struct client_obd *client_conn2cli(struct lustre_handle *conn);
90 struct obd_client_handle {
91 struct lustre_handle och_fh;
92 struct lu_fid *och_fid;
93 struct llog_cookie och_cookie;
94 struct mdc_open_data *och_mod;
98 #define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
101 void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
102 void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
108 struct semaphore l_sem;
112 void l_lock_init(struct lustre_lock *);
113 void l_lock(struct lustre_lock *);
114 void l_unlock(struct lustre_lock *);
115 int l_has_lock(struct lustre_lock *);
121 #define OBD_IOCTL_VERSION 0x00010004
123 struct obd_ioctl_data {
125 uint32_t ioc_version;
131 struct obdo ioc_obdo1;
132 struct obdo ioc_obdo2;
137 uint32_t ioc_command;
143 /* buffers the kernel will treat as user pointers */
149 /* inline buffers for various arguments */
150 uint32_t ioc_inllen1;
152 uint32_t ioc_inllen2;
154 uint32_t ioc_inllen3;
156 uint32_t ioc_inllen4;
162 struct obd_ioctl_hdr {
164 uint32_t ioc_version;
167 static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
169 int len = size_round(sizeof(struct obd_ioctl_data));
170 len += size_round(data->ioc_inllen1);
171 len += size_round(data->ioc_inllen2);
172 len += size_round(data->ioc_inllen3);
173 len += size_round(data->ioc_inllen4);
178 static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
180 if (data->ioc_len > (1<<30)) {
181 CERROR("OBD ioctl: ioc_len larger than 1<<30\n");
184 if (data->ioc_inllen1 > (1<<30)) {
185 CERROR("OBD ioctl: ioc_inllen1 larger than 1<<30\n");
188 if (data->ioc_inllen2 > (1<<30)) {
189 CERROR("OBD ioctl: ioc_inllen2 larger than 1<<30\n");
192 if (data->ioc_inllen3 > (1<<30)) {
193 CERROR("OBD ioctl: ioc_inllen3 larger than 1<<30\n");
196 if (data->ioc_inllen4 > (1<<30)) {
197 CERROR("OBD ioctl: ioc_inllen4 larger than 1<<30\n");
200 if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
201 CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
204 if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
205 CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
208 if (data->ioc_inlbuf3 && !data->ioc_inllen3) {
209 CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
212 if (data->ioc_inlbuf4 && !data->ioc_inllen4) {
213 CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
216 if (data->ioc_pbuf1 && !data->ioc_plen1) {
217 CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
220 if (data->ioc_pbuf2 && !data->ioc_plen2) {
221 CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
224 if (data->ioc_plen1 && !data->ioc_pbuf1) {
225 CERROR("OBD ioctl: plen1 set but NULL pointer\n");
228 if (data->ioc_plen2 && !data->ioc_pbuf2) {
229 CERROR("OBD ioctl: plen2 set but NULL pointer\n");
232 if (obd_ioctl_packlen(data) > data->ioc_len) {
233 CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
234 obd_ioctl_packlen(data), data->ioc_len);
241 static inline int obd_ioctl_pack(struct obd_ioctl_data *data, char **pbuf,
245 struct obd_ioctl_data *overlay;
246 data->ioc_len = obd_ioctl_packlen(data);
247 data->ioc_version = OBD_IOCTL_VERSION;
249 if (*pbuf && data->ioc_len > max)
252 *pbuf = malloc(data->ioc_len);
256 overlay = (struct obd_ioctl_data *)*pbuf;
257 memcpy(*pbuf, data, sizeof(*data));
259 ptr = overlay->ioc_bulk;
260 if (data->ioc_inlbuf1)
261 LOGL(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
262 if (data->ioc_inlbuf2)
263 LOGL(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
264 if (data->ioc_inlbuf3)
265 LOGL(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
266 if (data->ioc_inlbuf4)
267 LOGL(data->ioc_inlbuf4, data->ioc_inllen4, ptr);
268 if (obd_ioctl_is_invalid(overlay))
274 static inline int obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf,
278 struct obd_ioctl_data *overlay;
282 overlay = (struct obd_ioctl_data *)pbuf;
284 /* Preserve the caller's buffer pointers */
285 overlay->ioc_inlbuf1 = data->ioc_inlbuf1;
286 overlay->ioc_inlbuf2 = data->ioc_inlbuf2;
287 overlay->ioc_inlbuf3 = data->ioc_inlbuf3;
288 overlay->ioc_inlbuf4 = data->ioc_inlbuf4;
290 memcpy(data, pbuf, sizeof(*data));
292 ptr = overlay->ioc_bulk;
293 if (data->ioc_inlbuf1)
294 LOGU(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
295 if (data->ioc_inlbuf2)
296 LOGU(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
297 if (data->ioc_inlbuf3)
298 LOGU(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
299 if (data->ioc_inlbuf4)
300 LOGU(data->ioc_inlbuf4, data->ioc_inllen4, ptr);
306 #include <obd_support.h>
309 /* function defined in lustre/obdclass/<platform>/<platform>-module.c */
310 int obd_ioctl_getdata(char **buf, int *len, void *arg);
311 int obd_ioctl_popdata(void *arg, void *data, int len);
313 /* buffer MUST be at least the size of obd_ioctl_hdr */
314 static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
316 struct obd_ioctl_hdr hdr;
317 struct obd_ioctl_data *data;
322 err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
326 if (hdr.ioc_version != OBD_IOCTL_VERSION) {
327 CERROR("Version mismatch kernel vs application\n");
331 if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) {
332 CERROR("User buffer len %d exceeds %d max buffer\n",
333 hdr.ioc_len, OBD_MAX_IOCTL_BUFFER);
337 if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) {
338 CERROR("User buffer too small for ioctl (%d)\n", hdr.ioc_len);
342 /* XXX allocate this more intelligently, using kmalloc when
344 OBD_VMALLOC(*buf, hdr.ioc_len);
346 CERROR("Cannot allocate control buffer of len %d\n",
351 data = (struct obd_ioctl_data *)*buf;
353 err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
355 OBD_VFREE(*buf, hdr.ioc_len);
359 if (obd_ioctl_is_invalid(data)) {
360 CERROR("ioctl not correctly formatted\n");
361 OBD_VFREE(*buf, hdr.ioc_len);
365 if (data->ioc_inllen1) {
366 data->ioc_inlbuf1 = &data->ioc_bulk[0];
367 offset += size_round(data->ioc_inllen1);
370 if (data->ioc_inllen2) {
371 data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
372 offset += size_round(data->ioc_inllen2);
375 if (data->ioc_inllen3) {
376 data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
377 offset += size_round(data->ioc_inllen3);
380 if (data->ioc_inllen4) {
381 data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
387 static inline int obd_ioctl_popdata(void *arg, void *data, int len)
389 int err = copy_to_user(arg, data, len);
/*
 * Release a buffer obtained from obd_ioctl_getdata().  @len must be
 * the length obd_ioctl_getdata() returned, since OBD_VFREE pairs with
 * the OBD_VMALLOC done there.
 */
static inline void obd_ioctl_freedata(char *buf, int len)
{
        OBD_VFREE(buf, len);
}
406 * BSD ioctl description:
407 * #define IOC_V1 _IOR(g, n1, long)
408 * #define IOC_V2 _IOW(g, n2, long)
410 * ioctl(f, IOC_V1, arg);
411 * arg will be treated as a long value,
413 * ioctl(f, IOC_V2, arg)
414 * arg will be treated as a pointer, bsd will call
415 * copyin(buf, arg, sizeof(long))
 * To make BSD ioctl handle arguments correctly and simply,
418 * we change _IOR to _IOWR so BSD will copyin obd_ioctl_data
419 * for us. Does this change affect Linux? (XXX Liang)
421 #define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
422 #define OBD_IOC_DESTROY _IOW ('f', 104, OBD_IOC_DATA_TYPE)
423 #define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE)
425 #define OBD_IOC_SETATTR _IOW ('f', 107, OBD_IOC_DATA_TYPE)
426 #define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE)
427 #define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
428 #define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
431 #define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
432 #define OBD_IOC_SYNC _IOW ('f', 114, OBD_IOC_DATA_TYPE)
433 #define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
434 #define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE)
435 #define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE)
436 #define OBD_IOC_COPY _IOWR('f', 120, OBD_IOC_DATA_TYPE)
437 #define OBD_IOC_MIGR _IOWR('f', 121, OBD_IOC_DATA_TYPE)
438 #define OBD_IOC_PUNCH _IOWR('f', 122, OBD_IOC_DATA_TYPE)
440 #define OBD_IOC_MODULE_DEBUG _IOWR('f', 124, OBD_IOC_DATA_TYPE)
441 #define OBD_IOC_BRW_READ _IOWR('f', 125, OBD_IOC_DATA_TYPE)
442 #define OBD_IOC_BRW_WRITE _IOWR('f', 126, OBD_IOC_DATA_TYPE)
443 #define OBD_IOC_NAME2DEV _IOWR('f', 127, OBD_IOC_DATA_TYPE)
444 #define OBD_IOC_UUID2DEV _IOWR('f', 130, OBD_IOC_DATA_TYPE)
445 #define OBD_IOC_GETNAME _IOWR('f', 131, OBD_IOC_DATA_TYPE)
447 #define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
448 #define OBD_IOC_CLIENT_RECOVER _IOW ('f', 133, OBD_IOC_DATA_TYPE)
450 #define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139 )
451 #define OBD_IOC_NO_TRANSNO _IOW ('f', 140, OBD_IOC_DATA_TYPE)
452 #define OBD_IOC_SET_READONLY _IOW ('f', 141, OBD_IOC_DATA_TYPE)
453 #define OBD_IOC_ABORT_RECOVERY _IOR ('f', 142, OBD_IOC_DATA_TYPE)
455 #define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE)
457 #define OBD_GET_VERSION _IOWR ('f', 144, OBD_IOC_DATA_TYPE)
459 #define OBD_IOC_GSS_SUPPORT _IOWR('f', 145, OBD_IOC_DATA_TYPE)
461 #define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE)
463 #define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE)
465 #define OBD_IOC_LOV_SETSTRIPE _IOW ('f', 154, OBD_IOC_DATA_TYPE)
466 #define OBD_IOC_LOV_GETSTRIPE _IOW ('f', 155, OBD_IOC_DATA_TYPE)
467 #define OBD_IOC_LOV_SETEA _IOW ('f', 156, OBD_IOC_DATA_TYPE)
469 #define OBD_IOC_QUOTACHECK _IOW ('f', 160, int)
470 #define OBD_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
471 #define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl *)
473 #define OBD_IOC_MOUNTOPT _IOWR('f', 170, OBD_IOC_DATA_TYPE)
475 #define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE)
476 #define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE)
477 #define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE)
478 #define OBD_IOC_DORECORD _IOWR('f', 183, OBD_IOC_DATA_TYPE)
479 #define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
480 #define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE)
481 #define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE)
482 #define OBD_IOC_PARAM _IOW ('f', 187, OBD_IOC_DATA_TYPE)
484 #define OBD_IOC_CATLOGLIST _IOWR('f', 190, OBD_IOC_DATA_TYPE)
485 #define OBD_IOC_LLOG_INFO _IOWR('f', 191, OBD_IOC_DATA_TYPE)
486 #define OBD_IOC_LLOG_PRINT _IOWR('f', 192, OBD_IOC_DATA_TYPE)
487 #define OBD_IOC_LLOG_CANCEL _IOWR('f', 193, OBD_IOC_DATA_TYPE)
488 #define OBD_IOC_LLOG_REMOVE _IOWR('f', 194, OBD_IOC_DATA_TYPE)
489 #define OBD_IOC_LLOG_CHECK _IOWR('f', 195, OBD_IOC_DATA_TYPE)
490 #define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, OBD_IOC_DATA_TYPE)
492 #define ECHO_IOC_GET_STRIPE _IOWR('f', 200, OBD_IOC_DATA_TYPE)
493 #define ECHO_IOC_SET_STRIPE _IOWR('f', 201, OBD_IOC_DATA_TYPE)
494 #define ECHO_IOC_ENQUEUE _IOWR('f', 202, OBD_IOC_DATA_TYPE)
495 #define ECHO_IOC_CANCEL _IOWR('f', 203, OBD_IOC_DATA_TYPE)
497 /* XXX _IOWR('f', 250, long) has been defined in
498 * lnet/include/libcfs/kp30.h for debug, don't use it
501 /* Until such time as we get_info the per-stripe maximum from the OST,
502 * we define this to be 2T - 4k, which is the ext3 maxbytes. */
503 #define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
505 #define POISON_BULK 0
508 * l_wait_event is a flexible sleeping function, permitting simple caller
509 * configuration of interrupt and timeout sensitivity along with actions to
510 * be performed in the event of either exception.
512 * The first form of usage looks like this:
514 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
515 * intr_handler, callback_data);
516 * rc = l_wait_event(waitq, condition, &lwi);
518 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending. It
520 * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
521 * 'condition' becomes true, it optionally calls the specified 'intr_handler'
522 * if not NULL, and returns -EINTR.
524 * If a non-zero timeout is specified, signals are ignored until the timeout
525 * has expired. At this time, if 'timeout_handler' is not NULL it is called.
526 * If it returns FALSE l_wait_event() continues to wait as described above with
527 * signals enabled. Otherwise it returns -ETIMEDOUT.
529 * LWI_INTR(intr_handler, callback_data) is shorthand for
530 * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
532 * The second form of usage looks like this:
534 * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
535 * rc = l_wait_event(waitq, condition, &lwi);
537 * This form is the same as the first except that it COMPLETELY IGNORES
538 * SIGNALS. The caller must therefore beware that if 'timeout' is zero, or if
539 * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
540 * can unblock the current process is 'condition' becoming TRUE.
542 * Another form of usage is:
543 * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
545 * rc = l_wait_event(waitq, condition, &lwi);
546 * This is the same as previous case, but condition is checked once every
547 * 'interval' jiffies (if non-zero).
 * Subtle synchronization point: this macro does *not* necessarily take the
550 * wait-queue spin-lock before returning, and, hence, following idiom is safe
551 * ONLY when caller provides some external locking:
555 * l_wait_event(&obj->wq, ....); (1)
557 * wake_up(&obj->wq): (2)
558 * spin_lock(&q->lock); (2.1)
559 * __wake_up_common(q, ...); (2.2)
560 * spin_unlock(&q->lock, flags); (2.3)
562 * OBD_FREE_PTR(obj); (3)
564 * As l_wait_event() may "short-cut" execution and return without taking
565 * wait-queue spin-lock, some additional synchronization is necessary to
566 * guarantee that step (3) can begin only after (2.3) finishes.
568 * XXX nikita: some ptlrpc daemon threads have races of that sort.
572 #define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))
575 cfs_duration_t lwi_timeout;
576 cfs_duration_t lwi_interval;
577 int (*lwi_on_timeout)(void *);
578 void (*lwi_on_signal)(void *);
582 /* NB: LWI_TIMEOUT ignores signals completely */
583 #define LWI_TIMEOUT(time, cb, data) \
584 ((struct l_wait_info) { \
585 .lwi_timeout = time, \
586 .lwi_on_timeout = cb, \
587 .lwi_cb_data = data, \
591 #define LWI_TIMEOUT_INTERVAL(time, interval, cb, data) \
592 ((struct l_wait_info) { \
593 .lwi_timeout = time, \
594 .lwi_on_timeout = cb, \
595 .lwi_cb_data = data, \
596 .lwi_interval = interval \
599 #define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data) \
600 ((struct l_wait_info) { \
601 .lwi_timeout = time, \
602 .lwi_on_timeout = time_cb, \
603 .lwi_on_signal = sig_cb, \
604 .lwi_cb_data = data, \
608 #define LWI_INTR(cb, data) LWI_TIMEOUT_INTR(0, NULL, cb, data)
613 * wait for @condition to become true, but no longer than timeout, specified
616 #define __l_wait_event(wq, condition, info, ret, excl) \
618 cfs_waitlink_t __wait; \
619 cfs_duration_t __timeout = info->lwi_timeout; \
620 cfs_sigset_t __blocked; \
626 cfs_waitlink_init(&__wait); \
628 cfs_waitq_add_exclusive(&wq, &__wait); \
630 cfs_waitq_add(&wq, &__wait); \
632 /* Block all signals (just the non-fatal ones if no timeout). */ \
633 if (info->lwi_on_signal != NULL && __timeout == 0) \
634 __blocked = l_w_e_set_sigs(LUSTRE_FATAL_SIGS); \
636 __blocked = l_w_e_set_sigs(0); \
639 set_current_state(TASK_INTERRUPTIBLE); \
644 if (__timeout == 0) { \
645 cfs_waitq_wait(&__wait, CFS_TASK_INTERRUPTIBLE); \
647 cfs_duration_t interval = info->lwi_interval? \
648 min_t(cfs_duration_t, \
649 info->lwi_interval,__timeout):\
651 cfs_duration_t remaining = cfs_waitq_timedwait(&__wait,\
652 CFS_TASK_INTERRUPTIBLE, \
654 __timeout = cfs_time_sub(__timeout, \
655 cfs_time_sub(interval, remaining));\
656 if (__timeout == 0) { \
657 if (info->lwi_on_timeout == NULL || \
658 info->lwi_on_timeout(info->lwi_cb_data)) { \
662 /* Take signals after the timeout expires. */ \
663 if (info->lwi_on_signal != NULL) \
664 (void)l_w_e_set_sigs(LUSTRE_FATAL_SIGS); \
670 if (cfs_signal_pending()) { \
671 if (info->lwi_on_signal != NULL && __timeout == 0) { \
672 if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
673 info->lwi_on_signal(info->lwi_cb_data);\
677 /* We have to do this here because some signals */ \
678 /* are not blockable - ie from strace(1). */ \
679 /* In these cases we want to schedule_timeout() */ \
680 /* again, because we don't want that to return */ \
681 /* -EINTR when the RPC actually succeeded. */ \
682 /* the RECALC_SIGPENDING below will deliver the */ \
683 /* signal properly. */ \
684 cfs_clear_sigpending(); \
688 cfs_block_sigs(__blocked); \
690 set_current_state(TASK_RUNNING); \
691 cfs_waitq_del(&wq, &__wait); \
694 #else /* !__KERNEL__ */
695 #define __l_wait_event(wq, condition, info, ret, excl) \
697 long __timeout = info->lwi_timeout; \
700 int __timed_out = 0; \
706 if (__timeout == 0) \
707 __timeout = 1000000000; \
709 __then = time(NULL); \
711 while (!(condition)) { \
712 if (liblustre_wait_event(info->lwi_interval?:__timeout) || \
713 (info->lwi_interval && info->lwi_interval < __timeout)) {\
714 if (__timeout != 0 && info->lwi_timeout != 0) { \
715 __now = time(NULL); \
716 __timeout -= __now - __then; \
724 if (info->lwi_timeout != 0 && !__timed_out) { \
726 if (info->lwi_on_timeout == NULL || \
727 info->lwi_on_timeout(info->lwi_cb_data)) { \
735 #endif /* __KERNEL__ */
737 #define l_wait_event(wq, condition, info) \
740 struct l_wait_info *__info = (info); \
742 __l_wait_event(wq, condition, __info, __ret, 0); \
746 #define l_wait_event_exclusive(wq, condition, info) \
749 struct l_wait_info *__info = (info); \
751 __l_wait_event(wq, condition, __info, __ret, 1); \
756 #define LIBLUSTRE_CLIENT (0)
758 #define LIBLUSTRE_CLIENT (1)
761 #endif /* _LUSTRE_LIB_H */