1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001 Cluster File Systems, Inc. <braam@clusterfs.com>
6 * This file is part of Lustre, http://www.lustre.org.
8 * Lustre is free software; you can redistribute it and/or
9 * modify it under the terms of version 2 of the GNU General Public
10 * License as published by the Free Software Foundation.
12 * Lustre is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with Lustre; if not, write to the Free Software
19 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 * Basic Lustre library routines.
30 # include <sys/types.h>
32 # include <asm/semaphore.h>
33 # include <linux/rwsem.h>
34 # include <linux/sched.h>
35 # include <linux/signal.h>
36 # include <linux/types.h>
38 #include <libcfs/kp30.h>
39 #include <linux/lustre_idl.h>
40 #include <linux/lustre_cfg.h>
/* Poison patterns written over freed/uninitialized memory to catch
 * stale references; width follows the platform's long size. */
#if BITS_PER_LONG > 32
# define LI_POISON ((int)0x5a5a5a5a5a5a5a5a)
# define LL_POISON ((long)0x5a5a5a5a5a5a5a5a)
# define LP_POISON ((void *)(long)0x5a5a5a5a5a5a5a5a)
#else
# define LI_POISON ((int)0x5a5a5a5a)
# define LL_POISON ((long)0x5a5a5a5a)
# define LP_POISON ((void *)(long)0x5a5a5a5a)
#endif
55 unsigned int ll_rand(void); /* returns a random 32-bit integer */
56 void ll_srand(unsigned int, unsigned int); /* seed the generator */
59 struct ptlrpc_request;
63 #include <linux/lustre_ha.h>
64 #include <linux/lustre_net.h>
65 #include <linux/lustre_compat25.h>
66 #include <linux/lvfs.h>
/* Server-side ("target") handlers for incoming ptlrpc requests:
 * connect/disconnect, export teardown, reconnect validation and ping. */
int target_handle_connect(struct ptlrpc_request *req, svc_handler_t handler);
int target_handle_disconnect(struct ptlrpc_request *req);
void target_destroy_export(struct obd_export *exp);
int target_handle_reconnect(struct lustre_handle *conn, struct obd_export *exp,
                            struct obd_uuid *cluuid);
int target_handle_ping(struct ptlrpc_request *req);
/* stamp the last-committed transno info into @req's reply */
void target_committed_to_req(struct ptlrpc_request *req);
#ifdef HAVE_QUOTA_SUPPORT
/* quotacheck callback, dqacq/dqrel callback handler */
int target_handle_qc_callback(struct ptlrpc_request *req);
int target_handle_dqacq_callback(struct ptlrpc_request *req);
#else
/* no quota support: reject dqacq requests, treat quotacheck as a no-op */
#define target_handle_dqacq_callback(req) ldlm_callback_reply(req, -ENOTSUPP)
#define target_handle_qc_callback(req) (0)
#endif
/* Recovery timer / request-queue management on the server side. */
void target_cancel_recovery_timer(struct obd_device *obd);

#define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 * HZ / 2) /* *waves hands* */
void target_start_recovery_timer(struct obd_device *obd, svc_handler_t handler);
void target_abort_recovery(void *data);
void target_cleanup_recovery(struct obd_device *obd);
int target_queue_recovery_request(struct ptlrpc_request *req,
                                  struct obd_device *obd);
int target_queue_final_reply(struct ptlrpc_request *req, int rc);
void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);

/* client OBD setup helpers */
int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg* lcfg);
struct client_obd *client_conn2cli(struct lustre_handle *conn);
struct mdc_open_data;   /* opaque here; presumably defined by the MDC code */

/* Per-open file handle kept on the client.
 * NOTE(review): this excerpt is truncated -- trailing members and the
 * struct's closing brace are not visible here. */
struct obd_client_handle {
        struct lustre_handle och_fh;       /* handle returned for the open */
        struct llog_cookie och_cookie;     /* llog cookie for this open */
        struct mdc_open_data *och_mod;     /* MDC-private open data */

/* magic value for obd_client_handle (validation site not visible here) */
#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
/* Convert between the kernel's kstatfs and the wire-format obd_statfs. */
void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
        /* NOTE(review): the opening line of this struct (lustre_lock) is
         * outside the visible excerpt. */
        struct task_struct *l_owner;    /* task currently holding the lock */
        struct semaphore l_sem;         /* underlying semaphore */

/* init / acquire / release / ownership-query for a lustre_lock */
void l_lock_init(struct lustre_lock *);
void l_lock(struct lustre_lock *);
void l_unlock(struct lustre_lock *);
int l_has_lock(struct lustre_lock *);
/* version stamp checked against userspace in obd_ioctl_getdata() */
#define OBD_IOCTL_VERSION 0x00010004

/* Argument block carried by the OBD_IOC_* ioctls below.
 * NOTE(review): this excerpt is truncated -- additional members
 * (ioc_len, handles, pbuf/plen user-pointer pairs, the ioc_inlbuf*
 * pointers and the trailing ioc_bulk[] area used by the pack/unpack
 * helpers) and the closing brace are not visible here. */
struct obd_ioctl_data {
        uint32_t ioc_version;   /* must equal OBD_IOCTL_VERSION */
        struct obdo ioc_obdo1;
        struct obdo ioc_obdo2;
        uint32_t ioc_command;
        /* buffers the kernel will treat as user pointers */
        /* inline buffers for various arguments */
        uint32_t ioc_inllen1;
        uint32_t ioc_inllen2;
        uint32_t ioc_inllen3;
        uint32_t ioc_inllen4;

/* Minimal header read first from userspace to size the full request.
 * NOTE(review): truncated -- remaining members/closing brace not shown. */
struct obd_ioctl_hdr {
        uint32_t ioc_version;
179 static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
181 int len = size_round(sizeof(struct obd_ioctl_data));
182 len += size_round(data->ioc_inllen1);
183 len += size_round(data->ioc_inllen2);
184 len += size_round(data->ioc_inllen3);
185 len += size_round(data->ioc_inllen4);
190 static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
192 if (data->ioc_len > (1<<30)) {
193 CERROR("OBD ioctl: ioc_len larger than 1<<30\n");
196 if (data->ioc_inllen1 > (1<<30)) {
197 CERROR("OBD ioctl: ioc_inllen1 larger than 1<<30\n");
200 if (data->ioc_inllen2 > (1<<30)) {
201 CERROR("OBD ioctl: ioc_inllen2 larger than 1<<30\n");
204 if (data->ioc_inllen3 > (1<<30)) {
205 CERROR("OBD ioctl: ioc_inllen3 larger than 1<<30\n");
208 if (data->ioc_inllen4 > (1<<30)) {
209 CERROR("OBD ioctl: ioc_inllen4 larger than 1<<30\n");
212 if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
213 CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
216 if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
217 CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
220 if (data->ioc_inlbuf3 && !data->ioc_inllen3) {
221 CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
224 if (data->ioc_inlbuf4 && !data->ioc_inllen4) {
225 CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
228 if (data->ioc_pbuf1 && !data->ioc_plen1) {
229 CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
232 if (data->ioc_pbuf2 && !data->ioc_plen2) {
233 CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
236 if (data->ioc_plen1 && !data->ioc_pbuf1) {
237 CERROR("OBD ioctl: plen1 set but NULL pointer\n");
240 if (data->ioc_plen2 && !data->ioc_pbuf2) {
241 CERROR("OBD ioctl: plen2 set but NULL pointer\n");
244 if (obd_ioctl_packlen(data) > data->ioc_len) {
245 CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
246 obd_ioctl_packlen(data), data->ioc_len);
253 static inline int obd_ioctl_pack(struct obd_ioctl_data *data, char **pbuf,
257 struct obd_ioctl_data *overlay;
258 data->ioc_len = obd_ioctl_packlen(data);
259 data->ioc_version = OBD_IOCTL_VERSION;
261 if (*pbuf && data->ioc_len > max)
264 *pbuf = malloc(data->ioc_len);
268 overlay = (struct obd_ioctl_data *)*pbuf;
269 memcpy(*pbuf, data, sizeof(*data));
271 ptr = overlay->ioc_bulk;
272 if (data->ioc_inlbuf1)
273 LOGL(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
274 if (data->ioc_inlbuf2)
275 LOGL(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
276 if (data->ioc_inlbuf3)
277 LOGL(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
278 if (data->ioc_inlbuf4)
279 LOGL(data->ioc_inlbuf4, data->ioc_inllen4, ptr);
280 if (obd_ioctl_is_invalid(overlay))
286 static inline int obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf,
290 struct obd_ioctl_data *overlay;
294 overlay = (struct obd_ioctl_data *)pbuf;
296 /* Preserve the caller's buffer pointers */
297 overlay->ioc_inlbuf1 = data->ioc_inlbuf1;
298 overlay->ioc_inlbuf2 = data->ioc_inlbuf2;
299 overlay->ioc_inlbuf3 = data->ioc_inlbuf3;
300 overlay->ioc_inlbuf4 = data->ioc_inlbuf4;
302 memcpy(data, pbuf, sizeof(*data));
304 ptr = overlay->ioc_bulk;
305 if (data->ioc_inlbuf1)
306 LOGU(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
307 if (data->ioc_inlbuf2)
308 LOGU(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
309 if (data->ioc_inlbuf3)
310 LOGU(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
311 if (data->ioc_inlbuf4)
312 LOGU(data->ioc_inlbuf4, data->ioc_inllen4, ptr);
318 #include <linux/obd_support.h>
320 /* buffer MUST be at least the size of obd_ioctl_hdr */
321 static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
323 struct obd_ioctl_hdr hdr;
324 struct obd_ioctl_data *data;
329 err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
333 if (hdr.ioc_version != OBD_IOCTL_VERSION) {
334 CERROR("Version mismatch kernel vs application\n");
338 if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) {
339 CERROR("User buffer len %d exceeds %d max buffer\n",
340 hdr.ioc_len, OBD_MAX_IOCTL_BUFFER);
344 if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) {
345 CERROR("user buffer too small for ioctl (%d)\n", hdr.ioc_len);
349 /* XXX allocate this more intelligently, using kmalloc when
351 OBD_VMALLOC(*buf, hdr.ioc_len);
353 CERROR("Cannot allocate control buffer of len %d\n",
358 data = (struct obd_ioctl_data *)*buf;
360 err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
362 OBD_VFREE(*buf, hdr.ioc_len);
366 if (obd_ioctl_is_invalid(data)) {
367 CERROR("ioctl not correctly formatted\n");
368 OBD_VFREE(*buf, hdr.ioc_len);
372 if (data->ioc_inllen1) {
373 data->ioc_inlbuf1 = &data->ioc_bulk[0];
374 offset += size_round(data->ioc_inllen1);
377 if (data->ioc_inllen2) {
378 data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
379 offset += size_round(data->ioc_inllen2);
382 if (data->ioc_inllen3) {
383 data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
384 offset += size_round(data->ioc_inllen3);
387 if (data->ioc_inllen4) {
388 data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
394 static inline void obd_ioctl_freedata(char *buf, int len)
403 #define OBD_IOC_CREATE _IOR ('f', 101, long)
404 #define OBD_IOC_DESTROY _IOW ('f', 104, long)
405 #define OBD_IOC_PREALLOCATE _IOWR('f', 105, long)
407 #define OBD_IOC_SETATTR _IOW ('f', 107, long)
408 #define OBD_IOC_GETATTR _IOR ('f', 108, long)
409 #define OBD_IOC_READ _IOWR('f', 109, long)
410 #define OBD_IOC_WRITE _IOWR('f', 110, long)
413 #define OBD_IOC_STATFS _IOWR('f', 113, long)
414 #define OBD_IOC_SYNC _IOW ('f', 114, long)
415 #define OBD_IOC_READ2 _IOWR('f', 115, long)
416 #define OBD_IOC_FORMAT _IOWR('f', 116, long)
417 #define OBD_IOC_PARTITION _IOWR('f', 117, long)
418 #define OBD_IOC_COPY _IOWR('f', 120, long)
419 #define OBD_IOC_MIGR _IOWR('f', 121, long)
420 #define OBD_IOC_PUNCH _IOWR('f', 122, long)
422 #define OBD_IOC_MODULE_DEBUG _IOWR('f', 124, long)
423 #define OBD_IOC_BRW_READ _IOWR('f', 125, long)
424 #define OBD_IOC_BRW_WRITE _IOWR('f', 126, long)
425 #define OBD_IOC_NAME2DEV _IOWR('f', 127, long)
426 #define OBD_IOC_UUID2DEV _IOWR('f', 130, long)
427 #define OBD_IOC_GETNAME _IOR ('f', 131, long)
429 #define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, long)
430 #define OBD_IOC_CLIENT_RECOVER _IOW ('f', 133, long)
432 #define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139 )
433 #define OBD_IOC_NO_TRANSNO _IOW ('f', 140, long)
434 #define OBD_IOC_SET_READONLY _IOW ('f', 141, long)
435 #define OBD_IOC_ABORT_RECOVERY _IOR ('f', 142, long)
437 #define OBD_GET_VERSION _IOWR ('f', 144, long)
439 #define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, long)
441 #define OBD_IOC_LOV_SETSTRIPE _IOW ('f', 154, long)
442 #define OBD_IOC_LOV_GETSTRIPE _IOW ('f', 155, long)
443 #define OBD_IOC_LOV_SETEA _IOW ('f', 156, long)
445 #define OBD_IOC_QUOTACHECK _IOW ('f', 160, int)
446 #define OBD_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
447 #define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl *)
449 #define OBD_IOC_MOUNTOPT _IOWR('f', 170, long)
451 #define OBD_IOC_RECORD _IOWR('f', 180, long)
452 #define OBD_IOC_ENDRECORD _IOWR('f', 181, long)
453 #define OBD_IOC_PARSE _IOWR('f', 182, long)
454 #define OBD_IOC_DORECORD _IOWR('f', 183, long)
455 #define OBD_IOC_PROCESS_CFG _IOWR('f', 184, long)
456 #define OBD_IOC_DUMP_LOG _IOWR('f', 185, long)
457 #define OBD_IOC_CLEAR_LOG _IOWR('f', 186, long)
458 #define OBD_IOC_PARAM _IOW ('f', 187, long)
460 #define OBD_IOC_CATLOGLIST _IOWR('f', 190, long)
461 #define OBD_IOC_LLOG_INFO _IOWR('f', 191, long)
462 #define OBD_IOC_LLOG_PRINT _IOWR('f', 192, long)
463 #define OBD_IOC_LLOG_CANCEL _IOWR('f', 193, long)
464 #define OBD_IOC_LLOG_REMOVE _IOWR('f', 194, long)
465 #define OBD_IOC_LLOG_CHECK _IOWR('f', 195, long)
466 #define OBD_IOC_LLOG_CATINFO _IOWR('f', 196, long)
468 #define ECHO_IOC_GET_STRIPE _IOWR('f', 200, long)
469 #define ECHO_IOC_SET_STRIPE _IOWR('f', 201, long)
470 #define ECHO_IOC_ENQUEUE _IOWR('f', 202, long)
471 #define ECHO_IOC_CANCEL _IOWR('f', 203, long)
473 /* XXX _IOWR('f', 250, long) has been defined in
474 * lnet/include/libcfs/kp30.h for debug, don't use it
477 /* Until such time as we get_info the per-stripe maximum from the OST,
478 * we define this to be 2T - 4k, which is the ext3 maxbytes. */
479 #define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
481 #define POISON_BULK 0
484 * l_wait_event is a flexible sleeping function, permitting simple caller
485 * configuration of interrupt and timeout sensitivity along with actions to
486 * be performed in the event of either exception.
488 * The first form of usage looks like this:
490 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
491 * intr_handler, callback_data);
492 * rc = l_wait_event(waitq, condition, &lwi);
494 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending. It
496 * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
497 * 'condition' becomes true, it optionally calls the specified 'intr_handler'
498 * if not NULL, and returns -EINTR.
500 * If a non-zero timeout is specified, signals are ignored until the timeout
501 * has expired. At this time, if 'timeout_handler' is not NULL it is called.
502 * If it returns FALSE l_wait_event() continues to wait as described above with
503 * signals enabled. Otherwise it returns -ETIMEDOUT.
505 * LWI_INTR(intr_handler, callback_data) is shorthand for
506 * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
508 * The second form of usage looks like this:
510 * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
511 * rc = l_wait_event(waitq, condition, &lwi);
513 * This form is the same as the first except that it COMPLETELY IGNORES
514 * SIGNALS. The caller must therefore beware that if 'timeout' is zero, or if
515 * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
516 * can unblock the current process is 'condition' becoming TRUE.
518 * Another form of usage is:
519 * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
521 * rc = l_wait_event(waitq, condition, &lwi);
522 * This is the same as previous case, but condition is checked once every
523 * 'interval' jiffies (if non-zero).
 * Subtle synchronization point: this macro does *not* necessarily take the
526 * wait-queue spin-lock before returning, and, hence, following idiom is safe
527 * ONLY when caller provides some external locking:
531 * l_wait_event(&obj->wq, ....); (1)
533 * wake_up(&obj->wq): (2)
534 * spin_lock(&q->lock); (2.1)
535 * __wake_up_common(q, ...); (2.2)
536 * spin_unlock(&q->lock, flags); (2.3)
538 * OBD_FREE_PTR(obj); (3)
540 * As l_wait_event() may "short-cut" execution and return without taking
541 * wait-queue spin-lock, some additional synchronization is necessary to
542 * guarantee that step (3) can begin only after (2.3) finishes.
544 * XXX nikita: some ptlrpc daemon threads have races of that sort.
/* Sentinel for lwi_on_signal: wake on fatal signals but run no callback
 * (tested against lwi_on_signal in __l_wait_event before invoking it). */
#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))

        /* NOTE(review): the opening of struct l_wait_info is outside this
         * excerpt; these are its callback members. */
        int (*lwi_on_timeout)(void *);   /* non-zero return => -ETIMEDOUT */
        void (*lwi_on_signal)(void *);   /* run when a fatal signal arrives */
/* NB: LWI_TIMEOUT ignores signals completely */
#define LWI_TIMEOUT(time, cb, data)                                     \
((struct l_wait_info) {                                                 \
        .lwi_timeout    = time,                                         \
        .lwi_on_timeout = cb,                                           \
        .lwi_cb_data    = data,                                         \
})

/* As LWI_TIMEOUT, but re-check the condition every @interval jiffies. */
#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)                  \
((struct l_wait_info) {                                                 \
        .lwi_timeout    = time,                                         \
        .lwi_on_timeout = cb,                                           \
        .lwi_cb_data    = data,                                         \
        .lwi_interval   = interval                                      \
})

/* Timeout plus signal sensitivity; a NULL sig_cb becomes the NOOP
 * sentinel so fatal signals still interrupt the wait. */
#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)                   \
((struct l_wait_info) {                                                 \
        .lwi_timeout    = time,                                         \
        .lwi_on_timeout = time_cb,                                      \
        .lwi_on_signal  = (sig_cb == NULL) ? LWI_ON_SIGNAL_NOOP : sig_cb, \
        .lwi_cb_data    = data,                                         \
})

#define LWI_INTR(cb, data)  LWI_TIMEOUT_INTR(0, NULL, cb, data)
/* Signals that l_wait_event() treats as "killable" wake-ups. */
#define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) |         \
                           sigmask(SIGTERM) | sigmask(SIGQUIT) |        \
                           sigmask(SIGALRM))
592 static inline sigset_t l_w_e_set_sigs(int sigs)
595 unsigned long irqflags;
597 SIGNAL_MASK_LOCK(current, irqflags);
598 old = current->blocked;
599 siginitsetinv(¤t->blocked, sigs);
601 SIGNAL_MASK_UNLOCK(current, irqflags);
607 * wait for @condition to become true, but no longer than timeout, specified
/*
 * Kernel-side core of l_wait_event()/l_wait_event_exclusive(): sleep on
 * @wq until @condition holds, honouring the timeout/interval/signal
 * policy in @info; @ret receives the outcome and @excl selects an
 * exclusive waitqueue entry.  Signal blocking is switched via
 * l_w_e_set_sigs() (fatal-only when interruptible with no timeout).
 *
 * NOTE(review): this excerpt is truncated -- the do/while wrapper,
 * several else branches, closing braces and the assignments to @ret
 * fall between the visible lines; the body below is reproduced as-is.
 */
#define __l_wait_event(wq, condition, info, ret, excl)                  \
        wait_queue_t __wait;                                            \
        unsigned long __timeout = info->lwi_timeout;                    \
        unsigned long __irqflags;                                       \
        sigset_t __blocked;                                             \
        init_waitqueue_entry(&__wait, current);                         \
        add_wait_queue_exclusive(&wq, &__wait);                         \
        add_wait_queue(&wq, &__wait);                                   \
        /* Block all signals (just the non-fatal ones if no timeout). */\
        if (info->lwi_on_signal != NULL && __timeout == 0)              \
                __blocked = l_w_e_set_sigs(LUSTRE_FATAL_SIGS);          \
                __blocked = l_w_e_set_sigs(0);                          \
        set_current_state(TASK_INTERRUPTIBLE);                          \
        if (__timeout == 0) {                                           \
                unsigned long interval = info->lwi_interval?            \
                                         min_t(unsigned long,           \
                                               info->lwi_interval,__timeout):\
                __timeout -= interval - schedule_timeout(interval);     \
                if (__timeout == 0) {                                   \
                        if (info->lwi_on_timeout == NULL ||             \
                            info->lwi_on_timeout(info->lwi_cb_data)) {  \
                        /* Take signals after the timeout expires. */   \
                        if (info->lwi_on_signal != NULL)                \
                                (void)l_w_e_set_sigs(LUSTRE_FATAL_SIGS);\
        if (signal_pending(current)) {                                  \
                if (info->lwi_on_signal != NULL && __timeout == 0) {    \
                        if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP)  \
                                info->lwi_on_signal(info->lwi_cb_data); \
                        /* We have to do this here because some signals */ \
                        /* are not blockable - ie from strace(1). */    \
                        /* In these cases we want to schedule_timeout() */ \
                        /* again, because we don't want that to return */ \
                        /* -EINTR when the RPC actually succeeded. */   \
                        /* the RECALC_SIGPENDING below will deliver the */ \
                        /* signal properly. */                          \
                        SIGNAL_MASK_LOCK(current, __irqflags);          \
                        SIGNAL_MASK_UNLOCK(current, __irqflags);        \
        SIGNAL_MASK_LOCK(current, __irqflags);                          \
        current->blocked = __blocked;                                   \
        SIGNAL_MASK_UNLOCK(current, __irqflags);                        \
        current->state = TASK_RUNNING;                                  \
        remove_wait_queue(&wq, &__wait);                                \
691 #else /* !__KERNEL__ */
/*
 * Userspace (liblustre) variant of __l_wait_event: polls the condition
 * around liblustre_wait_event() instead of sleeping on a kernel
 * waitqueue; a zero timeout is mapped to a very large one and elapsed
 * wall-clock time is subtracted each iteration.
 *
 * NOTE(review): this excerpt is truncated -- declarations of __then/
 * __now, closing braces and the @ret handling are not visible here;
 * the body below is reproduced as-is.
 */
#define __l_wait_event(wq, condition, info, ret, excl)                  \
        long __timeout = info->lwi_timeout;                             \
        int __timed_out = 0;                                            \
        if (__timeout == 0)                                             \
                __timeout = 1000000000;                                 \
        __then = time(NULL);                                            \
        while (!(condition)) {                                          \
                if (liblustre_wait_event(info->lwi_interval?:__timeout) || \
                    (info->lwi_interval && info->lwi_interval < __timeout)) {\
                        if (__timeout != 0 && info->lwi_timeout != 0) { \
                                __now = time(NULL);                     \
                                __timeout -= __now - __then;            \
        if (info->lwi_timeout != 0 && !__timed_out) {                   \
                if (info->lwi_on_timeout == NULL ||                     \
                    info->lwi_on_timeout(info->lwi_cb_data)) {          \
732 #endif /* __KERNEL__ */
/* Sleep on @wq until @condition is true under the policy in @info (see
 * the l_wait_event commentary above).  Evaluates to 0 when the
 * condition became true, or the error produced by __l_wait_event(). */
#define l_wait_event(wq, condition, info)                               \
({                                                                      \
        int                 __ret;                                      \
        struct l_wait_info *__info = (info);                            \
                                                                        \
        __l_wait_event(wq, condition, __info, __ret, 0);                \
        __ret;                                                          \
})
/* As l_wait_event(), but joins @wq as an exclusive waiter so only one
 * such sleeper is woken per wake_up(). */
#define l_wait_event_exclusive(wq, condition, info)                     \
({                                                                      \
        int                 __ret;                                      \
        struct l_wait_info *__info = (info);                            \
                                                                        \
        __l_wait_event(wq, condition, __info, __ret, 1);                \
        __ret;                                                          \
})
753 /* initialize ost_lvb according to inode */
754 static inline void inode_init_lvb(struct inode *inode, struct ost_lvb *lvb)
756 lvb->lvb_size = inode->i_size;
757 lvb->lvb_blocks = inode->i_blocks;
758 lvb->lvb_mtime = LTIME_S(inode->i_mtime);
759 lvb->lvb_atime = LTIME_S(inode->i_atime);
760 lvb->lvb_ctime = LTIME_S(inode->i_ctime);
763 /* defined in liblustre/llite_lib.h */
767 #define LIBLUSTRE_CLIENT (0)
769 #define LIBLUSTRE_CLIENT (1)
772 #endif /* _LUSTRE_LIB_H */