Whamcloud - gitweb
libcfs: fixes to CDEBUG(), LASSERT() and friends to reduce stack consumption (Attempt 2.)
[fs/lustre-release.git] / lustre / include / linux / lustre_lib.h
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  *  Copyright (C) 2001 Cluster File Systems, Inc. <braam@clusterfs.com>
5  *
6  *   This file is part of Lustre, http://www.lustre.org.
7  *
8  *   Lustre is free software; you can redistribute it and/or
9  *   modify it under the terms of version 2 of the GNU General Public
10  *   License as published by the Free Software Foundation.
11  *
12  *   Lustre is distributed in the hope that it will be useful,
13  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
14  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15  *   GNU General Public License for more details.
16  *
17  *   You should have received a copy of the GNU General Public License
18  *   along with Lustre; if not, write to the Free Software
19  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20  *
21  * Basic Lustre library routines.
22  *
23  */
24
25 #ifndef _LUSTRE_LIB_H
26 #define _LUSTRE_LIB_H
27
28 #ifndef __KERNEL__
29 # include <string.h>
30 # include <sys/types.h>
31 #else
32 # include <asm/semaphore.h>
33 # include <linux/rwsem.h>
34 # include <linux/sched.h>
35 # include <linux/signal.h>
36 # include <linux/types.h>
37 #endif
38 #include <libcfs/kp30.h>
39 #include <linux/lustre_idl.h>
40 #include <linux/lustre_cfg.h>
41
42 #ifndef LP_POISON
43 #if BITS_PER_LONG > 32
44 # define LI_POISON ((int)0x5a5a5a5a5a5a5a5a)
45 # define LL_POISON ((long)0x5a5a5a5a5a5a5a5a)
46 # define LP_POISON ((void *)(long)0x5a5a5a5a5a5a5a5a)
47 #else
48 # define LI_POISON ((int)0x5a5a5a5a)
49 # define LL_POISON ((long)0x5a5a5a5a)
50 # define LP_POISON ((void *)(long)0x5a5a5a5a)
51 #endif
52 #endif
53
54 /* prng.c */
55 unsigned int ll_rand(void);        /* returns a random 32-bit integer */
56 void ll_srand(unsigned int, unsigned int);     /* seed the generator */
57
58 /* target.c */
59 struct ptlrpc_request;
60 struct recovd_data;
61 struct recovd_obd;
62 struct obd_export;
63 #include <linux/lustre_ha.h>
64 #include <linux/lustre_net.h>
65 #include <linux/lustre_compat25.h>
66 #include <linux/lvfs.h>
67
68 int target_handle_connect(struct ptlrpc_request *req, svc_handler_t handler);
69 int target_handle_disconnect(struct ptlrpc_request *req);
70 void target_destroy_export(struct obd_export *exp);
71 int target_handle_reconnect(struct lustre_handle *conn, struct obd_export *exp,
72                             struct obd_uuid *cluuid);
73 int target_handle_ping(struct ptlrpc_request *req);
74 void target_committed_to_req(struct ptlrpc_request *req);
75
76 #ifdef HAVE_QUOTA_SUPPORT
77 /* quotacheck callback, dqacq/dqrel callback handler */
78 int target_handle_qc_callback(struct ptlrpc_request *req);
79 int target_handle_dqacq_callback(struct ptlrpc_request *req);
80 #else
81 #define target_handle_dqacq_callback(req) ldlm_callback_reply(req, -ENOTSUPP)
82 #define target_handle_qc_callback(req) (0)
83 #endif
84
85 void target_cancel_recovery_timer(struct obd_device *obd);
86
87 #define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 * HZ / 2) /* *waves hands* */
88 void target_start_recovery_timer(struct obd_device *obd, svc_handler_t handler);
89 void target_abort_recovery(void *data);
90 void target_cleanup_recovery(struct obd_device *obd);
91 int target_queue_recovery_request(struct ptlrpc_request *req,
92                                   struct obd_device *obd);
93 int target_queue_final_reply(struct ptlrpc_request *req, int rc);
94 void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);
95
96 /* client.c */
97
98 int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg* lcfg);
99 struct client_obd *client_conn2cli(struct lustre_handle *conn);
100
101 struct mdc_open_data;
/* Client-side handle describing one open file.  Filled in by the MDC
 * open path; validated via och_magic before use. */
struct obd_client_handle {
        struct lustre_handle och_fh;     /* file handle returned by the server */
        struct llog_cookie och_cookie;   /* llog cookie associated with the open
                                          * — presumably for unlink/close log
                                          * cancellation; confirm against mdc */
        struct mdc_open_data *och_mod;   /* MDC open data (opaque here); likely
                                          * used for open replay — TODO confirm */
        __u32 och_magic;                 /* set to OBD_CLIENT_HANDLE_MAGIC when valid */
};
#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed
109
110 /* statfs_pack.c */
111 struct obd_statfs;
112 struct kstatfs;
113 void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
114 void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
115
116 /* l_lock.c */
/* Recursive sleeping lock (see l_lock.c): the same task may take it
 * multiple times; l_depth counts the recursion. */
struct lustre_lock {
        int l_depth;                 /* recursion depth of the current owner */
        struct task_struct *l_owner; /* task holding the lock, or NULL */
        struct semaphore l_sem;      /* the underlying sleeping mutex */
        spinlock_t l_spin;           /* presumably protects l_depth/l_owner
                                      * updates — confirm in l_lock.c */
};
123
124 void l_lock_init(struct lustre_lock *);
125 void l_lock(struct lustre_lock *);
126 void l_unlock(struct lustre_lock *);
127 int l_has_lock(struct lustre_lock *);
128
129
130 /*
131  *   OBD IOCTLS
132  */
133 #define OBD_IOCTL_VERSION 0x00010004
134
/* Argument block passed through OBD_IOC_* ioctls.  The fixed part is
 * followed by ioc_bulk[], which carries the four inline buffers packed
 * back-to-back (each rounded by size_round(); see obd_ioctl_packlen()).
 * Userspace serializes it with obd_ioctl_pack(); the kernel copies and
 * re-points the inline buffers in obd_ioctl_getdata(). */
struct obd_ioctl_data {
        uint32_t ioc_len;       /* total packed length, including ioc_bulk */
        uint32_t ioc_version;   /* must be OBD_IOCTL_VERSION */

        uint64_t ioc_cookie;    /* opaque caller cookie — meaning is
                                 * command-specific; not interpreted here */
        uint32_t ioc_conn1;     /* connection identifiers — command-specific */
        uint32_t ioc_conn2;

        struct obdo ioc_obdo1;  /* inline object descriptors used by the
                                 * create/getattr/brw style commands */
        struct obdo ioc_obdo2;

        obd_size         ioc_count;    /* byte count for read/write style ops */
        obd_off          ioc_offset;   /* file offset for read/write style ops */
        uint32_t         ioc_dev;      /* target OBD device index */
        uint32_t         ioc_command;  /* sub-command — command-specific */

        uint64_t ioc_nid;       /* peer network id — presumably an LNet/portals
                                 * NID; confirm against callers */
        uint32_t ioc_nal;       /* network abstraction layer number */
        uint32_t ioc_type;

        /* buffers the kernel will treat as user pointers */
        uint32_t ioc_plen1;
        char    *ioc_pbuf1;
        uint32_t ioc_plen2;
        char    *ioc_pbuf2;

        /* inline buffers for various arguments; after obd_ioctl_getdata()
         * each non-empty ioc_inlbufN points into ioc_bulk[] below */
        uint32_t ioc_inllen1;
        char    *ioc_inlbuf1;
        uint32_t ioc_inllen2;
        char    *ioc_inlbuf2;
        uint32_t ioc_inllen3;
        char    *ioc_inlbuf3;
        uint32_t ioc_inllen4;
        char    *ioc_inlbuf4;

        /* pre-C99 flexible array member: packed inline buffer payload */
        char    ioc_bulk[0];
};
173
/* Fixed prefix shared by all OBD ioctl payloads.  Read first by
 * obd_ioctl_getdata() so the kernel knows how much to copy and can
 * reject mismatched versions before touching the rest. */
struct obd_ioctl_hdr {
        uint32_t ioc_len;       /* total payload length, header included */
        uint32_t ioc_version;   /* must equal OBD_IOCTL_VERSION */
};
178
179 static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
180 {
181         int len = size_round(sizeof(struct obd_ioctl_data));
182         len += size_round(data->ioc_inllen1);
183         len += size_round(data->ioc_inllen2);
184         len += size_round(data->ioc_inllen3);
185         len += size_round(data->ioc_inllen4);
186         return len;
187 }
188
189
/* Sanity-check an obd_ioctl_data block.  Rejects absurd lengths
 * (anything over 1<<30 bytes), inline or user buffers where the
 * pointer and length disagree (pointer without length, or user-buffer
 * length without pointer), and a packed length larger than the
 * declared ioc_len.  Returns 1 if the data is invalid (after logging
 * the first problem found via CERROR), 0 if it looks usable. */
static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
{
        if (data->ioc_len > (1<<30)) {
                CERROR("OBD ioctl: ioc_len larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inllen1 > (1<<30)) {
                CERROR("OBD ioctl: ioc_inllen1 larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inllen2 > (1<<30)) {
                CERROR("OBD ioctl: ioc_inllen2 larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inllen3 > (1<<30)) {
                CERROR("OBD ioctl: ioc_inllen3 larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inllen4 > (1<<30)) {
                CERROR("OBD ioctl: ioc_inllen4 larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
                CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
                CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_inlbuf3 && !data->ioc_inllen3) {
                CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_inlbuf4 && !data->ioc_inllen4) {
                CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_pbuf1 && !data->ioc_plen1) {
                CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_pbuf2 && !data->ioc_plen2) {
                CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_plen1 && !data->ioc_pbuf1) {
                CERROR("OBD ioctl: plen1 set but NULL pointer\n");
                return 1;
        }
        if (data->ioc_plen2 && !data->ioc_pbuf2) {
                CERROR("OBD ioctl: plen2 set but NULL pointer\n");
                return 1;
        }
        if (obd_ioctl_packlen(data) > data->ioc_len) {
                CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
                       obd_ioctl_packlen(data), data->ioc_len);
                return 1;
        }
        return 0;
}
251
252 #ifndef __KERNEL__
/* (userspace) Serialize @data and its inline buffers into one
 * contiguous block suitable for handing to the ioctl.
 *
 * If *pbuf is NULL, a buffer of exactly ioc_len bytes is malloc()ed
 * and returned via *pbuf — the caller owns and must free it.  If the
 * caller supplies a buffer, it must be at least @max bytes (the @max
 * check is skipped on the allocation path, where the buffer is sized
 * to fit).  Returns 0 on success, 1 on any failure.
 *
 * NOTE(review): when validation fails after we malloc()ed, the buffer
 * is not freed here, but it has been stored in *pbuf so the caller can
 * still release it. */
static inline int obd_ioctl_pack(struct obd_ioctl_data *data, char **pbuf,
                                 int max)
{
        char *ptr;
        struct obd_ioctl_data *overlay;
        data->ioc_len = obd_ioctl_packlen(data);
        data->ioc_version = OBD_IOCTL_VERSION;

        if (*pbuf && data->ioc_len > max)
                return 1;
        if (*pbuf == NULL) {
                *pbuf = malloc(data->ioc_len);
        }
        if (!*pbuf)
                return 1;
        overlay = (struct obd_ioctl_data *)*pbuf;
        /* copy the fixed part, then append each inline buffer (LOGL
         * advances ptr by the rounded length as it copies) */
        memcpy(*pbuf, data, sizeof(*data));

        ptr = overlay->ioc_bulk;
        if (data->ioc_inlbuf1)
                LOGL(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
        if (data->ioc_inlbuf2)
                LOGL(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
        if (data->ioc_inlbuf3)
                LOGL(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
        if (data->ioc_inlbuf4)
                LOGL(data->ioc_inlbuf4, data->ioc_inllen4, ptr);
        if (obd_ioctl_is_invalid(overlay))
                return 1;

        return 0;
}
285
/* (userspace) Inverse of obd_ioctl_pack(): copy the fixed part of the
 * packed block at @pbuf back into @data, then copy each inline buffer
 * out of the trailing bulk area into the caller's original buffers.
 * The caller's buffer pointers are written into the overlay first so
 * the memcpy of the fixed part does not clobber them.  @max is unused.
 * Returns 0 on success, 1 if @pbuf is NULL. */
static inline int obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf,
                                   int max)
{
        char *ptr;
        struct obd_ioctl_data *overlay;

        if (!pbuf)
                return 1;
        overlay = (struct obd_ioctl_data *)pbuf;

        /* Preserve the caller's buffer pointers */
        overlay->ioc_inlbuf1 = data->ioc_inlbuf1;
        overlay->ioc_inlbuf2 = data->ioc_inlbuf2;
        overlay->ioc_inlbuf3 = data->ioc_inlbuf3;
        overlay->ioc_inlbuf4 = data->ioc_inlbuf4;

        memcpy(data, pbuf, sizeof(*data));

        /* LOGU copies out of the bulk area and advances ptr */
        ptr = overlay->ioc_bulk;
        if (data->ioc_inlbuf1)
                LOGU(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
        if (data->ioc_inlbuf2)
                LOGU(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
        if (data->ioc_inlbuf3)
                LOGU(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
        if (data->ioc_inlbuf4)
                LOGU(data->ioc_inlbuf4, data->ioc_inllen4, ptr);

        return 0;
}
316 #endif
317
318 #include <linux/obd_support.h>
319
320 /* buffer MUST be at least the size of obd_ioctl_hdr */
321 static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
322 {
323         struct obd_ioctl_hdr hdr;
324         struct obd_ioctl_data *data;
325         int err;
326         int offset = 0;
327         ENTRY;
328
329         err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
330         if (err)
331                 RETURN(err);
332
333         if (hdr.ioc_version != OBD_IOCTL_VERSION) {
334                 CERROR("Version mismatch kernel vs application\n");
335                 RETURN(-EINVAL);
336         }
337
338         if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) {
339                 CERROR("User buffer len %d exceeds %d max buffer\n",
340                        hdr.ioc_len, OBD_MAX_IOCTL_BUFFER);
341                 RETURN(-EINVAL);
342         }
343
344         if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) {
345                 CERROR("user buffer too small for ioctl (%d)\n", hdr.ioc_len);
346                 RETURN(-EINVAL);
347         }
348
349         /* XXX allocate this more intelligently, using kmalloc when
350          * appropriate */
351         OBD_VMALLOC(*buf, hdr.ioc_len);
352         if (*buf == NULL) {
353                 CERROR("Cannot allocate control buffer of len %d\n",
354                        hdr.ioc_len);
355                 RETURN(-EINVAL);
356         }
357         *len = hdr.ioc_len;
358         data = (struct obd_ioctl_data *)*buf;
359
360         err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
361         if (err) {
362                 OBD_VFREE(*buf, hdr.ioc_len);
363                 RETURN(err);
364         }
365
366         if (obd_ioctl_is_invalid(data)) {
367                 CERROR("ioctl not correctly formatted\n");
368                 OBD_VFREE(*buf, hdr.ioc_len);
369                 RETURN(-EINVAL);
370         }
371
372         if (data->ioc_inllen1) {
373                 data->ioc_inlbuf1 = &data->ioc_bulk[0];
374                 offset += size_round(data->ioc_inllen1);
375         }
376
377         if (data->ioc_inllen2) {
378                 data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
379                 offset += size_round(data->ioc_inllen2);
380         }
381
382         if (data->ioc_inllen3) {
383                 data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
384                 offset += size_round(data->ioc_inllen3);
385         }
386
387         if (data->ioc_inllen4) {
388                 data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
389         }
390
391         RETURN(0);
392 }
393
394 static inline void obd_ioctl_freedata(char *buf, int len)
395 {
396         ENTRY;
397
398         OBD_VFREE(buf, len);
399         EXIT;
400         return;
401 }
402
403 #define OBD_IOC_CREATE                 _IOR ('f', 101, long)
404 #define OBD_IOC_DESTROY                _IOW ('f', 104, long)
405 #define OBD_IOC_PREALLOCATE            _IOWR('f', 105, long)
406
407 #define OBD_IOC_SETATTR                _IOW ('f', 107, long)
408 #define OBD_IOC_GETATTR                _IOR ('f', 108, long)
409 #define OBD_IOC_READ                   _IOWR('f', 109, long)
410 #define OBD_IOC_WRITE                  _IOWR('f', 110, long)
411
412
413 #define OBD_IOC_STATFS                 _IOWR('f', 113, long)
414 #define OBD_IOC_SYNC                   _IOW ('f', 114, long)
415 #define OBD_IOC_READ2                  _IOWR('f', 115, long)
416 #define OBD_IOC_FORMAT                 _IOWR('f', 116, long)
417 #define OBD_IOC_PARTITION              _IOWR('f', 117, long)
418 #define OBD_IOC_COPY                   _IOWR('f', 120, long)
419 #define OBD_IOC_MIGR                   _IOWR('f', 121, long)
420 #define OBD_IOC_PUNCH                  _IOWR('f', 122, long)
421
422 #define OBD_IOC_MODULE_DEBUG           _IOWR('f', 124, long)
423 #define OBD_IOC_BRW_READ               _IOWR('f', 125, long)
424 #define OBD_IOC_BRW_WRITE              _IOWR('f', 126, long)
425 #define OBD_IOC_NAME2DEV               _IOWR('f', 127, long)
426 #define OBD_IOC_UUID2DEV               _IOWR('f', 130, long)
427 #define OBD_IOC_GETNAME                _IOR ('f', 131, long)
428
429 #define OBD_IOC_LOV_GET_CONFIG         _IOWR('f', 132, long)
430 #define OBD_IOC_CLIENT_RECOVER         _IOW ('f', 133, long)
431
432 #define OBD_IOC_DEC_FS_USE_COUNT       _IO  ('f', 139      )
433 #define OBD_IOC_NO_TRANSNO             _IOW ('f', 140, long)
434 #define OBD_IOC_SET_READONLY           _IOW ('f', 141, long)
435 #define OBD_IOC_ABORT_RECOVERY         _IOR ('f', 142, long)
436
437 #define OBD_GET_VERSION                _IOWR ('f', 144, long)
438
439 #define OBD_IOC_CLOSE_UUID             _IOWR ('f', 147, long)
440
441 #define OBD_IOC_LOV_SETSTRIPE          _IOW ('f', 154, long)
442 #define OBD_IOC_LOV_GETSTRIPE          _IOW ('f', 155, long)
443 #define OBD_IOC_LOV_SETEA              _IOW ('f', 156, long)
444
445 #define OBD_IOC_QUOTACHECK             _IOW ('f', 160, int)
446 #define OBD_IOC_POLL_QUOTACHECK        _IOR ('f', 161, struct if_quotacheck *)
447 #define OBD_IOC_QUOTACTL               _IOWR('f', 162, struct if_quotactl *)
448
449 #define OBD_IOC_MOUNTOPT               _IOWR('f', 170, long)
450
451 #define OBD_IOC_RECORD                 _IOWR('f', 180, long)
452 #define OBD_IOC_ENDRECORD              _IOWR('f', 181, long)
453 #define OBD_IOC_PARSE                  _IOWR('f', 182, long)
454 #define OBD_IOC_DORECORD               _IOWR('f', 183, long)
455 #define OBD_IOC_PROCESS_CFG            _IOWR('f', 184, long)
456 #define OBD_IOC_DUMP_LOG               _IOWR('f', 185, long)
457 #define OBD_IOC_CLEAR_LOG              _IOWR('f', 186, long)
458 #define OBD_IOC_PARAM                  _IOW ('f', 187, long)
459
460 #define OBD_IOC_CATLOGLIST             _IOWR('f', 190, long)
461 #define OBD_IOC_LLOG_INFO              _IOWR('f', 191, long)
462 #define OBD_IOC_LLOG_PRINT             _IOWR('f', 192, long)
463 #define OBD_IOC_LLOG_CANCEL            _IOWR('f', 193, long)
464 #define OBD_IOC_LLOG_REMOVE            _IOWR('f', 194, long)
465 #define OBD_IOC_LLOG_CHECK             _IOWR('f', 195, long)
466 #define OBD_IOC_LLOG_CATINFO           _IOWR('f', 196, long)
467
468 #define ECHO_IOC_GET_STRIPE            _IOWR('f', 200, long)
469 #define ECHO_IOC_SET_STRIPE            _IOWR('f', 201, long)
470 #define ECHO_IOC_ENQUEUE               _IOWR('f', 202, long)
471 #define ECHO_IOC_CANCEL                _IOWR('f', 203, long)
472
473 /* XXX _IOWR('f', 250, long) has been defined in
474  * lnet/include/libcfs/kp30.h for debug, don't use it
475  */
476
477 /* Until such time as we get_info the per-stripe maximum from the OST,
478  * we define this to be 2T - 4k, which is the ext3 maxbytes. */
479 #define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
480
481 #define POISON_BULK 0
482
483 /*
484  * l_wait_event is a flexible sleeping function, permitting simple caller
485  * configuration of interrupt and timeout sensitivity along with actions to
486  * be performed in the event of either exception.
487  *
488  * The first form of usage looks like this:
489  *
490  * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
491  *                                           intr_handler, callback_data);
492  * rc = l_wait_event(waitq, condition, &lwi);
493  *
494  * l_wait_event() makes the current process wait on 'waitq' until 'condition'
495  * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending.  It
496  * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
497  * 'condition' becomes true, it optionally calls the specified 'intr_handler'
498  * if not NULL, and returns -EINTR.
499  *
500  * If a non-zero timeout is specified, signals are ignored until the timeout
501  * has expired.  At this time, if 'timeout_handler' is not NULL it is called.
502  * If it returns FALSE l_wait_event() continues to wait as described above with
503  * signals enabled.  Otherwise it returns -ETIMEDOUT.
504  *
505  * LWI_INTR(intr_handler, callback_data) is shorthand for
506  * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
507  *
508  * The second form of usage looks like this:
509  *
510  * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
511  * rc = l_wait_event(waitq, condition, &lwi);
512  *
513  * This form is the same as the first except that it COMPLETELY IGNORES
514  * SIGNALS.  The caller must therefore beware that if 'timeout' is zero, or if
515  * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
516  * can unblock the current process is 'condition' becoming TRUE.
517  *
518  * Another form of usage is:
519  * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
520  *                                               timeout_handler);
521  * rc = l_wait_event(waitq, condition, &lwi);
522  * This is the same as previous case, but condition is checked once every
523  * 'interval' jiffies (if non-zero).
524  *
525  * Subtle synchronization point: this macro does *not* necessarily take the
526  * wait-queue spin-lock before returning, and, hence, following idiom is safe
527  * ONLY when caller provides some external locking:
528  *
529  *             Thread1                            Thread2
530  *
531  *   l_wait_event(&obj->wq, ....);                                       (1)
532  *
533  *                                    wake_up(&obj->wq):                 (2)
534  *                                         spin_lock(&q->lock);          (2.1)
535  *                                         __wake_up_common(q, ...);     (2.2)
536  *                                         spin_unlock(&q->lock, flags); (2.3)
537  *
538  *   OBD_FREE_PTR(obj);                                                  (3)
539  *
540  * As l_wait_event() may "short-cut" execution and return without taking
541  * wait-queue spin-lock, some additional synchronization is necessary to
542  * guarantee that step (3) can begin only after (2.3) finishes.
543  *
544  * XXX nikita: some ptlrpc daemon threads have races of that sort.
545  *
546  */
547
/* Sentinel meaning "take fatal signals, but run no callback on them";
 * tested for explicitly inside __l_wait_event(). */
#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))

/* Wait policy for l_wait_event(); build one with the LWI_* macros
 * below rather than by hand. */
struct l_wait_info {
        long   lwi_timeout;             /* max wait in jiffies; 0 = forever */
        long   lwi_interval;            /* re-check condition every this many
                                         * jiffies; 0 = sleep full timeout */
        int  (*lwi_on_timeout)(void *); /* called on expiry; non-zero return
                                         * (or NULL handler) => -ETIMEDOUT */
        void (*lwi_on_signal)(void *);  /* called on a fatal signal, unless it
                                         * is LWI_ON_SIGNAL_NOOP */
        void  *lwi_cb_data;             /* opaque argument for both callbacks */
};
557
/* NB: LWI_TIMEOUT ignores signals completely (lwi_on_signal stays NULL) */
#define LWI_TIMEOUT(time, cb, data)             \
((struct l_wait_info) {                         \
        .lwi_timeout    = time,                 \
        .lwi_on_timeout = cb,                   \
        .lwi_cb_data    = data,                 \
        .lwi_interval   = 0                     \
})

/* As LWI_TIMEOUT, but re-evaluate the condition every 'interval' jiffies */
#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)  \
((struct l_wait_info) {                                 \
        .lwi_timeout    = time,                         \
        .lwi_on_timeout = cb,                           \
        .lwi_cb_data    = data,                         \
        .lwi_interval   = interval                      \
})


/* Timeout plus signal sensitivity; a NULL sig_cb still enables fatal
 * signals via the LWI_ON_SIGNAL_NOOP sentinel */
#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)                          \
((struct l_wait_info) {                                                        \
        .lwi_timeout    = time,                                                \
        .lwi_on_timeout = time_cb,                                             \
        .lwi_on_signal = (sig_cb == NULL) ? LWI_ON_SIGNAL_NOOP : sig_cb,       \
        .lwi_cb_data    = data,                                                \
        .lwi_interval    = 0                                                   \
})

/* Signal-sensitive wait with no timeout */
#define LWI_INTR(cb, data)  LWI_TIMEOUT_INTR(0, NULL, cb, data)

/* The "killable" signals that may interrupt an l_wait_event() */
#define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) |                \
                           sigmask(SIGTERM) | sigmask(SIGQUIT) |               \
                           sigmask(SIGALRM))
590
591 #ifdef __KERNEL__
/* Replace the current task's blocked-signal mask so that ONLY the
 * signals in @sigs remain deliverable (siginitsetinv blocks everything
 * else), returning the previous mask so the caller can restore it.
 * The SIGNAL_MASK_LOCK/UNLOCK pair and RECALC_SIGPENDING keep the
 * task's pending-signal state consistent across the change. */
static inline sigset_t l_w_e_set_sigs(int sigs)
{
        sigset_t old;
        unsigned long irqflags;

        SIGNAL_MASK_LOCK(current, irqflags);
        old = current->blocked;  /* saved for the caller to restore */
        siginitsetinv(&current->blocked, sigs);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, irqflags);

        return old;
}
605
606 /*
607  * wait for @condition to become true, but no longer than timeout, specified
608  * by @info.
609  */
610 #define __l_wait_event(wq, condition, info, ret, excl)                         \
611 do {                                                                           \
612         wait_queue_t  __wait;                                                  \
613         unsigned long __timeout = info->lwi_timeout;                           \
614         unsigned long __irqflags;                                              \
615         sigset_t      __blocked;                                               \
616                                                                                \
617         ret = 0;                                                               \
618         if (condition)                                                         \
619                 break;                                                         \
620                                                                                \
621         init_waitqueue_entry(&__wait, current);                                \
622         if (excl)                                                              \
623                 add_wait_queue_exclusive(&wq, &__wait);                        \
624         else                                                                   \
625                 add_wait_queue(&wq, &__wait);                                  \
626                                                                                \
627         /* Block all signals (just the non-fatal ones if no timeout). */       \
628         if (info->lwi_on_signal != NULL && __timeout == 0)                     \
629                 __blocked = l_w_e_set_sigs(LUSTRE_FATAL_SIGS);                 \
630         else                                                                   \
631                 __blocked = l_w_e_set_sigs(0);                                 \
632                                                                                \
633         for (;;) {                                                             \
634                 set_current_state(TASK_INTERRUPTIBLE);                         \
635                                                                                \
636                 if (condition)                                                 \
637                         break;                                                 \
638                                                                                \
639                 if (__timeout == 0) {                                          \
640                         schedule();                                            \
641                 } else {                                                       \
642                         unsigned long interval = info->lwi_interval?           \
643                                              min_t(unsigned long,              \
644                                                  info->lwi_interval,__timeout):\
645                                              __timeout;                        \
646                         __timeout -= interval - schedule_timeout(interval);    \
647                         if (__timeout == 0) {                                  \
648                                 if (info->lwi_on_timeout == NULL ||            \
649                                     info->lwi_on_timeout(info->lwi_cb_data)) { \
650                                         ret = -ETIMEDOUT;                      \
651                                         break;                                 \
652                                 }                                              \
653                                 /* Take signals after the timeout expires. */  \
654                                 if (info->lwi_on_signal != NULL)               \
655                                     (void)l_w_e_set_sigs(LUSTRE_FATAL_SIGS);   \
656                         }                                                      \
657                 }                                                              \
658                                                                                \
659                 if (condition)                                                 \
660                         break;                                                 \
661                                                                                \
662                 if (signal_pending(current)) {                                 \
663                         if (info->lwi_on_signal != NULL && __timeout == 0) {   \
664                                 if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
665                                         info->lwi_on_signal(info->lwi_cb_data);\
666                                 ret = -EINTR;                                  \
667                                 break;                                         \
668                         }                                                      \
669                         /* We have to do this here because some signals */     \
670                         /* are not blockable - ie from strace(1).       */     \
671                         /* In these cases we want to schedule_timeout() */     \
672                         /* again, because we don't want that to return  */     \
673                         /* -EINTR when the RPC actually succeeded.      */     \
674                         /* the RECALC_SIGPENDING below will deliver the */     \
675                         /* signal properly.                             */     \
676                         SIGNAL_MASK_LOCK(current, __irqflags);                 \
677                         CLEAR_SIGPENDING;                                      \
678                         SIGNAL_MASK_UNLOCK(current, __irqflags);               \
679                 }                                                              \
680         }                                                                      \
681                                                                                \
682         SIGNAL_MASK_LOCK(current, __irqflags);                                 \
683         current->blocked = __blocked;                                          \
684         RECALC_SIGPENDING;                                                     \
685         SIGNAL_MASK_UNLOCK(current, __irqflags);                               \
686                                                                                \
687         current->state = TASK_RUNNING;                                         \
688         remove_wait_queue(&wq, &__wait);                                       \
689 } while(0)
690
691 #else /* !__KERNEL__ */
/*
 * Userspace (liblustre) implementation: poll liblustre_wait_event()
 * until @condition holds or the timeout in @info expires, tracking
 * elapsed wall-clock time with time(NULL).  Unlike the kernel version
 * this never sets @ret to -EINTR, and @excl is not referenced.
 */
#define __l_wait_event(wq, condition, info, ret, excl)                  \
do {                                                                    \
        long __timeout = info->lwi_timeout;                             \
        long __now;                                                     \
        long __then = 0;                                                \
        int  __timed_out = 0;                                           \
                                                                        \
        ret = 0;                                                        \
        if (condition)                                                  \
                break;                                                  \
                                                                        \
        if (__timeout == 0)                                             \
                __timeout = 1000000000;                                 \
        else                                                            \
                __then = time(NULL);                                    \
                                                                        \
        while (!(condition)) {                                          \
                if (liblustre_wait_event(info->lwi_interval?:__timeout) || \
                    (info->lwi_interval && info->lwi_interval < __timeout)) {\
                        if (__timeout != 0 && info->lwi_timeout != 0) { \
                                __now = time(NULL);                     \
                                __timeout -= __now - __then;            \
                                if (__timeout < 0)                      \
                                        __timeout = 0;                  \
                                __then = __now;                         \
                        }                                               \
                        continue;                                       \
                }                                                       \
                                                                        \
                if (info->lwi_timeout != 0 && !__timed_out) {           \
                        __timed_out = 1;                                \
                        if (info->lwi_on_timeout == NULL ||             \
                            info->lwi_on_timeout(info->lwi_cb_data)) {  \
                                ret = -ETIMEDOUT;                       \
                                break;                                  \
                        }                                               \
                }                                                       \
        }                                                               \
} while (0)
731
732 #endif /* __KERNEL__ */
733
/* Wait (non-exclusively) on @wq until @condition holds, subject to the
 * timeout/signal policy in @info.  Evaluates to 0 on success,
 * -ETIMEDOUT on expiry, or -EINTR (kernel build only) on a fatal
 * signal.  See the long usage comment above struct l_wait_info. */
#define l_wait_event(wq, condition, info)                       \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info, __ret, 0);        \
        __ret;                                                  \
})
742
/* As l_wait_event(), but joins @wq as an exclusive waiter so a single
 * wake_up() wakes only one such waiter (kernel build; the userspace
 * implementation ignores the exclusive flag). */
#define l_wait_event_exclusive(wq, condition, info)             \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info, __ret, 1);        \
        __ret;                                                  \
})
751
752 #ifdef __KERNEL__
753 /* initialize ost_lvb according to inode */
/* initialize ost_lvb according to inode: snapshot the inode's size,
 * block count and a/m/ctime into the lock value block.  LTIME_S
 * presumably extracts the seconds field of a kernel timestamp (it is
 * a compat macro; confirm in lustre_compat25.h). */
static inline void inode_init_lvb(struct inode *inode, struct ost_lvb *lvb)
{
        lvb->lvb_size = inode->i_size;
        lvb->lvb_blocks = inode->i_blocks;
        lvb->lvb_mtime = LTIME_S(inode->i_mtime);
        lvb->lvb_atime = LTIME_S(inode->i_atime);
        lvb->lvb_ctime = LTIME_S(inode->i_ctime);
}
762 #else
763 /* defined in liblustre/llite_lib.h */
764 #endif
765
766 #ifdef __KERNEL__
767 #define LIBLUSTRE_CLIENT (0)
768 #else
769 #define LIBLUSTRE_CLIENT (1)
770 #endif
771
772 #endif /* _LUSTRE_LIB_H */
773