/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2001 Cluster File Systems, Inc. <braam@clusterfs.com>
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Basic Lustre library routines.
 *
 */

#ifndef _LUSTRE_LIB_H
#define _LUSTRE_LIB_H

#ifndef __KERNEL__
# include <string.h>
# include <sys/types.h>
#else
# include <asm/semaphore.h>
# include <linux/rwsem.h>
# include <linux/sched.h>
# include <linux/signal.h>
# include <linux/types.h>
#endif
#include <libcfs/kp30.h>
#include <linux/lustre_idl.h>
#include <linux/lustre_cfg.h>

#ifndef LP_POISON
#if BITS_PER_LONG > 32
# define LI_POISON ((int)0x5a5a5a5a5a5a5a5a)
# define LL_POISON ((long)0x5a5a5a5a5a5a5a5a)
# define LP_POISON ((void *)(long)0x5a5a5a5a5a5a5a5a)
#else
# define LI_POISON ((int)0x5a5a5a5a)
# define LL_POISON ((long)0x5a5a5a5a)
# define LP_POISON ((void *)(long)0x5a5a5a5a)
#endif
#endif

/* prng.c */
unsigned int ll_rand(void);        /* returns a random 32-bit integer */
void ll_srand(unsigned int, unsigned int);     /* seed the generator */
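
/*
 * Minimal usage sketch (illustrative only, not part of the original header):
 * seed the generator once with two values, then draw 32-bit values from it.
 * The seed sources and 'nbuckets' below are assumptions.
 *
 *     ll_srand(seed1, seed2);                    // e.g. time and PID
 *     unsigned int token = ll_rand();            // uniform 32-bit value
 *     unsigned int slot  = ll_rand() % nbuckets; // hypothetical consumer
 */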

/* target.c */
struct ptlrpc_request;
struct recovd_data;
struct recovd_obd;
struct obd_export;
#include <linux/lustre_ha.h>
#include <linux/lustre_net.h>
#include <linux/lustre_compat25.h>
#include <linux/lvfs.h>

int target_handle_connect(struct ptlrpc_request *req, svc_handler_t handler);
int target_handle_disconnect(struct ptlrpc_request *req);
void target_destroy_export(struct obd_export *exp);
int target_handle_reconnect(struct lustre_handle *conn, struct obd_export *exp,
                            struct obd_uuid *cluuid);
int target_handle_ping(struct ptlrpc_request *req);
void target_committed_to_req(struct ptlrpc_request *req);

#ifdef HAVE_QUOTA_SUPPORT
/* quotacheck callback, dqacq/dqrel callback handler */
int target_handle_qc_callback(struct ptlrpc_request *req);
int target_handle_dqacq_callback(struct ptlrpc_request *req);
#else
#define target_handle_dqacq_callback(req) ldlm_callback_reply(req, -ENOTSUPP)
#define target_handle_qc_callback(req) (0)
#endif

void target_cancel_recovery_timer(struct obd_device *obd);

#define OBD_RECOVERY_TIMEOUT (obd_timeout * 5 * HZ / 2) /* *waves hands* */
void target_start_recovery_timer(struct obd_device *obd, svc_handler_t handler);
void target_abort_recovery(void *data);
void target_cleanup_recovery(struct obd_device *obd);
int target_queue_recovery_request(struct ptlrpc_request *req,
                                  struct obd_device *obd);
int target_queue_final_reply(struct ptlrpc_request *req, int rc);
void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);

/* client.c */

int client_sanobd_setup(struct obd_device *obddev, struct lustre_cfg* lcfg);
struct client_obd *client_conn2cli(struct lustre_handle *conn);

struct mdc_open_data;
struct obd_client_handle {
        struct lustre_handle och_fh;
        struct llog_cookie och_cookie;
        struct mdc_open_data *och_mod;
        __u32 och_magic;
};
#define OBD_CLIENT_HANDLE_MAGIC 0xd15ea5ed

/* statfs_pack.c */
void statfs_pack(struct obd_statfs *osfs, struct kstatfs *sfs);
void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);

/* l_lock.c */
struct lustre_lock {
        int l_depth;
        struct task_struct *l_owner;
        struct semaphore l_sem;
        spinlock_t l_spin;
};

void l_lock_init(struct lustre_lock *);
void l_lock(struct lustre_lock *);
void l_unlock(struct lustre_lock *);
int l_has_lock(struct lustre_lock *);
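
/*
 * Illustrative sketch of the l_lock API above (not from the original header;
 * 'foo_lock' and the critical section are assumptions).  The l_owner/l_depth
 * fields suggest a lock owned by a single task, and l_has_lock() lets
 * assertions check ownership:
 *
 *     static struct lustre_lock foo_lock;
 *
 *     l_lock_init(&foo_lock);
 *     l_lock(&foo_lock);
 *     LASSERT(l_has_lock(&foo_lock));   // ownership check for debugging
 *     ... critical section ...
 *     l_unlock(&foo_lock);
 */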


/*
 *   OBD IOCTLS
 */
#define OBD_IOCTL_VERSION 0x00010004

struct obd_ioctl_data {
        uint32_t ioc_len;
        uint32_t ioc_version;

        uint64_t ioc_cookie;
        uint32_t ioc_conn1;
        uint32_t ioc_conn2;

        struct obdo ioc_obdo1;
        struct obdo ioc_obdo2;

        obd_size         ioc_count;
        obd_off          ioc_offset;
        uint32_t         ioc_dev;
        uint32_t         ioc_command;

        uint64_t ioc_nid;
        uint32_t ioc_nal;
        uint32_t ioc_type;

        /* buffers the kernel will treat as user pointers */
        uint32_t ioc_plen1;
        char    *ioc_pbuf1;
        uint32_t ioc_plen2;
        char    *ioc_pbuf2;

        /* inline buffers for various arguments */
        uint32_t ioc_inllen1;
        char    *ioc_inlbuf1;
        uint32_t ioc_inllen2;
        char    *ioc_inlbuf2;
        uint32_t ioc_inllen3;
        char    *ioc_inlbuf3;
        uint32_t ioc_inllen4;
        char    *ioc_inlbuf4;

        char    ioc_bulk[0];
};

struct obd_ioctl_hdr {
        uint32_t ioc_len;
        uint32_t ioc_version;
};

static inline int obd_ioctl_packlen(struct obd_ioctl_data *data)
{
        int len = size_round(sizeof(struct obd_ioctl_data));
        len += size_round(data->ioc_inllen1);
        len += size_round(data->ioc_inllen2);
        len += size_round(data->ioc_inllen3);
        len += size_round(data->ioc_inllen4);
        return len;
}
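
/*
 * Worked example (illustrative, assuming size_round() rounds up to the next
 * multiple of 8): with ioc_inllen1 = 5, ioc_inllen2 = 12 and no third or
 * fourth buffer, obd_ioctl_packlen() returns
 *
 *     size_round(sizeof(struct obd_ioctl_data)) + 8 + 16
 *
 * i.e. the fixed header plus each inline buffer rounded up, laid out back to
 * back in ioc_bulk[] in buffer order.
 */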


static inline int obd_ioctl_is_invalid(struct obd_ioctl_data *data)
{
        if (data->ioc_len > (1<<30)) {
                CERROR("OBD ioctl: ioc_len larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inllen1 > (1<<30)) {
                CERROR("OBD ioctl: ioc_inllen1 larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inllen2 > (1<<30)) {
                CERROR("OBD ioctl: ioc_inllen2 larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inllen3 > (1<<30)) {
                CERROR("OBD ioctl: ioc_inllen3 larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inllen4 > (1<<30)) {
                CERROR("OBD ioctl: ioc_inllen4 larger than 1<<30\n");
                return 1;
        }
        if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
                CERROR("OBD ioctl: inlbuf1 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
                CERROR("OBD ioctl: inlbuf2 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_inlbuf3 && !data->ioc_inllen3) {
                CERROR("OBD ioctl: inlbuf3 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_inlbuf4 && !data->ioc_inllen4) {
                CERROR("OBD ioctl: inlbuf4 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_pbuf1 && !data->ioc_plen1) {
                CERROR("OBD ioctl: pbuf1 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_pbuf2 && !data->ioc_plen2) {
                CERROR("OBD ioctl: pbuf2 pointer but 0 length\n");
                return 1;
        }
        if (data->ioc_plen1 && !data->ioc_pbuf1) {
                CERROR("OBD ioctl: plen1 set but NULL pointer\n");
                return 1;
        }
        if (data->ioc_plen2 && !data->ioc_pbuf2) {
                CERROR("OBD ioctl: plen2 set but NULL pointer\n");
                return 1;
        }
        if (obd_ioctl_packlen(data) > data->ioc_len) {
                CERROR("OBD ioctl: packlen exceeds ioc_len (%d > %d)\n",
                       obd_ioctl_packlen(data), data->ioc_len);
                return 1;
        }
        return 0;
}

#ifndef __KERNEL__
static inline int obd_ioctl_pack(struct obd_ioctl_data *data, char **pbuf,
                                 int max)
{
        char *ptr;
        struct obd_ioctl_data *overlay;
        data->ioc_len = obd_ioctl_packlen(data);
        data->ioc_version = OBD_IOCTL_VERSION;

        if (*pbuf && data->ioc_len > max)
                return 1;
        if (*pbuf == NULL) {
                *pbuf = malloc(data->ioc_len);
        }
        if (!*pbuf)
                return 1;
        overlay = (struct obd_ioctl_data *)*pbuf;
        memcpy(*pbuf, data, sizeof(*data));

        ptr = overlay->ioc_bulk;
        if (data->ioc_inlbuf1)
                LOGL(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
        if (data->ioc_inlbuf2)
                LOGL(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
        if (data->ioc_inlbuf3)
                LOGL(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
        if (data->ioc_inlbuf4)
                LOGL(data->ioc_inlbuf4, data->ioc_inllen4, ptr);
        if (obd_ioctl_is_invalid(overlay))
                return 1;

        return 0;
}

static inline int obd_ioctl_unpack(struct obd_ioctl_data *data, char *pbuf,
                                   int max)
{
        char *ptr;
        struct obd_ioctl_data *overlay;

        if (!pbuf)
                return 1;
        overlay = (struct obd_ioctl_data *)pbuf;

        /* Preserve the caller's buffer pointers */
        overlay->ioc_inlbuf1 = data->ioc_inlbuf1;
        overlay->ioc_inlbuf2 = data->ioc_inlbuf2;
        overlay->ioc_inlbuf3 = data->ioc_inlbuf3;
        overlay->ioc_inlbuf4 = data->ioc_inlbuf4;

        memcpy(data, pbuf, sizeof(*data));

        ptr = overlay->ioc_bulk;
        if (data->ioc_inlbuf1)
                LOGU(data->ioc_inlbuf1, data->ioc_inllen1, ptr);
        if (data->ioc_inlbuf2)
                LOGU(data->ioc_inlbuf2, data->ioc_inllen2, ptr);
        if (data->ioc_inlbuf3)
                LOGU(data->ioc_inlbuf3, data->ioc_inllen3, ptr);
        if (data->ioc_inlbuf4)
                LOGU(data->ioc_inlbuf4, data->ioc_inllen4, ptr);

        return 0;
}
#endif
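
/*
 * Userspace usage sketch (illustrative; the device path, ioctl choice,
 * dev_id, lcfg and cfg_len are assumptions, not part of this header): pack
 * an obd_ioctl_data into one contiguous buffer, hand it to the kernel, then
 * unpack the reply back into the caller's structure.
 *
 *     struct obd_ioctl_data data = { 0 };
 *     char *buf = NULL;
 *     int fd, rc;
 *
 *     data.ioc_dev = dev_id;                  // target obd device
 *     data.ioc_inllen1 = cfg_len;             // inline payload, e.g. a lustre_cfg
 *     data.ioc_inlbuf1 = (char *)lcfg;
 *
 *     rc = obd_ioctl_pack(&data, &buf, 0);    // buf is malloc()ed here
 *     if (rc == 0) {
 *             fd = open("/dev/obd", O_RDWR);  // assumed control device
 *             rc = ioctl(fd, OBD_IOC_PROCESS_CFG, buf);
 *             if (rc == 0)
 *                     rc = obd_ioctl_unpack(&data, buf, data.ioc_len);
 *             close(fd);
 *     }
 *     free(buf);
 */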

#include <linux/obd_support.h>

/* buffer MUST be at least the size of obd_ioctl_hdr */
static inline int obd_ioctl_getdata(char **buf, int *len, void *arg)
{
        struct obd_ioctl_hdr hdr;
        struct obd_ioctl_data *data;
        int err;
        int offset = 0;
        ENTRY;

        err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
        if (err)
                RETURN(err);

        if (hdr.ioc_version != OBD_IOCTL_VERSION) {
                CERROR("Version mismatch kernel vs application\n");
                RETURN(-EINVAL);
        }

        if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) {
                CERROR("User buffer len %d exceeds %d max buffer\n",
                       hdr.ioc_len, OBD_MAX_IOCTL_BUFFER);
                RETURN(-EINVAL);
        }

        if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) {
                CERROR("user buffer too small for ioctl (%d)\n", hdr.ioc_len);
                RETURN(-EINVAL);
        }

        /* XXX allocate this more intelligently, using kmalloc when
         * appropriate */
        OBD_VMALLOC(*buf, hdr.ioc_len);
        if (*buf == NULL) {
                CERROR("Cannot allocate control buffer of len %d\n",
                       hdr.ioc_len);
                RETURN(-EINVAL);
        }
        *len = hdr.ioc_len;
        data = (struct obd_ioctl_data *)*buf;

        err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
        if (err) {
                OBD_VFREE(*buf, hdr.ioc_len);
                RETURN(err);
        }

        if (obd_ioctl_is_invalid(data)) {
                CERROR("ioctl not correctly formatted\n");
                OBD_VFREE(*buf, hdr.ioc_len);
                RETURN(-EINVAL);
        }

        if (data->ioc_inllen1) {
                data->ioc_inlbuf1 = &data->ioc_bulk[0];
                offset += size_round(data->ioc_inllen1);
        }

        if (data->ioc_inllen2) {
                data->ioc_inlbuf2 = &data->ioc_bulk[0] + offset;
                offset += size_round(data->ioc_inllen2);
        }

        if (data->ioc_inllen3) {
                data->ioc_inlbuf3 = &data->ioc_bulk[0] + offset;
                offset += size_round(data->ioc_inllen3);
        }

        if (data->ioc_inllen4) {
                data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
        }

        RETURN(0);
}

static inline void obd_ioctl_freedata(char *buf, int len)
{
        ENTRY;

        OBD_VFREE(buf, len);
        EXIT;
        return;
}
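
/*
 * Kernel-side usage sketch (illustrative; the handler name and command
 * dispatch are assumptions): a driver ioctl method copies in and validates
 * the request with obd_ioctl_getdata(), works on the re-pointed inline
 * buffers, and releases the staging buffer with obd_ioctl_freedata().
 *
 *     static int my_obd_ioctl(unsigned int cmd, void *uarg)
 *     {
 *             struct obd_ioctl_data *data;
 *             char *buf;
 *             int len, rc;
 *
 *             rc = obd_ioctl_getdata(&buf, &len, uarg);
 *             if (rc)
 *                     return rc;
 *             data = (struct obd_ioctl_data *)buf;
 *
 *             switch (cmd) {
 *             case OBD_IOC_GETNAME:
 *                     ... fill data->ioc_inlbuf1, copy_to_user() ...
 *                     break;
 *             default:
 *                     rc = -ENOTTY;
 *             }
 *
 *             obd_ioctl_freedata(buf, len);
 *             return rc;
 *     }
 */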

#define OBD_IOC_CREATE                 _IOR ('f', 101, long)
#define OBD_IOC_DESTROY                _IOW ('f', 104, long)
#define OBD_IOC_PREALLOCATE            _IOWR('f', 105, long)

#define OBD_IOC_SETATTR                _IOW ('f', 107, long)
#define OBD_IOC_GETATTR                _IOR ('f', 108, long)
#define OBD_IOC_READ                   _IOWR('f', 109, long)
#define OBD_IOC_WRITE                  _IOWR('f', 110, long)


#define OBD_IOC_STATFS                 _IOWR('f', 113, long)
#define OBD_IOC_SYNC                   _IOW ('f', 114, long)
#define OBD_IOC_READ2                  _IOWR('f', 115, long)
#define OBD_IOC_FORMAT                 _IOWR('f', 116, long)
#define OBD_IOC_PARTITION              _IOWR('f', 117, long)
#define OBD_IOC_COPY                   _IOWR('f', 120, long)
#define OBD_IOC_MIGR                   _IOWR('f', 121, long)
#define OBD_IOC_PUNCH                  _IOWR('f', 122, long)

#define OBD_IOC_MODULE_DEBUG           _IOWR('f', 124, long)
#define OBD_IOC_BRW_READ               _IOWR('f', 125, long)
#define OBD_IOC_BRW_WRITE              _IOWR('f', 126, long)
#define OBD_IOC_NAME2DEV               _IOWR('f', 127, long)
#define OBD_IOC_UUID2DEV               _IOWR('f', 130, long)
#define OBD_IOC_GETNAME                _IOR ('f', 131, long)

#define OBD_IOC_LOV_GET_CONFIG         _IOWR('f', 132, long)
#define OBD_IOC_CLIENT_RECOVER         _IOW ('f', 133, long)

#define OBD_IOC_DEC_FS_USE_COUNT       _IO  ('f', 139      )
#define OBD_IOC_NO_TRANSNO             _IOW ('f', 140, long)
#define OBD_IOC_SET_READONLY           _IOW ('f', 141, long)
#define OBD_IOC_ABORT_RECOVERY         _IOR ('f', 142, long)

#define OBD_GET_VERSION                _IOWR ('f', 144, long)

#define OBD_IOC_CLOSE_UUID             _IOWR ('f', 147, long)

#define OBD_IOC_LOV_SETSTRIPE          _IOW ('f', 154, long)
#define OBD_IOC_LOV_GETSTRIPE          _IOW ('f', 155, long)
#define OBD_IOC_LOV_SETEA              _IOW ('f', 156, long)

#define OBD_IOC_QUOTACHECK             _IOW ('f', 160, int)
#define OBD_IOC_POLL_QUOTACHECK        _IOR ('f', 161, struct if_quotacheck *)
#define OBD_IOC_QUOTACTL               _IOWR('f', 162, struct if_quotactl *)

#define OBD_IOC_MOUNTOPT               _IOWR('f', 170, long)

#define OBD_IOC_RECORD                 _IOWR('f', 180, long)
#define OBD_IOC_ENDRECORD              _IOWR('f', 181, long)
#define OBD_IOC_PARSE                  _IOWR('f', 182, long)
#define OBD_IOC_DORECORD               _IOWR('f', 183, long)
#define OBD_IOC_PROCESS_CFG            _IOWR('f', 184, long)
#define OBD_IOC_DUMP_LOG               _IOWR('f', 185, long)
#define OBD_IOC_CLEAR_LOG              _IOWR('f', 186, long)
#define OBD_IOC_PARAM                  _IOW ('f', 187, long)

#define OBD_IOC_CATLOGLIST             _IOWR('f', 190, long)
#define OBD_IOC_LLOG_INFO              _IOWR('f', 191, long)
#define OBD_IOC_LLOG_PRINT             _IOWR('f', 192, long)
#define OBD_IOC_LLOG_CANCEL            _IOWR('f', 193, long)
#define OBD_IOC_LLOG_REMOVE            _IOWR('f', 194, long)
#define OBD_IOC_LLOG_CHECK             _IOWR('f', 195, long)
#define OBD_IOC_LLOG_CATINFO           _IOWR('f', 196, long)

#define ECHO_IOC_GET_STRIPE            _IOWR('f', 200, long)
#define ECHO_IOC_SET_STRIPE            _IOWR('f', 201, long)
#define ECHO_IOC_ENQUEUE               _IOWR('f', 202, long)
#define ECHO_IOC_CANCEL                _IOWR('f', 203, long)

/* XXX _IOWR('f', 250, long) has been defined in
 * lnet/include/libcfs/kp30.h for debug, don't use it
 */

/* Until such time as we get_info the per-stripe maximum from the OST,
 * we define this to be 2T - 4k, which is the ext3 maxbytes. */
#define LUSTRE_STRIPE_MAXBYTES 0x1fffffff000ULL
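
/*
 * Sanity check of the constant above: 0x1fffffff000 = 0x20000000000 - 0x1000
 * = 2^41 - 2^12, i.e. 2 TiB minus 4 KiB, matching the "2T - 4k" ext3 maxbytes
 * limit the comment refers to.
 */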

#define POISON_BULK 0

/*
 * l_wait_event is a flexible sleeping function, permitting simple caller
 * configuration of interrupt and timeout sensitivity along with actions to
 * be performed in the event of either exception.
 *
 * The first form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
 *                                           intr_handler, callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending.  It
494  * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
495  * 'condition' becomes true, it optionally calls the specified 'intr_handler'
496  * if not NULL, and returns -EINTR.
497  *
498  * If a non-zero timeout is specified, signals are ignored until the timeout
499  * has expired.  At this time, if 'timeout_handler' is not NULL it is called.
500  * If it returns FALSE l_wait_event() continues to wait as described above with
501  * signals enabled.  Otherwise it returns -ETIMEDOUT.
502  *
503  * LWI_INTR(intr_handler, callback_data) is shorthand for
504  * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
505  *
506  * The second form of usage looks like this:
507  *
508  * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler);
509  * rc = l_wait_event(waitq, condition, &lwi);
510  *
511  * This form is the same as the first except that it COMPLETELY IGNORES
512  * SIGNALS.  The caller must therefore beware that if 'timeout' is zero, or if
513  * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
514  * can unblock the current process is 'condition' becoming TRUE.
515  *
516  * Another form of usage is:
517  * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
518  *                                               timeout_handler);
519  * rc = l_wait_event(waitq, condition, &lwi);
 * This is the same as the previous case, except that the condition is
 * re-checked once every 'interval' jiffies (if non-zero).
 *
 * Subtle synchronization point: this macro does *not* necessarily take the
 * wait-queue spin-lock before returning, and, hence, the following idiom is
 * safe ONLY when the caller provides some external locking:
 *
 *             Thread1                            Thread2
 *
 *   l_wait_event(&obj->wq, ....);                                       (1)
 *
 *                                    wake_up(&obj->wq):                 (2)
 *                                         spin_lock(&q->lock);          (2.1)
 *                                         __wake_up_common(q, ...);     (2.2)
 *                                         spin_unlock(&q->lock, flags); (2.3)
 *
 *   OBD_FREE_PTR(obj);                                                  (3)
 *
 * As l_wait_event() may "short-cut" execution and return without taking
 * wait-queue spin-lock, some additional synchronization is necessary to
 * guarantee that step (3) can begin only after (2.3) finishes.
 *
 * XXX nikita: some ptlrpc daemon threads have races of that sort.
 *
 */

#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))

struct l_wait_info {
        long   lwi_timeout;
        long   lwi_interval;
        int  (*lwi_on_timeout)(void *);
        void (*lwi_on_signal)(void *);
        void  *lwi_cb_data;
};

/* NB: LWI_TIMEOUT ignores signals completely */
#define LWI_TIMEOUT(time, cb, data)             \
((struct l_wait_info) {                         \
        .lwi_timeout    = time,                 \
        .lwi_on_timeout = cb,                   \
        .lwi_cb_data    = data,                 \
        .lwi_interval   = 0                     \
})

#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)  \
((struct l_wait_info) {                                 \
        .lwi_timeout    = time,                         \
        .lwi_on_timeout = cb,                           \
        .lwi_cb_data    = data,                         \
        .lwi_interval   = interval                      \
})


#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)                          \
((struct l_wait_info) {                                                        \
        .lwi_timeout    = time,                                                \
        .lwi_on_timeout = time_cb,                                             \
        .lwi_on_signal = (sig_cb == NULL) ? LWI_ON_SIGNAL_NOOP : sig_cb,       \
        .lwi_cb_data    = data,                                                \
        .lwi_interval    = 0                                                   \
})

#define LWI_INTR(cb, data)  LWI_TIMEOUT_INTR(0, NULL, cb, data)

#define LUSTRE_FATAL_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) |                \
                           sigmask(SIGTERM) | sigmask(SIGQUIT) |               \
                           sigmask(SIGALRM))

#ifdef __KERNEL__
static inline sigset_t l_w_e_set_sigs(int sigs)
{
        sigset_t old;
        unsigned long irqflags;

        SIGNAL_MASK_LOCK(current, irqflags);
        old = current->blocked;
        siginitsetinv(&current->blocked, sigs);
        RECALC_SIGPENDING;
        SIGNAL_MASK_UNLOCK(current, irqflags);

        return old;
}

/*
 * wait for @condition to become true, but no longer than timeout, specified
 * by @info.
 */
#define __l_wait_event(wq, condition, info, ret, excl)                         \
do {                                                                           \
        wait_queue_t  __wait;                                                  \
        unsigned long __timeout = info->lwi_timeout;                           \
        unsigned long __irqflags;                                              \
        sigset_t      __blocked;                                               \
                                                                               \
        ret = 0;                                                               \
        if (condition)                                                         \
                break;                                                         \
                                                                               \
        init_waitqueue_entry(&__wait, current);                                \
        if (excl)                                                              \
                add_wait_queue_exclusive(&wq, &__wait);                        \
        else                                                                   \
                add_wait_queue(&wq, &__wait);                                  \
                                                                               \
        /* Block all signals (just the non-fatal ones if no timeout). */       \
        if (info->lwi_on_signal != NULL && __timeout == 0)                     \
                __blocked = l_w_e_set_sigs(LUSTRE_FATAL_SIGS);                 \
        else                                                                   \
                __blocked = l_w_e_set_sigs(0);                                 \
                                                                               \
        for (;;) {                                                             \
                set_current_state(TASK_INTERRUPTIBLE);                         \
                                                                               \
                if (condition)                                                 \
                        break;                                                 \
                                                                               \
                if (__timeout == 0) {                                          \
                        schedule();                                            \
                } else {                                                       \
                        unsigned long interval = info->lwi_interval?           \
                                             min_t(unsigned long,              \
                                                 info->lwi_interval,__timeout):\
                                             __timeout;                        \
                        __timeout -= interval - schedule_timeout(interval);    \
                        if (__timeout == 0) {                                  \
                                if (info->lwi_on_timeout == NULL ||            \
                                    info->lwi_on_timeout(info->lwi_cb_data)) { \
                                        ret = -ETIMEDOUT;                      \
                                        break;                                 \
                                }                                              \
                                /* Take signals after the timeout expires. */  \
                                if (info->lwi_on_signal != NULL)               \
                                    (void)l_w_e_set_sigs(LUSTRE_FATAL_SIGS);   \
                        }                                                      \
                }                                                              \
                                                                               \
                if (condition)                                                 \
                        break;                                                 \
                                                                               \
                if (signal_pending(current)) {                                 \
                        if (info->lwi_on_signal != NULL && __timeout == 0) {   \
                                if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
                                        info->lwi_on_signal(info->lwi_cb_data);\
                                ret = -EINTR;                                  \
                                break;                                         \
                        }                                                      \
                        /* We have to do this here because some signals */     \
                        /* are not blockable - ie from strace(1).       */     \
                        /* In these cases we want to schedule_timeout() */     \
                        /* again, because we don't want that to return  */     \
                        /* -EINTR when the RPC actually succeeded.      */     \
                        /* the RECALC_SIGPENDING below will deliver the */     \
                        /* signal properly.                             */     \
                        SIGNAL_MASK_LOCK(current, __irqflags);                 \
                        CLEAR_SIGPENDING;                                      \
                        SIGNAL_MASK_UNLOCK(current, __irqflags);               \
                }                                                              \
        }                                                                      \
                                                                               \
        SIGNAL_MASK_LOCK(current, __irqflags);                                 \
        current->blocked = __blocked;                                          \
        RECALC_SIGPENDING;                                                     \
        SIGNAL_MASK_UNLOCK(current, __irqflags);                               \
                                                                               \
        current->state = TASK_RUNNING;                                         \
        remove_wait_queue(&wq, &__wait);                                       \
} while(0)

#else /* !__KERNEL__ */
#define __l_wait_event(wq, condition, info, ret, excl)                  \
do {                                                                    \
        long __timeout = info->lwi_timeout;                             \
        long __now;                                                     \
        long __then = 0;                                                \
        int  __timed_out = 0;                                           \
                                                                        \
        ret = 0;                                                        \
        if (condition)                                                  \
                break;                                                  \
                                                                        \
        if (__timeout == 0)                                             \
                __timeout = 1000000000;                                 \
        else                                                            \
                __then = time(NULL);                                    \
                                                                        \
        while (!(condition)) {                                          \
                if (liblustre_wait_event(info->lwi_interval?:__timeout) || \
                    (info->lwi_interval && info->lwi_interval < __timeout)) {\
                        if (__timeout != 0 && info->lwi_timeout != 0) { \
                                __now = time(NULL);                     \
                                __timeout -= __now - __then;            \
                                if (__timeout < 0)                      \
                                        __timeout = 0;                  \
                                __then = __now;                         \
                        }                                               \
                        continue;                                       \
                }                                                       \
                                                                        \
                if (info->lwi_timeout != 0 && !__timed_out) {           \
                        __timed_out = 1;                                \
                        if (info->lwi_on_timeout == NULL ||             \
                            info->lwi_on_timeout(info->lwi_cb_data)) {  \
                                ret = -ETIMEDOUT;                       \
                                break;                                  \
                        }                                               \
                }                                                       \
        }                                                               \
} while (0)

#endif /* __KERNEL__ */

#define l_wait_event(wq, condition, info)                       \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info, __ret, 0);        \
        __ret;                                                  \
})

#define l_wait_event_exclusive(wq, condition, info)             \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info, __ret, 1);        \
        __ret;                                                  \
})
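
/*
 * Concrete usage sketch (illustrative; the wait-queue, condition and handler
 * names are assumptions, not part of this header): wait up to
 * obd_timeout * HZ jiffies for a reply flag, report the timeout, then keep
 * waiting while allowing the LUSTRE_FATAL_SIGS signals to interrupt.
 *
 *     static int my_timeout_cb(void *data)
 *     {
 *             CERROR("request timed out\n");
 *             return 0;   // FALSE: keep waiting, now with signals enabled
 *     }
 *
 *     struct l_wait_info lwi = LWI_TIMEOUT_INTR(obd_timeout * HZ,
 *                                               my_timeout_cb, NULL, NULL);
 *     int rc = l_wait_event(req->rq_reply_waitq, reply_arrived(req), &lwi);
 *     if (rc == -EINTR)
 *             ... the waiter was interrupted by a fatal signal ...
 */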

#ifdef __KERNEL__
/* initialize ost_lvb according to inode */
static inline void inode_init_lvb(struct inode *inode, struct ost_lvb *lvb)
{
        lvb->lvb_size = inode->i_size;
        lvb->lvb_blocks = inode->i_blocks;
        lvb->lvb_mtime = LTIME_S(inode->i_mtime);
        lvb->lvb_atime = LTIME_S(inode->i_atime);
        lvb->lvb_ctime = LTIME_S(inode->i_ctime);
}
#else
/* defined in liblustre/llite_lib.h */
#endif

#ifdef __KERNEL__
#define LIBLUSTRE_CLIENT (0)
#else
#define LIBLUSTRE_CLIENT (1)
#endif

#endif /* _LUSTRE_LIB_H */