/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2014, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/include/lustre_lib.h
 *
 * Basic Lustre library routines.
 */

#ifndef _LUSTRE_LIB_H
#define _LUSTRE_LIB_H

/** \defgroup lib lib
 *
 * @{
 */

#include <linux/signal.h>
#include <libcfs/libcfs.h>
#include <lustre/lustre_idl.h>
#include <lustre_ver.h>
#include <lustre_cfg.h>

/* target.c */
struct ptlrpc_request;
struct obd_export;
struct lu_target;
struct l_wait_info;
#include <lustre_ha.h>
#include <lustre_net.h>

#ifdef HAVE_SERVER_SUPPORT
void target_client_add_cb(struct obd_device *obd, __u64 transno, void *cb_data,
                          int error);
int rev_import_init(struct obd_export *exp);
int target_handle_connect(struct ptlrpc_request *req);
int target_handle_disconnect(struct ptlrpc_request *req);
void target_destroy_export(struct obd_export *exp);
int target_handle_ping(struct ptlrpc_request *req);
void target_committed_to_req(struct ptlrpc_request *req);
void target_cancel_recovery_timer(struct obd_device *obd);
void target_stop_recovery_thread(struct obd_device *obd);
void target_cleanup_recovery(struct obd_device *obd);
int target_queue_recovery_request(struct ptlrpc_request *req,
                                  struct obd_device *obd);
int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
                   struct l_wait_info *lwi);
#endif

int target_pack_pool_reply(struct ptlrpc_request *req);
int do_set_info_async(struct obd_import *imp,
                      int opcode, int version,
                      size_t keylen, void *key,
                      size_t vallen, void *val,
                      struct ptlrpc_request_set *set);

void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id);

/*
 * l_wait_event is a flexible sleeping function, permitting simple caller
 * configuration of interrupt and timeout sensitivity along with actions to
 * be performed in the event of either exception.
 *
 * The first form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT_INTR(timeout, timeout_handler,
 *                                           intr_handler, callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * l_wait_event() makes the current process wait on 'waitq' until 'condition'
 * is TRUE or a "killable" signal (SIGTERM, SIGKILL, SIGINT) is pending.  It
 * returns 0 to signify 'condition' is TRUE, but if a signal wakes it before
 * 'condition' becomes true, it optionally calls the specified 'intr_handler'
 * if not NULL, and returns -EINTR.
 *
 * If a non-zero timeout is specified, signals are ignored until the timeout
 * has expired.  At this time, if 'timeout_handler' is not NULL it is called.
 * If it returns FALSE, l_wait_event() continues to wait as described above
 * with signals enabled.  Otherwise it returns -ETIMEDOUT.
 *
 * LWI_INTR(intr_handler, callback_data) is shorthand for
 * LWI_TIMEOUT_INTR(0, NULL, intr_handler, callback_data)
 *
 * The second form of usage looks like this:
 *
 * struct l_wait_info lwi = LWI_TIMEOUT(timeout, timeout_handler,
 *                                      callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 *
 * This form is the same as the first except that it COMPLETELY IGNORES
 * SIGNALS.  The caller must therefore beware that if 'timeout' is zero, or if
 * 'timeout_handler' is not NULL and returns FALSE, then the ONLY thing that
 * can unblock the current process is 'condition' becoming TRUE.
 *
 * Another form of usage is:
 * struct l_wait_info lwi = LWI_TIMEOUT_INTERVAL(timeout, interval,
 *                                               timeout_handler,
 *                                               callback_data);
 * rc = l_wait_event(waitq, condition, &lwi);
 * This is the same as the previous case, but 'condition' is re-checked every
 * 'interval' jiffies (if non-zero).
 *
 * Subtle synchronization point: this macro does *not* necessarily take the
 * wait-queue spin-lock before returning, and hence the following idiom is
 * safe ONLY when the caller provides some external locking:
 *
 *             Thread1                            Thread2
 *
 *   l_wait_event(&obj->wq, ....);                                       (1)
 *
 *                                    wake_up(&obj->wq):                 (2)
 *                                         spin_lock(&q->lock);          (2.1)
 *                                         __wake_up_common(q, ...);     (2.2)
 *                                         spin_unlock(&q->lock);        (2.3)
 *
 *   OBD_FREE_PTR(obj);                                                  (3)
 *
 * As l_wait_event() may "short-cut" execution and return without taking the
 * wait-queue spin-lock, some additional synchronization is necessary to
 * guarantee that step (3) can begin only after (2.3) finishes.
 *
 * XXX nikita: some ptlrpc daemon threads have races of that sort.
 *
 */
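/*
 * One possible way (a sketch, not code from this header) to provide that
 * external synchronization: before freeing, Thread1 cycles the wait-queue
 * spin-lock, which serializes it behind any wake_up() still inside steps
 * (2.1)-(2.3):
 *
 *   rc = l_wait_event(&obj->wq, ....);
 *   spin_lock(&obj->wq.lock);
 *   spin_unlock(&obj->wq.lock);
 *   OBD_FREE_PTR(obj);
 *
 * Acquiring the lock cannot succeed until a concurrent wake_up() has
 * dropped it in step (2.3), so the free is ordered after the wakeup
 * completes.
 */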
/*
 * A timeout handler that always resumes waiting: returning FALSE from
 * lwi_on_timeout tells l_wait_event() to keep sleeping rather than fail
 * with -ETIMEDOUT.
 */
static inline int back_to_sleep(void *arg)
{
        return 0;
}

/* Sentinel lwi_on_signal value: fatal signals still interrupt the wait
 * (l_wait_event() returns -EINTR) but no handler is invoked. */
#define LWI_ON_SIGNAL_NOOP ((void (*)(void *))(-1))

struct l_wait_info {
        cfs_duration_t lwi_timeout;
        cfs_duration_t lwi_interval;
        int            lwi_allow_intr;
        int  (*lwi_on_timeout)(void *);
        void (*lwi_on_signal)(void *);
        void  *lwi_cb_data;
};

/* NB: LWI_TIMEOUT ignores signals completely */
#define LWI_TIMEOUT(time, cb, data)             \
((struct l_wait_info) {                         \
        .lwi_timeout    = time,                 \
        .lwi_on_timeout = cb,                   \
        .lwi_cb_data    = data,                 \
        .lwi_interval   = 0,                    \
        .lwi_allow_intr = 0                     \
})

#define LWI_TIMEOUT_INTERVAL(time, interval, cb, data)  \
((struct l_wait_info) {                                 \
        .lwi_timeout    = time,                         \
        .lwi_on_timeout = cb,                           \
        .lwi_cb_data    = data,                         \
        .lwi_interval   = interval,                     \
        .lwi_allow_intr = 0                             \
})

#define LWI_TIMEOUT_INTR(time, time_cb, sig_cb, data)   \
((struct l_wait_info) {                                 \
        .lwi_timeout    = time,                         \
        .lwi_on_timeout = time_cb,                      \
        .lwi_on_signal  = sig_cb,                       \
        .lwi_cb_data    = data,                         \
        .lwi_interval   = 0,                            \
        .lwi_allow_intr = 0                             \
})

#define LWI_TIMEOUT_INTR_ALL(time, time_cb, sig_cb, data)       \
((struct l_wait_info) {                                         \
        .lwi_timeout    = time,                                 \
        .lwi_on_timeout = time_cb,                              \
        .lwi_on_signal  = sig_cb,                               \
        .lwi_cb_data    = data,                                 \
        .lwi_interval   = 0,                                    \
        .lwi_allow_intr = 1                                     \
})

#define LWI_INTR(cb, data)  LWI_TIMEOUT_INTR(0, NULL, cb, data)
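
/*
 * Usage sketch for the first form (illustrative only; the foo_* names are
 * hypothetical).  Here foo_timeout_cb() returns FALSE so that after the
 * timeout expires the wait continues with signals enabled, and
 * foo_intr_cb() then runs if a fatal signal (see LUSTRE_FATAL_SIGS below)
 * arrives, making l_wait_event() return -EINTR.  Returning TRUE from the
 * timeout handler would instead fail the wait with -ETIMEDOUT.
 *
 *   static int foo_timeout_cb(void *data)
 *   {
 *           CERROR("%s: still waiting, enabling interrupts\n", (char *)data);
 *           return 0;
 *   }
 *
 *   static void foo_intr_cb(void *data)
 *   {
 *           CDEBUG(D_INFO, "%s: wait interrupted\n", (char *)data);
 *   }
 *
 *   struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(10),
 *                                             foo_timeout_cb, foo_intr_cb,
 *                                             "foo");
 *   rc = l_wait_event(foo_waitq, foo_done != 0, &lwi);
 */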

#define LUSTRE_FATAL_SIGS                                        \
        (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGTERM) | \
         sigmask(SIGQUIT) | sigmask(SIGALRM))

/*
 * Wait Queue
 */
#ifndef HAVE___ADD_WAIT_QUEUE_EXCLUSIVE
static inline void __add_wait_queue_exclusive(wait_queue_head_t *q,
                                              wait_queue_t *wait)
{
        wait->flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(q, wait);
}
#endif /* HAVE___ADD_WAIT_QUEUE_EXCLUSIVE */

/**
 * In Linux (version < 2.6.34) wait_queue_t keeps exclusively waiting
 * threads on a FIFO list, which is not always desirable: all threads are
 * woken again and again even when only a few of them need to be active
 * most of the time, and performance suffers because the cache is polluted
 * by many different threads.
 *
 * A LIFO list resolves this problem, because by default we always wake up
 * the most recently active thread.
 *
 * NB: please don't mix non-exclusive and exclusive waits on the same
 * waitq if add_wait_queue_exclusive_head() is used.
 */
#define add_wait_queue_exclusive_head(waitq, link)              \
{                                                               \
        unsigned long flags;                                    \
                                                                \
        spin_lock_irqsave(&((waitq)->lock), flags);             \
        __add_wait_queue_exclusive(waitq, link);                \
        spin_unlock_irqrestore(&((waitq)->lock), flags);        \
}

/*
 * Wait for @condition to become true, but no longer than the timeout
 * specified by @info.
 */
#define __l_wait_event(wq, condition, info, ret, l_add_wait)                   \
do {                                                                           \
        wait_queue_t __wait;                                                   \
        cfs_duration_t __timeout = info->lwi_timeout;                          \
        sigset_t __blocked;                                                    \
        int __allow_intr = info->lwi_allow_intr;                               \
                                                                               \
        ret = 0;                                                               \
        if (condition)                                                         \
                break;                                                         \
                                                                               \
        init_waitqueue_entry(&__wait, current);                                \
        l_add_wait(&wq, &__wait);                                              \
                                                                               \
        /* Block all signals (just the non-fatal ones if no timeout). */       \
        if (info->lwi_on_signal != NULL && (__timeout == 0 || __allow_intr))   \
                __blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);              \
        else                                                                   \
                __blocked = cfs_block_sigsinv(0);                              \
                                                                               \
        for (;;) {                                                             \
                set_current_state(TASK_INTERRUPTIBLE);                         \
                                                                               \
                if (condition)                                                 \
                        break;                                                 \
                                                                               \
                if (__timeout == 0) {                                          \
                        schedule();                                            \
                } else {                                                       \
                        cfs_duration_t interval = info->lwi_interval ?         \
                                min_t(cfs_duration_t, info->lwi_interval,      \
                                      __timeout) : __timeout;                  \
                        cfs_duration_t remaining = schedule_timeout(interval); \
                        __timeout = cfs_time_sub(__timeout,                    \
                                            cfs_time_sub(interval, remaining));\
                        if (__timeout == 0) {                                  \
                                if (info->lwi_on_timeout == NULL ||            \
                                    info->lwi_on_timeout(info->lwi_cb_data)) { \
                                        ret = -ETIMEDOUT;                      \
                                        break;                                 \
                                }                                              \
                                /* Take signals after the timeout expires. */  \
                                if (info->lwi_on_signal != NULL)               \
                                    (void)cfs_block_sigsinv(LUSTRE_FATAL_SIGS);\
                        }                                                      \
                }                                                              \
                                                                               \
                if (condition)                                                 \
                        break;                                                 \
                if (signal_pending(current)) {                                 \
                        if (info->lwi_on_signal != NULL &&                     \
                            (__timeout == 0 || __allow_intr)) {                \
                                if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
                                        info->lwi_on_signal(info->lwi_cb_data);\
                                ret = -EINTR;                                  \
                                break;                                         \
                        }                                                      \
                        /* We have to do this here because some signals */     \
                        /* are not blockable - ie from strace(1).       */     \
                        /* In these cases we want to schedule_timeout() */     \
                        /* again, because we don't want that to return  */     \
                        /* -EINTR when the RPC actually succeeded.      */     \
                        /* the recalc_sigpending() below will deliver the */   \
                        /* signal properly.                             */     \
                        cfs_clear_sigpending();                                \
                }                                                              \
        }                                                                      \
                                                                               \
        cfs_restore_sigs(__blocked);                                           \
                                                                               \
        set_current_state(TASK_RUNNING);                                       \
        remove_wait_queue(&wq, &__wait);                                       \
} while (0)

#define l_wait_event(wq, condition, info)                       \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info,                   \
                       __ret, add_wait_queue);                  \
        __ret;                                                  \
})

#define l_wait_event_exclusive(wq, condition, info)             \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info,                   \
                       __ret, add_wait_queue_exclusive);        \
        __ret;                                                  \
})

#define l_wait_event_exclusive_head(wq, condition, info)        \
({                                                              \
        int                 __ret;                              \
        struct l_wait_info *__info = (info);                    \
                                                                \
        __l_wait_event(wq, condition, __info,                   \
                       __ret, add_wait_queue_exclusive_head);   \
        __ret;                                                  \
})
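
/*
 * Usage sketch (illustrative only; the svc_* names are hypothetical): a
 * pool of service threads sleeping LIFO on a shared queue via the
 * exclusive-head variant, so each wakeup goes to the most recently
 * active, cache-warm thread:
 *
 *   struct l_wait_info lwi = { 0 };
 *
 *   rc = l_wait_event_exclusive_head(svc_waitq,
 *                                    !list_empty(&svc_req_list), &lwi);
 */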

#define l_wait_condition(wq, condition)                         \
({                                                              \
        struct l_wait_info lwi = { 0 };                         \
        l_wait_event(wq, condition, &lwi);                      \
})

#define l_wait_condition_exclusive(wq, condition)               \
({                                                              \
        struct l_wait_info lwi = { 0 };                         \
        l_wait_event_exclusive(wq, condition, &lwi);            \
})

#define l_wait_condition_exclusive_head(wq, condition)          \
({                                                              \
        struct l_wait_info lwi = { 0 };                         \
        l_wait_event_exclusive_head(wq, condition, &lwi);       \
})

/** @} lib */

#endif /* _LUSTRE_LIB_H */