/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
 *   Author: Isaac Huang <isaac@clusterfs.com>
 *
 */
#ifndef __SELFTEST_SELFTEST_H__
#define __SELFTEST_SELFTEST_H__

#define LNET_ONLY

#ifndef __KERNEL__

/* XXX workaround XXX */
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif

/* TODO: remove these when libcfs provides proper primitives for userspace
 *
 * Dummy implementations of spinlock_t and atomic_t work because userspace
 * selftest is completely single-threaded, even when using the multi-threaded
 * usocklnd.
 */
typedef struct { } spinlock_t;
static inline void spin_lock(spinlock_t *l) {return;}
static inline void spin_unlock(spinlock_t *l) {return;}
static inline void spin_lock_init(spinlock_t *l) {return;}

typedef struct { volatile int counter; } atomic_t;
#define atomic_read(a) ((a)->counter)
#define atomic_set(a,b) do {(a)->counter = b; } while (0)
#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
#define atomic_inc(a)  (((a)->counter)++)
#define atomic_dec(a)  do { (a)->counter--; } while (0)

#endif
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
#include <lnet/lib-types.h>
#include <lnet/lnetst.h>

#include "rpc.h"
#include "timer.h"

#ifndef MADE_WITHOUT_COMPROMISE
#define MADE_WITHOUT_COMPROMISE
#endif


#define SWI_STATE_NEWBORN                  0
#define SWI_STATE_REPLY_SUBMITTED          1
#define SWI_STATE_REPLY_SENT               2
#define SWI_STATE_REQUEST_SUBMITTED        3
#define SWI_STATE_REQUEST_SENT             4
#define SWI_STATE_REPLY_RECEIVED           5
#define SWI_STATE_BULK_STARTED             6
#define SWI_STATE_DONE                     10

/* forward refs */
struct swi_workitem;
struct srpc_service;
struct sfw_test_unit;
struct sfw_test_instance;

/*
 * A workitem is deferred work with these semantics:
 * - a workitem always runs in thread context.
 * - a workitem can be concurrent with other workitems but is strictly
 *   serialized with respect to itself.
 * - no CPU affinity: a workitem does not necessarily run on the same CPU
 *   that schedules it. However, this might change in the future.
 * - if a workitem is scheduled again before it has a chance to run, it
 *   runs only once.
 * - if a workitem is scheduled while it runs, it runs again after it
 *   completes; this ensures that events occurring while other events are
 *   being processed receive due attention. This behavior also allows a
 *   workitem to reschedule itself.
 *
 * Usage notes:
 * - a workitem can sleep but it should be aware of how that sleep might
 *   affect others.
 * - a workitem runs inside a kernel thread so there's no user space to access.
 * - do not use a workitem if the scheduling latency can't be tolerated.
 *
 * When wi_action returns non-zero, it means the workitem has either been
 * freed or reused and the workitem scheduler won't touch it any more.
 */
typedef int (*swi_action_t) (struct swi_workitem *);
typedef struct swi_workitem {
        struct list_head wi_list;        /* chain on runq */
        int              wi_state;
        swi_action_t     wi_action;
        void            *wi_data;
        unsigned int     wi_running:1;
        unsigned int     wi_scheduled:1;
} swi_workitem_t;

static inline void
swi_init_workitem (swi_workitem_t *wi, void *data, swi_action_t action)
{
        CFS_INIT_LIST_HEAD(&wi->wi_list);

        wi->wi_running   = 0;
        wi->wi_scheduled = 0;
        wi->wi_data      = data;
        wi->wi_action    = action;
        wi->wi_state     = SWI_STATE_NEWBORN;
}

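/*
 * Illustrative usage sketch (not part of the original header).  A workitem
 * action typically performs one unit of work and reschedules itself while
 * work remains; returning 0 tells the scheduler the workitem is still valid.
 * The names example_state_t, example_drain and ex below are hypothetical;
 * only swi_workitem_t, swi_init_workitem() and swi_schedule_workitem() are
 * defined by this header.
 *
 *      typedef struct {
 *              swi_workitem_t ex_wi;
 *              int            ex_pending;
 *      } example_state_t;
 *
 *      static int
 *      example_drain (swi_workitem_t *wi)
 *      {
 *              example_state_t *ex = wi->wi_data;
 *
 *              if (--ex->ex_pending > 0)
 *                      swi_schedule_workitem(wi);
 *              return 0;
 *      }
 *
 *      swi_init_workitem(&ex->ex_wi, ex, example_drain);
 *      swi_schedule_workitem(&ex->ex_wi);
 */
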
#define SWI_RESCHED    128         /* # workitem scheduler loops before reschedule */

/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
 * services, e.g. create/modify session.
 */
#define SRPC_SERVICE_DEBUG              0
#define SRPC_SERVICE_MAKE_SESSION       1
#define SRPC_SERVICE_REMOVE_SESSION     2
#define SRPC_SERVICE_BATCH              3
#define SRPC_SERVICE_TEST               4
#define SRPC_SERVICE_QUERY_STAT         5
#define SRPC_SERVICE_JOIN               6
#define SRPC_FRAMEWORK_SERVICE_MAX_ID   10
/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
#define SRPC_SERVICE_BRW                11
#define SRPC_SERVICE_PING               12
#define SRPC_SERVICE_MAX_ID             12

#define SRPC_REQUEST_PORTAL             50
/* a lazy portal for framework RPC requests */
#define SRPC_FRAMEWORK_REQUEST_PORTAL   51
/* all reply/bulk RDMAs go to this portal */
#define SRPC_RDMA_PORTAL                52

static inline srpc_msg_type_t
srpc_service2request (int service)
{
        switch (service) {
        default:
                LBUG ();
        case SRPC_SERVICE_DEBUG:
                return SRPC_MSG_DEBUG_REQST;

        case SRPC_SERVICE_MAKE_SESSION:
                return SRPC_MSG_MKSN_REQST;

        case SRPC_SERVICE_REMOVE_SESSION:
                return SRPC_MSG_RMSN_REQST;

        case SRPC_SERVICE_BATCH:
                return SRPC_MSG_BATCH_REQST;

        case SRPC_SERVICE_TEST:
                return SRPC_MSG_TEST_REQST;

        case SRPC_SERVICE_QUERY_STAT:
                return SRPC_MSG_STAT_REQST;

        case SRPC_SERVICE_BRW:
                return SRPC_MSG_BRW_REQST;

        case SRPC_SERVICE_PING:
                return SRPC_MSG_PING_REQST;

        case SRPC_SERVICE_JOIN:
                return SRPC_MSG_JOIN_REQST;
        }
}

static inline srpc_msg_type_t
srpc_service2reply (int service)
{
        return srpc_service2request(service) + 1;
}

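/*
 * Worked example (added for clarity, not part of the original header; the
 * enum layout is an assumption implied by the "+ 1" above):
 * srpc_service2reply() relies on the srpc_msg_type_t values in rpc.h
 * listing each reply type immediately after the corresponding request
 * type, so e.g.
 *
 *      srpc_service2reply(SRPC_SERVICE_PING) == SRPC_MSG_PING_REQST + 1
 *
 * which is the PING reply message type.
 */
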
typedef enum {
        SRPC_BULK_REQ_RCVD   = 0, /* passive bulk request (PUT sink/GET source) received */
        SRPC_BULK_PUT_SENT   = 1, /* active bulk PUT sent (source) */
        SRPC_BULK_GET_RPLD   = 2, /* active bulk GET replied (sink) */
        SRPC_REPLY_RCVD      = 3, /* incoming reply received */
        SRPC_REPLY_SENT      = 4, /* outgoing reply sent */
        SRPC_REQUEST_RCVD    = 5, /* incoming request received */
        SRPC_REQUEST_SENT    = 6, /* outgoing request sent */
} srpc_event_type_t;

/* RPC event */
typedef struct {
        srpc_event_type_t ev_type;   /* what's up */
        lnet_event_kind_t ev_lnet;   /* LNet event type */
        int               ev_fired;  /* LNet event fired? */
        int               ev_status; /* LNet event status */
        void             *ev_data;   /* owning server/client RPC */
} srpc_event_t;

typedef struct {
        int              bk_len;  /* len of bulk data */
        lnet_handle_md_t bk_mdh;
        int              bk_sink; /* sink/source */
        int              bk_niov; /* # iov in bk_iovs */
#ifdef __KERNEL__
        lnet_kiov_t      bk_iovs[0];
#else
        cfs_page_t     **bk_pages;
        lnet_md_iovec_t  bk_iovs[0];
#endif
} srpc_bulk_t; /* bulk descriptor */

typedef struct srpc_peer {
        struct list_head stp_list;     /* chain on peer hash */
        struct list_head stp_rpcq;     /* q of non-control RPCs */
        struct list_head stp_ctl_rpcq; /* q of control RPCs */
        spinlock_t       stp_lock;     /* serialize */
        lnet_nid_t       stp_nid;
        int              stp_credits;  /* available credits */
} srpc_peer_t;

/* message buffer descriptor */
typedef struct {
        struct list_head     buf_list; /* chain on srpc_service::*_msgq */
        srpc_msg_t           buf_msg;
        lnet_handle_md_t     buf_mdh;
        lnet_nid_t           buf_self;
        lnet_process_id_t    buf_peer;
} srpc_buffer_t;

/* server-side state of an RPC */
typedef struct srpc_server_rpc {
        struct list_head     srpc_list;    /* chain on srpc_service::*_rpcq */
        struct srpc_service *srpc_service;
        swi_workitem_t       srpc_wi;
        srpc_event_t         srpc_ev;      /* bulk/reply event */
        lnet_nid_t           srpc_self;
        lnet_process_id_t    srpc_peer;
        srpc_msg_t           srpc_replymsg;
        lnet_handle_md_t     srpc_replymdh;
        srpc_buffer_t       *srpc_reqstbuf;
        srpc_bulk_t         *srpc_bulk;

        int                  srpc_status;
        void               (*srpc_done)(struct srpc_server_rpc *);
} srpc_server_rpc_t;

/* client-side state of an RPC */
typedef struct srpc_client_rpc {
        struct list_head     crpc_list;   /* chain on user's lists */
        struct list_head     crpc_privl;  /* chain on srpc_peer_t::*rpcq */
        spinlock_t           crpc_lock;   /* serialize */
        int                  crpc_service;
        atomic_t             crpc_refcount;
        int                  crpc_timeout; /* # seconds to wait for reply */
        stt_timer_t          crpc_timer;
        swi_workitem_t       crpc_wi;
        lnet_process_id_t    crpc_dest;
        srpc_peer_t         *crpc_peer;

        void               (*crpc_done)(struct srpc_client_rpc *);
        void               (*crpc_fini)(struct srpc_client_rpc *);
        int                  crpc_status;    /* completion status */
        void                *crpc_priv;      /* caller data */

        /* state flags */
        unsigned int         crpc_aborted:1; /* being given up */
        unsigned int         crpc_closed:1;  /* completed */

        /* RPC events */
        srpc_event_t         crpc_bulkev;    /* bulk event */
        srpc_event_t         crpc_reqstev;   /* request event */
        srpc_event_t         crpc_replyev;   /* reply event */

        /* bulk, request (reqst), and reply exchanged on wire */
        srpc_msg_t           crpc_reqstmsg;
        srpc_msg_t           crpc_replymsg;
        lnet_handle_md_t     crpc_reqstmdh;
        lnet_handle_md_t     crpc_replymdh;
        srpc_bulk_t          crpc_bulk;
} srpc_client_rpc_t;

#define srpc_client_rpc_size(rpc)                                       \
offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])

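/*
 * Worked example (added for clarity, not part of the original header):
 * crpc_bulk.bk_iovs[] is the flexible trailing array of srpc_client_rpc_t,
 * so the total size of an RPC carrying n bulk iovs is
 *
 *      offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[n])
 *
 * This matches the memset() in srpc_init_client_rpc() below and the
 * LIBCFS_FREE() in srpc_destroy_client_rpc(), both of which use the same
 * offsetof-based size.
 */
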
#define srpc_client_rpc_addref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        atomic_inc(&(rpc)->crpc_refcount);                              \
} while (0)

#define srpc_client_rpc_decref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        if (atomic_dec_and_test(&(rpc)->crpc_refcount))                 \
                srpc_destroy_client_rpc(rpc);                           \
} while (0)

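/*
 * Illustrative sketch (not part of the original header): the usual client
 * RPC reference-counting pattern.  A freshly initialized RPC holds one
 * reference for its caller (see srpc_init_client_rpc() below); take an
 * extra reference before handing the RPC to another context and drop it
 * when that context is done.  The final srpc_client_rpc_decref() calls
 * srpc_destroy_client_rpc().
 *
 *      srpc_client_rpc_t *rpc = ...;     refcount == 1 after init
 *
 *      srpc_client_rpc_addref(rpc);      another context will use rpc
 *      ...
 *      srpc_client_rpc_decref(rpc);      that context is done
 *      srpc_client_rpc_decref(rpc);      drop caller's ref; may destroy rpc
 */
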
#define srpc_event_pending(rpc)   ((rpc)->crpc_bulkev.ev_fired == 0 ||  \
                                   (rpc)->crpc_reqstev.ev_fired == 0 || \
                                   (rpc)->crpc_replyev.ev_fired == 0)

typedef struct srpc_service {
        int                sv_id;            /* service id */
        const char        *sv_name;          /* human readable name */
        int                sv_nprune;        /* # posted RPCs to be pruned */
        int                sv_concur;        /* max # concurrent RPCs */

        spinlock_t         sv_lock;
        int                sv_shuttingdown;
        srpc_event_t       sv_ev;            /* LNet event */
        int                sv_nposted_msg;   /* # posted message buffers */
        struct list_head   sv_free_rpcq;     /* free RPC descriptors */
        struct list_head   sv_active_rpcq;   /* in-flight RPCs */
        struct list_head   sv_posted_msgq;   /* posted message buffers */
        struct list_head   sv_blocked_msgq;  /* blocked for RPC descriptor */

        /* Service callbacks:
         * - sv_handler: process incoming RPC request
         * - sv_bulk_ready: notify that bulk data is ready
         */
        int                (*sv_handler) (srpc_server_rpc_t *);
        int                (*sv_bulk_ready) (srpc_server_rpc_t *, int);
} srpc_service_t;

#define SFW_POST_BUFFERS         8
#define SFW_SERVICE_CONCURRENCY  (SFW_POST_BUFFERS/2)

typedef struct {
        struct list_head  sn_list;    /* chain on fw_zombie_sessions */
        lst_sid_t         sn_id;      /* unique identifier */
        unsigned int      sn_timeout; /* # seconds' inactivity to expire */
        int               sn_timer_active;
        stt_timer_t       sn_timer;
        struct list_head  sn_batches; /* list of batches */
        char              sn_name[LST_NAME_SIZE];
        atomic_t          sn_brw_errors;
        atomic_t          sn_ping_errors;
} sfw_session_t;

#define sfw_sid_equal(sid0, sid1)     ((sid0).ses_nid == (sid1).ses_nid && \
                                       (sid0).ses_stamp == (sid1).ses_stamp)

typedef struct {
        struct list_head  bat_list;      /* chain on sn_batches */
        lst_bid_t         bat_id;        /* batch id */
        int               bat_error;     /* error code of batch */
        sfw_session_t    *bat_session;   /* batch's session */
        atomic_t          bat_nactive;   /* # of active tests */
        struct list_head  bat_tests;     /* test instances */
} sfw_batch_t;

typedef struct {
        int  (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
        void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
        int  (*tso_prep_rpc)(struct sfw_test_unit *tsu,
                             lnet_process_id_t dest,
                             srpc_client_rpc_t **rpc);   /* prepare a test RPC */
        void (*tso_done_rpc)(struct sfw_test_unit *tsu,
                             srpc_client_rpc_t *rpc);    /* complete a test RPC */
} sfw_test_client_ops_t;

typedef struct sfw_test_instance {
        struct list_head        tsi_list;         /* chain on batch */
        int                     tsi_service;      /* test type */
        sfw_batch_t            *tsi_batch;        /* batch */
        sfw_test_client_ops_t  *tsi_ops;          /* test client operations */

        /* public parameters for all test units */
        int                     tsi_is_client:1;     /* is test client */
        int                     tsi_stoptsu_onerr:1; /* stop tsu on error */
        int                     tsi_concur;          /* concurrency */
        int                     tsi_loop;            /* loop count */

        /* status of test instance */
        spinlock_t              tsi_lock;         /* serialize */
        int                     tsi_stopping:1;   /* test is stopping */
        atomic_t                tsi_nactive;      /* # of active test units */
        struct list_head        tsi_units;        /* test units */
        struct list_head        tsi_free_rpcs;    /* free RPCs */
        struct list_head        tsi_active_rpcs;  /* active RPCs */

        union {
                test_bulk_req_t bulk;             /* bulk parameters */
                test_ping_req_t ping;             /* ping parameters */
        } tsi_u;
} sfw_test_instance_t;

/* XXX: the trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
 * the end of each page are not used */
#define SFW_MAX_CONCUR     LST_MAX_CONCUR
#define SFW_ID_PER_PAGE    (CFS_PAGE_SIZE / sizeof(lnet_process_id_t))
#define SFW_MAX_NDESTS     (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n)    (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)

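/*
 * Worked example (added for clarity, not part of the original header; the
 * concrete numbers are assumptions): with 4 KiB pages and a 16-byte
 * lnet_process_id_t, SFW_ID_PER_PAGE is 4096 / 16 = 256 destination IDs per
 * page, so addressing 1000 destinations needs
 *
 *      sfw_id_pages(1000) = (1000 + 255) / 256 = 4 pages.
 */
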
typedef struct sfw_test_unit {
        struct list_head        tsu_list;         /* chain on lst_test_instance */
        lnet_process_id_t       tsu_dest;         /* id of dest node */
        int                     tsu_loop;         /* loop count of the test */
        sfw_test_instance_t    *tsu_instance;     /* pointer to test instance */
        void                   *tsu_private;      /* private data */
        swi_workitem_t          tsu_worker;       /* workitem of the test unit */
} sfw_test_unit_t;

typedef struct {
        struct list_head        tsc_list;         /* chain on fw_tests */
        srpc_service_t         *tsc_srv_service;  /* test service */
        sfw_test_client_ops_t  *tsc_cli_ops;      /* ops of test client */
} sfw_test_case_t;


srpc_client_rpc_t *
sfw_create_rpc(lnet_process_id_t peer, int service, int nbulkiov, int bulklen,
               void (*done) (srpc_client_rpc_t *), void *priv);
int sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
                        int nblk, int blklen, srpc_client_rpc_t **rpc);
void sfw_abort_rpc(srpc_client_rpc_t *rpc);
void sfw_post_rpc(srpc_client_rpc_t *rpc);
void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
void sfw_unpack_message(srpc_msg_t *msg);
void sfw_free_pages(srpc_server_rpc_t *rpc);
void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i);
int sfw_alloc_pages(srpc_server_rpc_t *rpc, int npages, int sink);

srpc_client_rpc_t *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
                       int nbulkiov, int bulklen,
                       void (*rpc_done)(srpc_client_rpc_t *),
                       void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
void srpc_post_rpc(srpc_client_rpc_t *rpc);
void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
void srpc_free_bulk(srpc_bulk_t *bk);
srpc_bulk_t *srpc_alloc_bulk(int npages, int sink);
int srpc_send_rpc(swi_workitem_t *wi);
int srpc_send_reply(srpc_server_rpc_t *rpc);
int srpc_add_service(srpc_service_t *sv);
int srpc_remove_service(srpc_service_t *sv);
void srpc_shutdown_service(srpc_service_t *sv);
int srpc_finish_service(srpc_service_t *sv);
int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
void srpc_get_counters(srpc_counters_t *cnt);
void srpc_set_counters(const srpc_counters_t *cnt);

void swi_kill_workitem(swi_workitem_t *wi);
void swi_schedule_workitem(swi_workitem_t *wi);
void swi_schedule_serial_workitem(swi_workitem_t *wi);
int swi_startup(void);
int sfw_startup(void);
int srpc_startup(void);
void swi_shutdown(void);
void sfw_shutdown(void);
void srpc_shutdown(void);

static inline void
srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
{
        LASSERT (rpc != NULL);
        LASSERT (!srpc_event_pending(rpc));
        LASSERT (list_empty(&rpc->crpc_privl));
        LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
        LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif

        if (rpc->crpc_fini == NULL) {
                LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
        } else {
                (*rpc->crpc_fini) (rpc);
        }

        return;
}

static inline void
srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
                      int service, int nbulkiov, int bulklen,
                      void (*rpc_done)(srpc_client_rpc_t *),
                      void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
        LASSERT (nbulkiov <= LNET_MAX_IOV);

        memset(rpc, 0, offsetof(srpc_client_rpc_t,
                                crpc_bulk.bk_iovs[nbulkiov]));

        CFS_INIT_LIST_HEAD(&rpc->crpc_list);
        CFS_INIT_LIST_HEAD(&rpc->crpc_privl);
        swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc);
        spin_lock_init(&rpc->crpc_lock);
        atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */

        rpc->crpc_dest         = peer;
        rpc->crpc_priv         = priv;
        rpc->crpc_service      = service;
        rpc->crpc_bulk.bk_len  = bulklen;
        rpc->crpc_bulk.bk_niov = nbulkiov;
        rpc->crpc_done         = rpc_done;
        rpc->crpc_fini         = rpc_fini;
        rpc->crpc_reqstmdh     =
        rpc->crpc_replymdh     =
        rpc->crpc_bulk.bk_mdh  = LNET_INVALID_HANDLE;

        /* no event is expected at this point */
        rpc->crpc_bulkev.ev_fired  =
        rpc->crpc_reqstev.ev_fired =
        rpc->crpc_replyev.ev_fired = 1;

        rpc->crpc_reqstmsg.msg_magic   = SRPC_MSG_MAGIC;
        rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
        rpc->crpc_reqstmsg.msg_type    = srpc_service2request(service);
        return;
}

static inline const char *
swi_state2str (int state)
{
#define STATE2STR(x) case x: return #x
        switch(state) {
                default:
                        LBUG();
                STATE2STR(SWI_STATE_NEWBORN);
                STATE2STR(SWI_STATE_REPLY_SUBMITTED);
                STATE2STR(SWI_STATE_REPLY_SENT);
                STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
                STATE2STR(SWI_STATE_REQUEST_SENT);
                STATE2STR(SWI_STATE_REPLY_RECEIVED);
                STATE2STR(SWI_STATE_BULK_STARTED);
                STATE2STR(SWI_STATE_DONE);
        }
#undef STATE2STR
}

#define UNUSED(x)       ( (void)(x) )

#ifndef __KERNEL__

int stt_poll_interval(void);
int sfw_session_removed(void);

int stt_check_events(void);
int swi_check_events(void);
int srpc_check_event(int timeout);

int lnet_selftest_init(void);
void lnet_selftest_fini(void);
int selftest_wait_events(void);

#else

#define selftest_wait_events()    cfs_pause(cfs_time_seconds(1))

#endif

#define lst_wait_until(cond, lock, fmt, a...)                           \
do {                                                                    \
        int __I = 2;                                                    \
        while (!(cond)) {                                               \
                __I++;                                                  \
                CDEBUG(((__I & (-__I)) == __I) ? D_WARNING :            \
                                                 D_NET,     /* 2**n? */ \
                       fmt, ## a);                                      \
                spin_unlock(&(lock));                                   \
                                                                        \
                selftest_wait_events();                                 \
                                                                        \
                spin_lock(&(lock));                                     \
        }                                                               \
} while (0)

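/*
 * Illustrative usage sketch (not part of the original header).  The caller
 * must already hold "lock"; the macro drops it around selftest_wait_events()
 * and re-checks "cond" each time, logging at D_WARNING only when the
 * iteration count is a power of two so the message becomes progressively
 * rarer.  Here tsi is assumed to point at an sfw_test_instance_t:
 *
 *      spin_lock(&tsi->tsi_lock);
 *      lst_wait_until(atomic_read(&tsi->tsi_nactive) == 0, tsi->tsi_lock,
 *                     "waiting for %d active test units\n",
 *                     atomic_read(&tsi->tsi_nactive));
 *      spin_unlock(&tsi->tsi_lock);
 */
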
static inline void
srpc_wait_service_shutdown (srpc_service_t *sv)
{
        int i = 2;

        spin_lock(&sv->sv_lock);
        LASSERT (sv->sv_shuttingdown);
        spin_unlock(&sv->sv_lock);

        while (srpc_finish_service(sv) == 0) {
                i++;
                CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
                        "Waiting for %s service to shutdown...\n",
                        sv->sv_name);
                selftest_wait_events();
        }
}

#endif /* __SELFTEST_SELFTEST_H__ */