/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/selftest/selftest.h
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 */
#ifndef __SELFTEST_SELFTEST_H__
#define __SELFTEST_SELFTEST_H__

#define LNET_ONLY

#ifndef __KERNEL__

/* XXX workaround XXX */
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif

/* TODO: remove these when libcfs provides proper primitives for userspace.
 *
 * Dummy implementations of spinlock_t and atomic_t work because userspace
 * selftest is completely single-threaded, even when using the multi-threaded
 * usocklnd.
 */
typedef struct { } spinlock_t;
static inline void spin_lock(spinlock_t *l) {return;}
static inline void spin_unlock(spinlock_t *l) {return;}
static inline void spin_lock_init(spinlock_t *l) {return;}

typedef struct { volatile int counter; } atomic_t;
#define atomic_read(a) ((a)->counter)
#define atomic_set(a,b) do {(a)->counter = b; } while (0)
#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
#define atomic_inc(a)  (((a)->counter)++)
#define atomic_dec(a)  do { (a)->counter--; } while (0)

#endif

#include <libcfs/kp30.h>
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
#include <lnet/lib-types.h>
#include <lnet/lnetst.h>

#include "rpc.h"
#include "timer.h"

#ifndef MADE_WITHOUT_COMPROMISE
#define MADE_WITHOUT_COMPROMISE
#endif


#define SWI_STATE_NEWBORN                  0
#define SWI_STATE_REPLY_SUBMITTED          1
#define SWI_STATE_REPLY_SENT               2
#define SWI_STATE_REQUEST_SUBMITTED        3
#define SWI_STATE_REQUEST_SENT             4
#define SWI_STATE_REPLY_RECEIVED           5
#define SWI_STATE_BULK_STARTED             6
#define SWI_STATE_DONE                     10

/* forward refs */
struct swi_workitem;
struct srpc_service;
struct sfw_test_unit;
struct sfw_test_instance;

/*
 * A workitem is deferred work with these semantics:
 * - a workitem always runs in thread context.
 * - a workitem can be concurrent with other workitems but is strictly
 *   serialized with respect to itself.
 * - no CPU affinity: a workitem does not necessarily run on the same CPU
 *   that scheduled it. However, this might change in the future.
 * - if a workitem is scheduled again before it has a chance to run, it
 *   runs only once.
 * - if a workitem is scheduled while it runs, it runs again after it
 *   completes; this ensures that events occurring while other events are
 *   being processed receive due attention. This behavior also allows a
 *   workitem to reschedule itself.
 *
 * Usage notes:
 * - a workitem can sleep but it should be aware of how that sleep might
 *   affect others.
 * - a workitem runs inside a kernel thread so there's no user space to access.
 * - do not use a workitem if the scheduling latency can't be tolerated.
 *
 * When wi_action returns non-zero, it means the workitem has either been
 * freed or reused and the workitem scheduler won't touch it any more.
 */
typedef int (*swi_action_t) (struct swi_workitem *);
typedef struct swi_workitem {
        struct list_head wi_list;        /* chain on runq */
        int              wi_state;
        swi_action_t     wi_action;
        void            *wi_data;
        unsigned int     wi_running:1;
        unsigned int     wi_scheduled:1;
} swi_workitem_t;

static inline void
swi_init_workitem (swi_workitem_t *wi, void *data, swi_action_t action)
{
        CFS_INIT_LIST_HEAD(&wi->wi_list);

        wi->wi_running   = 0;
        wi->wi_scheduled = 0;
        wi->wi_data      = data;
        wi->wi_action    = action;
        wi->wi_state     = SWI_STATE_NEWBORN;
}
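
/* Minimal usage sketch (not part of the original header): "my_state",
 * "my_action", "work_done" and "free_my_state" are hypothetical.  The action
 * runs later in a scheduler thread (see swi_schedule_workitem() below) and
 * must return non-zero only once the workitem may no longer be touched,
 * e.g. after freeing or reusing it:
 *
 *      static int
 *      my_action (swi_workitem_t *wi)
 *      {
 *              struct my_state *st = wi->wi_data;
 *
 *              if (!work_done(st))
 *                      return 0;   (scheduler keeps ownership; may rerun wi)
 *
 *              free_my_state(st);  (workitem memory goes away with st)
 *              return 1;           (tell the scheduler to forget about wi)
 *      }
 *
 *      swi_init_workitem(&st->wi, st, my_action);
 *      swi_schedule_workitem(&st->wi);
 */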

#define SWI_RESCHED    128         /* # workitem scheduler loops before reschedule */

/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
 * services, e.g. create/modify session.
 */
#define SRPC_SERVICE_DEBUG              0
#define SRPC_SERVICE_MAKE_SESSION       1
#define SRPC_SERVICE_REMOVE_SESSION     2
#define SRPC_SERVICE_BATCH              3
#define SRPC_SERVICE_TEST               4
#define SRPC_SERVICE_QUERY_STAT         5
#define SRPC_SERVICE_JOIN               6
#define SRPC_FRAMEWORK_SERVICE_MAX_ID   10
/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
#define SRPC_SERVICE_BRW                11
#define SRPC_SERVICE_PING               12
#define SRPC_SERVICE_MAX_ID             12

#define SRPC_REQUEST_PORTAL             50
/* a lazy portal for framework RPC requests */
#define SRPC_FRAMEWORK_REQUEST_PORTAL   51
/* all reply/bulk RDMAs go to this portal */
#define SRPC_RDMA_PORTAL                52

static inline srpc_msg_type_t
srpc_service2request (int service)
{
        switch (service) {
        default:
                LBUG ();
        case SRPC_SERVICE_DEBUG:
                return SRPC_MSG_DEBUG_REQST;

        case SRPC_SERVICE_MAKE_SESSION:
                return SRPC_MSG_MKSN_REQST;

        case SRPC_SERVICE_REMOVE_SESSION:
                return SRPC_MSG_RMSN_REQST;

        case SRPC_SERVICE_BATCH:
                return SRPC_MSG_BATCH_REQST;

        case SRPC_SERVICE_TEST:
                return SRPC_MSG_TEST_REQST;

        case SRPC_SERVICE_QUERY_STAT:
                return SRPC_MSG_STAT_REQST;

        case SRPC_SERVICE_BRW:
                return SRPC_MSG_BRW_REQST;

        case SRPC_SERVICE_PING:
                return SRPC_MSG_PING_REQST;

        case SRPC_SERVICE_JOIN:
                return SRPC_MSG_JOIN_REQST;
        }
}

static inline srpc_msg_type_t
srpc_service2reply (int service)
{
        return srpc_service2request(service) + 1;
}
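
/* The "+ 1" above relies on the message types returned by
 * srpc_service2request() being laid out so that each reply type directly
 * follows its request type in srpc_msg_type_t (defined in the headers
 * included above).  Illustrative worked example:
 *
 *      srpc_service2reply(SRPC_SERVICE_PING) == SRPC_MSG_PING_REQST + 1
 */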

typedef enum {
        SRPC_BULK_REQ_RCVD   = 1, /* passive bulk request (PUT sink/GET source) received */
        SRPC_BULK_PUT_SENT   = 2, /* active bulk PUT sent (source) */
        SRPC_BULK_GET_RPLD   = 3, /* active bulk GET replied (sink) */
        SRPC_REPLY_RCVD      = 4, /* incoming reply received */
        SRPC_REPLY_SENT      = 5, /* outgoing reply sent */
        SRPC_REQUEST_RCVD    = 6, /* incoming request received */
        SRPC_REQUEST_SENT    = 7, /* outgoing request sent */
} srpc_event_type_t;

/* RPC event */
typedef struct {
        srpc_event_type_t ev_type;   /* what's up */
        lnet_event_kind_t ev_lnet;   /* LNet event type */
        int               ev_fired;  /* LNet event fired? */
        int               ev_status; /* LNet event status */
        void             *ev_data;   /* owning server/client RPC */
} srpc_event_t;

typedef struct {
        int              bk_len;  /* len of bulk data */
        lnet_handle_md_t bk_mdh;
        int              bk_sink; /* sink/source */
        int              bk_niov; /* # iov in bk_iovs */
#ifdef __KERNEL__
        lnet_kiov_t      bk_iovs[0];
#else
        cfs_page_t     **bk_pages;
        lnet_md_iovec_t  bk_iovs[0];
#endif
} srpc_bulk_t; /* bulk descriptor */

/* message buffer descriptor */
typedef struct {
        struct list_head     buf_list; /* chain on srpc_service::*_msgq */
        srpc_msg_t           buf_msg;
        lnet_handle_md_t     buf_mdh;
        lnet_nid_t           buf_self;
        lnet_process_id_t    buf_peer;
} srpc_buffer_t;

/* server-side state of an RPC */
typedef struct srpc_server_rpc {
        struct list_head     srpc_list;    /* chain on srpc_service::*_rpcq */
        struct srpc_service *srpc_service;
        swi_workitem_t       srpc_wi;
        srpc_event_t         srpc_ev;      /* bulk/reply event */
        lnet_nid_t           srpc_self;
        lnet_process_id_t    srpc_peer;
        srpc_msg_t           srpc_replymsg;
        lnet_handle_md_t     srpc_replymdh;
        srpc_buffer_t       *srpc_reqstbuf;
        srpc_bulk_t         *srpc_bulk;

        unsigned int         srpc_aborted; /* being given up */
        int                  srpc_status;
        void               (*srpc_done)(struct srpc_server_rpc *);
} srpc_server_rpc_t;

/* client-side state of an RPC */
typedef struct srpc_client_rpc {
        struct list_head     crpc_list;   /* chain on user's lists */
        spinlock_t           crpc_lock;   /* serialize */
        int                  crpc_service;
        atomic_t             crpc_refcount;
        int                  crpc_timeout; /* # seconds to wait for reply */
        stt_timer_t          crpc_timer;
        swi_workitem_t       crpc_wi;
        lnet_process_id_t    crpc_dest;

        void               (*crpc_done)(struct srpc_client_rpc *);
        void               (*crpc_fini)(struct srpc_client_rpc *);
        int                  crpc_status;    /* completion status */
        void                *crpc_priv;      /* caller data */

        /* state flags */
        unsigned int         crpc_aborted:1; /* being given up */
        unsigned int         crpc_closed:1;  /* completed */

        /* RPC events */
        srpc_event_t         crpc_bulkev;    /* bulk event */
        srpc_event_t         crpc_reqstev;   /* request event */
        srpc_event_t         crpc_replyev;   /* reply event */

        /* bulk, request (reqst), and reply exchanged on wire */
        srpc_msg_t           crpc_reqstmsg;
        srpc_msg_t           crpc_replymsg;
        lnet_handle_md_t     crpc_reqstmdh;
        lnet_handle_md_t     crpc_replymdh;
        srpc_bulk_t          crpc_bulk;
} srpc_client_rpc_t;

#define srpc_client_rpc_size(rpc)                                       \
offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])

#define srpc_client_rpc_addref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        atomic_inc(&(rpc)->crpc_refcount);                              \
} while (0)

#define srpc_client_rpc_decref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        if (atomic_dec_and_test(&(rpc)->crpc_refcount))                 \
                srpc_destroy_client_rpc(rpc);                           \
} while (0)
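
/* Typical refcounting pattern (illustrative sketch, not from the original
 * header): a new user takes a reference before the RPC is handed over, and
 * the last srpc_client_rpc_decref() frees it via srpc_destroy_client_rpc():
 *
 *      srpc_client_rpc_addref(rpc);     (extra ref for the new user)
 *      ...hand rpc to another context...
 *      srpc_client_rpc_decref(rpc);     (paired release)
 */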

#define srpc_event_pending(rpc)   ((rpc)->crpc_bulkev.ev_fired == 0 ||  \
                                   (rpc)->crpc_reqstev.ev_fired == 0 || \
                                   (rpc)->crpc_replyev.ev_fired == 0)

typedef struct srpc_service {
        int                sv_id;            /* service id */
        const char        *sv_name;          /* human readable name */
        int                sv_nprune;        /* # posted RPC to be pruned */
        int                sv_concur;        /* max # concurrent RPCs */

        spinlock_t         sv_lock;
        int                sv_shuttingdown;
        srpc_event_t       sv_ev;            /* LNet event */
        int                sv_nposted_msg;   /* # posted message buffers */
        struct list_head   sv_free_rpcq;     /* free RPC descriptors */
        struct list_head   sv_active_rpcq;   /* in-flight RPCs */
        struct list_head   sv_posted_msgq;   /* posted message buffers */
        struct list_head   sv_blocked_msgq;  /* blocked for RPC descriptor */

        /* Service callbacks:
         * - sv_handler: process incoming RPC request
         * - sv_bulk_ready: notify that bulk data is ready
         */
        int                (*sv_handler) (srpc_server_rpc_t *);
        int                (*sv_bulk_ready) (srpc_server_rpc_t *, int);
} srpc_service_t;
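
/* How a service might be wired up with the declarations further down in this
 * header (a sketch only; "ping_handler", "ping_bulk_ready" and the exact
 * buffer/teardown sequence are hypothetical; SFW_SERVICE_CONCURRENCY and
 * SFW_POST_BUFFERS are defined just below):
 *
 *      static srpc_service_t ping_service = {
 *              .sv_id         = SRPC_SERVICE_PING,
 *              .sv_name       = "ping_test",
 *              .sv_concur     = SFW_SERVICE_CONCURRENCY,
 *              .sv_handler    = ping_handler,
 *              .sv_bulk_ready = ping_bulk_ready,
 *      };
 *
 *      rc = srpc_add_service(&ping_service);
 *      if (rc == 0)
 *              rc = srpc_service_add_buffers(&ping_service, SFW_POST_BUFFERS);
 *      ...
 *      srpc_shutdown_service(&ping_service);   (on teardown)
 *      srpc_remove_service(&ping_service);
 */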

#define SFW_POST_BUFFERS         256
#define SFW_SERVICE_CONCURRENCY  (SFW_POST_BUFFERS/2)

typedef struct {
        struct list_head  sn_list;    /* chain on fw_zombie_sessions */
        lst_sid_t         sn_id;      /* unique identifier */
        unsigned int      sn_timeout; /* # seconds' inactivity to expire */
        int               sn_timer_active;
        stt_timer_t       sn_timer;
        struct list_head  sn_batches; /* list of batches */
        char              sn_name[LST_NAME_SIZE];
        atomic_t          sn_refcount;
        atomic_t          sn_brw_errors;
        atomic_t          sn_ping_errors;
} sfw_session_t;

#define sfw_sid_equal(sid0, sid1)     ((sid0).ses_nid == (sid1).ses_nid && \
                                       (sid0).ses_stamp == (sid1).ses_stamp)

typedef struct {
        struct list_head  bat_list;      /* chain on sn_batches */
        lst_bid_t         bat_id;        /* batch id */
        int               bat_error;     /* error code of batch */
        sfw_session_t    *bat_session;   /* batch's session */
        atomic_t          bat_nactive;   /* # of active tests */
        struct list_head  bat_tests;     /* test instances */
} sfw_batch_t;

typedef struct {
        int  (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
        void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
        int  (*tso_prep_rpc)(struct sfw_test_unit *tsu,
                             lnet_process_id_t dest,
                             srpc_client_rpc_t **rpc);   /* prepare a test RPC */
        void (*tso_done_rpc)(struct sfw_test_unit *tsu,
                             srpc_client_rpc_t *rpc);    /* handle a completed test RPC */
} sfw_test_client_ops_t;
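
/* A test client plugs into the framework by providing one of these ops
 * tables; the callback names below are hypothetical placeholders, shown only
 * to illustrate the shape of the table:
 *
 *      static sfw_test_client_ops_t example_test_ops = {
 *              .tso_init     = example_init,      (set up per-instance state)
 *              .tso_fini     = example_fini,
 *              .tso_prep_rpc = example_prep_rpc,  (build one RPC for a dest)
 *              .tso_done_rpc = example_done_rpc,  (inspect reply, count errors)
 *      };
 *
 * An sfw_test_case_t (see below) pairs such a client ops table with the
 * srpc_service_t that serves the same test type.
 */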

typedef struct sfw_test_instance {
        struct list_head        tsi_list;         /* chain on batch */
        int                     tsi_service;      /* test type */
        sfw_batch_t            *tsi_batch;        /* batch */
        sfw_test_client_ops_t  *tsi_ops;          /* test client operations */

        /* public parameters for all test units */
        int                     tsi_is_client:1;     /* is test client */
        int                     tsi_stoptsu_onerr:1; /* stop tsu on error */
        int                     tsi_concur;          /* concurrency */
        int                     tsi_loop;            /* loop count */

        /* status of test instance */
        spinlock_t              tsi_lock;         /* serialize */
        int                     tsi_stopping:1;   /* test is stopping */
        atomic_t                tsi_nactive;      /* # of active test units */
        struct list_head        tsi_units;        /* test units */
        struct list_head        tsi_free_rpcs;    /* free rpcs */
        struct list_head        tsi_active_rpcs;  /* active rpcs */

        union {
                test_bulk_req_t bulk;             /* bulk parameter */
                test_ping_req_t ping;             /* ping parameter */
        } tsi_u;
} sfw_test_instance_t;

/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
 * the end of pages are not used */
#define SFW_MAX_CONCUR     LST_MAX_CONCUR
#define SFW_ID_PER_PAGE    (CFS_PAGE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS     (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n)    (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
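
/* Worked example, assuming a 4 KiB CFS_PAGE_SIZE and a 12-byte packed id
 * (both sizes are illustrative assumptions, not guaranteed by this header):
 *
 *      SFW_ID_PER_PAGE    = 4096 / 12 = 341
 *      sfw_id_pages(1000) = (1000 + 340) / 341 = 3 pages
 */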

typedef struct sfw_test_unit {
        struct list_head        tsu_list;         /* chain on sfw_test_instance */
        lnet_process_id_t       tsu_dest;         /* id of dest node */
        int                     tsu_loop;         /* loop count of the test */
        sfw_test_instance_t    *tsu_instance;     /* pointer to test instance */
        void                   *tsu_private;      /* private data */
        swi_workitem_t          tsu_worker;       /* workitem of the test unit */
} sfw_test_unit_t;

typedef struct {
        struct list_head        tsc_list;         /* chain on fw_tests */
        srpc_service_t         *tsc_srv_service;  /* test service */
        sfw_test_client_ops_t  *tsc_cli_ops;      /* ops of test client */
} sfw_test_case_t;


srpc_client_rpc_t *
sfw_create_rpc(lnet_process_id_t peer, int service, int nbulkiov, int bulklen,
               void (*done) (srpc_client_rpc_t *), void *priv);
int sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
                        int nblk, int blklen, srpc_client_rpc_t **rpc);
void sfw_abort_rpc(srpc_client_rpc_t *rpc);
void sfw_post_rpc(srpc_client_rpc_t *rpc);
void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
void sfw_unpack_message(srpc_msg_t *msg);
void sfw_free_pages(srpc_server_rpc_t *rpc);
void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i);
int sfw_alloc_pages(srpc_server_rpc_t *rpc, int npages, int sink);
int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);

srpc_client_rpc_t *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
                       int nbulkiov, int bulklen,
                       void (*rpc_done)(srpc_client_rpc_t *),
                       void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
void srpc_post_rpc(srpc_client_rpc_t *rpc);
void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
void srpc_free_bulk(srpc_bulk_t *bk);
srpc_bulk_t *srpc_alloc_bulk(int npages, int sink);
int srpc_send_rpc(swi_workitem_t *wi);
int srpc_send_reply(srpc_server_rpc_t *rpc);
int srpc_add_service(srpc_service_t *sv);
int srpc_remove_service(srpc_service_t *sv);
void srpc_shutdown_service(srpc_service_t *sv);
void srpc_abort_service(srpc_service_t *sv);
int srpc_finish_service(srpc_service_t *sv);
int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
void srpc_get_counters(srpc_counters_t *cnt);
void srpc_set_counters(const srpc_counters_t *cnt);
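
/* Client-side RPC lifecycle, pieced together from the declarations above
 * (a sketch; "ping_done" and the zero-bulk parameters are hypothetical):
 *
 *      static void
 *      ping_done (srpc_client_rpc_t *rpc)
 *      {
 *              ...examine rpc->crpc_status / rpc->crpc_replymsg...
 *      }
 *
 *      rpc = srpc_create_client_rpc(peer, SRPC_SERVICE_PING, 0, 0,
 *                                   ping_done, NULL, NULL);
 *      if (rpc != NULL)
 *              srpc_post_rpc(rpc);     (crpc_done fires on completion)
 *
 * The caller's reference (see srpc_init_client_rpc() below) is dropped with
 * srpc_client_rpc_decref() once the caller is finished with the RPC;
 * srpc_abort_rpc() can cut a posted RPC short.
 */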

void swi_kill_workitem(swi_workitem_t *wi);
void swi_schedule_workitem(swi_workitem_t *wi);
void swi_schedule_serial_workitem(swi_workitem_t *wi);
int swi_startup(void);
int sfw_startup(void);
int srpc_startup(void);
void swi_shutdown(void);
void sfw_shutdown(void);
void srpc_shutdown(void);

static inline void
srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
{
        LASSERT (rpc != NULL);
        LASSERT (!srpc_event_pending(rpc));
        LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
        LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif

        if (rpc->crpc_fini == NULL) {
                LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
        } else {
                (*rpc->crpc_fini) (rpc);
        }

        return;
}

static inline void
srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
                      int service, int nbulkiov, int bulklen,
                      void (*rpc_done)(srpc_client_rpc_t *),
                      void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
        LASSERT (nbulkiov <= LNET_MAX_IOV);

        memset(rpc, 0, offsetof(srpc_client_rpc_t,
                                crpc_bulk.bk_iovs[nbulkiov]));

        CFS_INIT_LIST_HEAD(&rpc->crpc_list);
        swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc);
        spin_lock_init(&rpc->crpc_lock);
        atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */

        rpc->crpc_dest         = peer;
        rpc->crpc_priv         = priv;
        rpc->crpc_service      = service;
        rpc->crpc_bulk.bk_len  = bulklen;
        rpc->crpc_bulk.bk_niov = nbulkiov;
        rpc->crpc_done         = rpc_done;
        rpc->crpc_fini         = rpc_fini;
        rpc->crpc_reqstmdh     =
        rpc->crpc_replymdh     =
        rpc->crpc_bulk.bk_mdh  = LNET_INVALID_HANDLE;

        /* no event is expected at this point */
        rpc->crpc_bulkev.ev_fired  =
        rpc->crpc_reqstev.ev_fired =
        rpc->crpc_replyev.ev_fired = 1;

        rpc->crpc_reqstmsg.msg_magic   = SRPC_MSG_MAGIC;
        rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
        rpc->crpc_reqstmsg.msg_type    = srpc_service2request(service);
        return;
}

static inline const char *
swi_state2str (int state)
{
#define STATE2STR(x) case x: return #x
        switch(state) {
                default:
                        LBUG();
                STATE2STR(SWI_STATE_NEWBORN);
                STATE2STR(SWI_STATE_REPLY_SUBMITTED);
                STATE2STR(SWI_STATE_REPLY_SENT);
                STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
                STATE2STR(SWI_STATE_REQUEST_SENT);
                STATE2STR(SWI_STATE_REPLY_RECEIVED);
                STATE2STR(SWI_STATE_BULK_STARTED);
                STATE2STR(SWI_STATE_DONE);
        }
#undef STATE2STR
}

#define UNUSED(x)       ( (void)(x) )

#ifndef __KERNEL__

int stt_poll_interval(void);
int sfw_session_removed(void);

int stt_check_events(void);
int swi_check_events(void);
int srpc_check_event(int timeout);

int lnet_selftest_init(void);
void lnet_selftest_fini(void);
int selftest_wait_events(void);

#else

#define selftest_wait_events()    cfs_pause(cfs_time_seconds(1))

#endif

#define lst_wait_until(cond, lock, fmt, a...)                           \
do {                                                                    \
        int __I = 2;                                                    \
        while (!(cond)) {                                               \
                __I++;                                                  \
                CDEBUG(((__I & (-__I)) == __I) ? D_WARNING :            \
                                                 D_NET,     /* 2**n? */ \
                       fmt, ## a);                                      \
                spin_unlock(&(lock));                                   \
                                                                        \
                selftest_wait_events();                                 \
                                                                        \
                spin_lock(&(lock));                                     \
        }                                                               \
} while (0)
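
/* Usage sketch: the macro must be entered with "lock" held; it drops the
 * lock, waits for events, retakes the lock on every iteration, and logs at
 * power-of-two intervals.  "rpcq" and the message text are made up:
 *
 *      spin_lock(&sv->sv_lock);
 *      lst_wait_until(list_empty(&rpcq), sv->sv_lock,
 *                     "waiting for RPCs to drain\n");
 *      spin_unlock(&sv->sv_lock);
 */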

static inline void
srpc_wait_service_shutdown (srpc_service_t *sv)
{
        int i = 2;

        spin_lock(&sv->sv_lock);
        LASSERT (sv->sv_shuttingdown);
        spin_unlock(&sv->sv_lock);

        while (srpc_finish_service(sv) == 0) {
                i++;
                CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
                        "Waiting for %s service to shutdown...\n",
                        sv->sv_name);
                selftest_wait_events();
        }
}

#endif /* __SELFTEST_SELFTEST_H__ */