/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/selftest/selftest.h
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 */
#ifndef __SELFTEST_SELFTEST_H__
#define __SELFTEST_SELFTEST_H__

#define LNET_ONLY

#ifndef __KERNEL__

/* XXX workaround XXX */
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif

/* TODO: remove these when libcfs provides proper primitives for userspace.
 *
 * Dummy implementations of spinlock_t and atomic_t suffice because userspace
 * selftest is completely single-threaded, even when using the multi-threaded
 * usocklnd.
 */
typedef struct { } spinlock_t;
static inline void spin_lock(spinlock_t *l) {return;}
static inline void spin_unlock(spinlock_t *l) {return;}
static inline void spin_lock_init(spinlock_t *l) {return;}

typedef struct { volatile int counter; } atomic_t;
#define atomic_read(a) ((a)->counter)
#define atomic_set(a,b) do {(a)->counter = b; } while (0)
#define atomic_dec_and_test(a) ((--((a)->counter)) == 0)
#define atomic_inc(a)  (((a)->counter)++)
#define atomic_dec(a)  do { (a)->counter--; } while (0)

#endif

#include <libcfs/kp30.h>
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
#include <lnet/lib-types.h>
#include <lnet/lnetst.h>

#include "rpc.h"
#include "timer.h"

#ifndef MADE_WITHOUT_COMPROMISE
#define MADE_WITHOUT_COMPROMISE
#endif


#define SWI_STATE_NEWBORN                  0
#define SWI_STATE_REPLY_SUBMITTED          1
#define SWI_STATE_REPLY_SENT               2
#define SWI_STATE_REQUEST_SUBMITTED        3
#define SWI_STATE_REQUEST_SENT             4
#define SWI_STATE_REPLY_RECEIVED           5
#define SWI_STATE_BULK_STARTED             6
#define SWI_STATE_BULK_ERRORED             7
#define SWI_STATE_DONE                     10

/* forward refs */
struct swi_workitem;
struct srpc_service;
struct sfw_test_unit;
struct sfw_test_instance;

/*
 * A workitem is deferred work with these semantics:
 * - a workitem always runs in thread context.
 * - a workitem can be concurrent with other workitems but is strictly
 *   serialized with respect to itself.
 * - no CPU affinity, a workitem does not necessarily run on the same CPU
 *   that schedules it. However, this might change in the future.
 * - if a workitem is scheduled again before it has a chance to run, it
 *   runs only once.
 * - if a workitem is scheduled while it runs, it runs again after it
 *   completes; this ensures that events occurring while other events are
 *   being processed receive due attention. This behavior also allows a
 *   workitem to reschedule itself.
 *
 * Usage notes:
 * - a workitem can sleep but it should be aware of how that sleep might
 *   affect others.
 * - a workitem runs inside a kernel thread so there's no user space to access.
 * - do not use a workitem if the scheduling latency can't be tolerated.
 *
 * When wi_action returns non-zero, it means the workitem has either been
 * freed or reused and the workitem scheduler won't touch it any more.
 * An illustrative usage sketch follows swi_init_workitem() below.
 */
typedef int (*swi_action_t) (struct swi_workitem *);
typedef struct swi_workitem {
        struct list_head wi_list;        /* chain on runq */
        int              wi_state;
        swi_action_t     wi_action;
        void            *wi_data;
        unsigned int     wi_running:1;
        unsigned int     wi_scheduled:1;
} swi_workitem_t;

static inline void
swi_init_workitem (swi_workitem_t *wi, void *data, swi_action_t action)
{
        CFS_INIT_LIST_HEAD(&wi->wi_list);

        wi->wi_running   = 0;
        wi->wi_scheduled = 0;
        wi->wi_data      = data;
        wi->wi_action    = action;
        wi->wi_state     = SWI_STATE_NEWBORN;
}
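
/*
 * Illustrative usage sketch (not part of this header; my_state and
 * my_work_done are hypothetical): set up a workitem once, then let its
 * action reschedule itself until the work is finished.  Per the semantics
 * above, returning non-zero tells the scheduler never to touch wi again.
 *
 *     static int
 *     my_action (swi_workitem_t *wi)
 *     {
 *             struct my_state *st = wi->wi_data;
 *
 *             if (!my_work_done(st)) {
 *                     swi_schedule_workitem(wi);  // run again later
 *                     return 0;                   // scheduler keeps wi
 *             }
 *             wi->wi_state = SWI_STATE_DONE;
 *             return 1;                           // wi won't be touched again
 *     }
 *
 *     swi_init_workitem(&st->st_wi, st, my_action);
 *     swi_schedule_workitem(&st->st_wi);
 */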

#define SWI_RESCHED    128         /* # workitem scheduler loops before reschedule */

/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
 * services, e.g. create/modify session.
 */
#define SRPC_SERVICE_DEBUG              0
#define SRPC_SERVICE_MAKE_SESSION       1
#define SRPC_SERVICE_REMOVE_SESSION     2
#define SRPC_SERVICE_BATCH              3
#define SRPC_SERVICE_TEST               4
#define SRPC_SERVICE_QUERY_STAT         5
#define SRPC_SERVICE_JOIN               6
#define SRPC_FRAMEWORK_SERVICE_MAX_ID   10
/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
#define SRPC_SERVICE_BRW                11
#define SRPC_SERVICE_PING               12
#define SRPC_SERVICE_MAX_ID             12

#define SRPC_REQUEST_PORTAL             50
/* a lazy portal for framework RPC requests */
#define SRPC_FRAMEWORK_REQUEST_PORTAL   51
/* all reply/bulk RDMAs go to this portal */
#define SRPC_RDMA_PORTAL                52

static inline srpc_msg_type_t
srpc_service2request (int service)
{
        switch (service) {
        default:
                LBUG ();
        case SRPC_SERVICE_DEBUG:
                return SRPC_MSG_DEBUG_REQST;

        case SRPC_SERVICE_MAKE_SESSION:
                return SRPC_MSG_MKSN_REQST;

        case SRPC_SERVICE_REMOVE_SESSION:
                return SRPC_MSG_RMSN_REQST;

        case SRPC_SERVICE_BATCH:
                return SRPC_MSG_BATCH_REQST;

        case SRPC_SERVICE_TEST:
                return SRPC_MSG_TEST_REQST;

        case SRPC_SERVICE_QUERY_STAT:
                return SRPC_MSG_STAT_REQST;

        case SRPC_SERVICE_BRW:
                return SRPC_MSG_BRW_REQST;

        case SRPC_SERVICE_PING:
                return SRPC_MSG_PING_REQST;

        case SRPC_SERVICE_JOIN:
                return SRPC_MSG_JOIN_REQST;
        }
}

static inline srpc_msg_type_t
srpc_service2reply (int service)
{
        return srpc_service2request(service) + 1;
}
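
/*
 * Example (informational): srpc_service2reply(SRPC_SERVICE_PING) is
 * srpc_service2request(SRPC_SERVICE_PING) + 1, i.e. the reply type paired
 * with SRPC_MSG_PING_REQST (SRPC_MSG_PING_REPLY).  The "+ 1" relies on every
 * *_REQST value in srpc_msg_type_t being immediately followed by its *_REPLY
 * counterpart.
 */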

typedef enum {
        SRPC_BULK_REQ_RCVD   = 0, /* passive bulk request (PUT sink/GET source) received */
        SRPC_BULK_PUT_SENT   = 1, /* active bulk PUT sent (source) */
        SRPC_BULK_GET_RPLD   = 2, /* active bulk GET replied (sink) */
        SRPC_REPLY_RCVD      = 3, /* incoming reply received */
        SRPC_REPLY_SENT      = 4, /* outgoing reply sent */
        SRPC_REQUEST_RCVD    = 5, /* incoming request received */
        SRPC_REQUEST_SENT    = 6, /* outgoing request sent */
} srpc_event_type_t;

/* RPC event */
typedef struct {
        srpc_event_type_t ev_type;   /* what's up */
        lnet_event_kind_t ev_lnet;   /* LNet event type */
        int               ev_fired;  /* LNet event fired? */
        int               ev_status; /* LNet event status */
        void             *ev_data;   /* owning server/client RPC */
} srpc_event_t;

typedef struct {
        int              bk_len;  /* len of bulk data */
        lnet_handle_md_t bk_mdh;
        int              bk_sink; /* sink/source */
        int              bk_niov; /* # iov in bk_iovs */
#ifdef __KERNEL__
        lnet_kiov_t      bk_iovs[0];
#else
        cfs_page_t     **bk_pages;
        lnet_md_iovec_t  bk_iovs[0];
#endif
} srpc_bulk_t; /* bulk descriptor */

typedef struct srpc_peer {
        struct list_head stp_list;     /* chain on peer hash */
        struct list_head stp_rpcq;     /* q of non-control RPCs */
        struct list_head stp_ctl_rpcq; /* q of control RPCs */
        spinlock_t       stp_lock;     /* serialize */
        lnet_nid_t       stp_nid;
        int              stp_credits;  /* available credits */
} srpc_peer_t;

/* message buffer descriptor */
typedef struct {
        struct list_head     buf_list; /* chain on srpc_service::*_msgq */
        srpc_msg_t           buf_msg;
        lnet_handle_md_t     buf_mdh;
        lnet_nid_t           buf_self;
        lnet_process_id_t    buf_peer;
} srpc_buffer_t;

/* server-side state of an RPC */
typedef struct srpc_server_rpc {
        struct list_head     srpc_list;    /* chain on srpc_service::*_rpcq */
        struct srpc_service *srpc_service;
        swi_workitem_t       srpc_wi;
        srpc_event_t         srpc_ev;      /* bulk/reply event */
        lnet_nid_t           srpc_self;
        lnet_process_id_t    srpc_peer;
        srpc_msg_t           srpc_replymsg;
        lnet_handle_md_t     srpc_replymdh;
        srpc_buffer_t       *srpc_reqstbuf;
        srpc_bulk_t         *srpc_bulk;

        int                  srpc_status;
        void               (*srpc_done)(struct srpc_server_rpc *);
} srpc_server_rpc_t;

/* client-side state of an RPC */
typedef struct srpc_client_rpc {
        struct list_head     crpc_list;   /* chain on user's lists */
        struct list_head     crpc_privl;  /* chain on srpc_peer_t::*rpcq */
        spinlock_t           crpc_lock;   /* serialize */
        int                  crpc_service;
        atomic_t             crpc_refcount;
        int                  crpc_timeout; /* # seconds to wait for reply */
        stt_timer_t          crpc_timer;
        swi_workitem_t       crpc_wi;
        lnet_process_id_t    crpc_dest;
        srpc_peer_t         *crpc_peer;

        void               (*crpc_done)(struct srpc_client_rpc *);
        void               (*crpc_fini)(struct srpc_client_rpc *);
        int                  crpc_status;    /* completion status */
        void                *crpc_priv;      /* caller data */

        /* state flags */
        unsigned int         crpc_aborted:1; /* being given up */
        unsigned int         crpc_closed:1;  /* completed */

        /* RPC events */
        srpc_event_t         crpc_bulkev;    /* bulk event */
        srpc_event_t         crpc_reqstev;   /* request event */
        srpc_event_t         crpc_replyev;   /* reply event */

        /* bulk, request (reqst), and reply exchanged on wire */
        srpc_msg_t           crpc_reqstmsg;
        srpc_msg_t           crpc_replymsg;
        lnet_handle_md_t     crpc_reqstmdh;
        lnet_handle_md_t     crpc_replymdh;
        srpc_bulk_t          crpc_bulk;
} srpc_client_rpc_t;

#define srpc_client_rpc_size(rpc)                                       \
offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])

#define srpc_client_rpc_addref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        atomic_inc(&(rpc)->crpc_refcount);                              \
} while (0)

#define srpc_client_rpc_decref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        if (atomic_dec_and_test(&(rpc)->crpc_refcount))                 \
                srpc_destroy_client_rpc(rpc);                           \
} while (0)

#define srpc_event_pending(rpc)   ((rpc)->crpc_bulkev.ev_fired == 0 ||  \
                                   (rpc)->crpc_reqstev.ev_fired == 0 || \
                                   (rpc)->crpc_replyev.ev_fired == 0)
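
/*
 * Reference-counting sketch (illustrative, not part of this header): a user
 * that hands an RPC to another context takes an extra reference first and
 * drops it when done; the RPC is destroyed (or handed to crpc_fini) on the
 * final decref.  Assumes an already created rpc, e.g. from
 * srpc_create_client_rpc() below.
 *
 *     srpc_client_rpc_addref(rpc);    // share rpc with another context
 *     ...                             // both contexts may use rpc now
 *     srpc_client_rpc_decref(rpc);    // drop our reference when finished
 */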

typedef struct srpc_service {
        int                sv_id;            /* service id */
        const char        *sv_name;          /* human readable name */
        int                sv_nprune;        /* # posted RPC to be pruned */
        int                sv_concur;        /* max # concurrent RPCs */

        spinlock_t         sv_lock;
        int                sv_shuttingdown;
        srpc_event_t       sv_ev;            /* LNet event */
        int                sv_nposted_msg;   /* # posted message buffers */
        struct list_head   sv_free_rpcq;     /* free RPC descriptors */
        struct list_head   sv_active_rpcq;   /* in-flight RPCs */
        struct list_head   sv_posted_msgq;   /* posted message buffers */
        struct list_head   sv_blocked_msgq;  /* blocked for RPC descriptor */

        /* Service callbacks:
         * - sv_handler: process incoming RPC request
         * - sv_bulk_ready: notify bulk data
         */
        int                (*sv_handler) (srpc_server_rpc_t *);
        int                (*sv_bulk_ready) (srpc_server_rpc_t *, int);
} srpc_service_t;
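
/*
 * Sketch of how a service is typically wired up (hypothetical handler name;
 * the pattern follows the framework and brw/ping services):
 *
 *     static srpc_service_t my_service = {
 *             .sv_name    = "my service",
 *             .sv_id      = SRPC_SERVICE_PING,
 *             .sv_concur  = SFW_SERVICE_CONCURRENCY,
 *             .sv_handler = my_handle_request,   // fill in the reply message
 *     };
 *
 *     rc = srpc_add_service(&my_service);
 *     if (rc == 0)
 *             rc = srpc_service_add_buffers(&my_service, SFW_POST_BUFFERS);
 */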

#define SFW_POST_BUFFERS         8
#define SFW_SERVICE_CONCURRENCY  (SFW_POST_BUFFERS/2)

typedef struct {
        struct list_head  sn_list;    /* chain on fw_zombie_sessions */
        lst_sid_t         sn_id;      /* unique identifier */
        unsigned int      sn_timeout; /* # seconds' inactivity to expire */
        int               sn_timer_active;
        stt_timer_t       sn_timer;
        struct list_head  sn_batches; /* list of batches */
        char              sn_name[LST_NAME_SIZE];
        atomic_t          sn_refcount;
        atomic_t          sn_brw_errors;
        atomic_t          sn_ping_errors;
} sfw_session_t;

#define sfw_sid_equal(sid0, sid1)     ((sid0).ses_nid == (sid1).ses_nid && \
                                       (sid0).ses_stamp == (sid1).ses_stamp)

typedef struct {
        struct list_head  bat_list;      /* chain on sn_batches */
        lst_bid_t         bat_id;        /* batch id */
        int               bat_error;     /* error code of batch */
        sfw_session_t    *bat_session;   /* batch's session */
        atomic_t          bat_nactive;   /* # of active tests */
        struct list_head  bat_tests;     /* test instances */
} sfw_batch_t;

typedef struct {
        int  (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
        void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
        int  (*tso_prep_rpc)(struct sfw_test_unit *tsu,
                             lnet_process_id_t dest,
                             srpc_client_rpc_t **rpc);   /* prepare a test rpc */
        void (*tso_done_rpc)(struct sfw_test_unit *tsu,
                             srpc_client_rpc_t *rpc);    /* finish a test rpc */
} sfw_test_client_ops_t;
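
/*
 * A test client plugs into the framework by filling in these four callbacks,
 * e.g. (hypothetical names, mirroring the ping/brw test clients):
 *
 *     static sfw_test_client_ops_t my_test_client_ops = {
 *             .tso_init     = my_test_init,      // per-instance setup
 *             .tso_fini     = my_test_fini,      // per-instance teardown
 *             .tso_prep_rpc = my_test_prep_rpc,  // build the next RPC to dest
 *             .tso_done_rpc = my_test_done_rpc,  // digest a completed RPC
 *     };
 */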

typedef struct sfw_test_instance {
        struct list_head        tsi_list;         /* chain on batch */
        int                     tsi_service;      /* test type */
        sfw_batch_t            *tsi_batch;        /* batch */
        sfw_test_client_ops_t  *tsi_ops;          /* test client operations */

        /* public parameters for all test units */
        int                     tsi_is_client:1;     /* is test client */
        int                     tsi_stoptsu_onerr:1; /* stop tsu on error */
        int                     tsi_concur;          /* concurrency */
        int                     tsi_loop;            /* loop count */

        /* status of test instance */
        spinlock_t              tsi_lock;         /* serialize */
        int                     tsi_stopping:1;   /* test is stopping */
        atomic_t                tsi_nactive;      /* # of active test units */
        struct list_head        tsi_units;        /* test units */
        struct list_head        tsi_free_rpcs;    /* free rpcs */
        struct list_head        tsi_active_rpcs;  /* active rpcs */

        union {
                test_bulk_req_t bulk;             /* bulk parameter */
                test_ping_req_t ping;             /* ping parameter */
        } tsi_u;
} sfw_test_instance_t;

/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
 * the end of pages are not used */
#define SFW_MAX_CONCUR     LST_MAX_CONCUR
#define SFW_ID_PER_PAGE    (CFS_PAGE_SIZE / sizeof(lnet_process_id_t))
#define SFW_MAX_NDESTS     (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n)    (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)

typedef struct sfw_test_unit {
        struct list_head        tsu_list;         /* chain on lst_test_instance */
        lnet_process_id_t       tsu_dest;         /* id of dest node */
        int                     tsu_loop;         /* loop count of the test */
        sfw_test_instance_t    *tsu_instance;     /* pointer to test instance */
        void                   *tsu_private;      /* private data */
        swi_workitem_t          tsu_worker;       /* workitem of the test unit */
} sfw_test_unit_t;

typedef struct {
        struct list_head        tsc_list;         /* chain on fw_tests */
        srpc_service_t         *tsc_srv_service;  /* test service */
        sfw_test_client_ops_t  *tsc_cli_ops;      /* ops of test client */
} sfw_test_case_t;


srpc_client_rpc_t *
sfw_create_rpc(lnet_process_id_t peer, int service, int nbulkiov, int bulklen,
               void (*done) (srpc_client_rpc_t *), void *priv);
int sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
                        int nblk, int blklen, srpc_client_rpc_t **rpc);
void sfw_abort_rpc(srpc_client_rpc_t *rpc);
void sfw_post_rpc(srpc_client_rpc_t *rpc);
void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
void sfw_unpack_message(srpc_msg_t *msg);
void sfw_free_pages(srpc_server_rpc_t *rpc);
void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i);
int sfw_alloc_pages(srpc_server_rpc_t *rpc, int npages, int sink);

srpc_client_rpc_t *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
                       int nbulkiov, int bulklen,
                       void (*rpc_done)(srpc_client_rpc_t *),
                       void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
void srpc_post_rpc(srpc_client_rpc_t *rpc);
void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
void srpc_free_bulk(srpc_bulk_t *bk);
srpc_bulk_t *srpc_alloc_bulk(int npages, int sink);
int srpc_send_rpc(swi_workitem_t *wi);
int srpc_send_reply(srpc_server_rpc_t *rpc);
int srpc_add_service(srpc_service_t *sv);
int srpc_remove_service(srpc_service_t *sv);
void srpc_shutdown_service(srpc_service_t *sv);
int srpc_finish_service(srpc_service_t *sv);
int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
void srpc_get_counters(srpc_counters_t *cnt);
void srpc_set_counters(const srpc_counters_t *cnt);

void swi_kill_workitem(swi_workitem_t *wi);
void swi_schedule_workitem(swi_workitem_t *wi);
void swi_schedule_serial_workitem(swi_workitem_t *wi);
int swi_startup(void);
int sfw_startup(void);
int srpc_startup(void);
void swi_shutdown(void);
void sfw_shutdown(void);
void srpc_shutdown(void);

static inline void
srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
{
        LASSERT (rpc != NULL);
        LASSERT (!srpc_event_pending(rpc));
        LASSERT (list_empty(&rpc->crpc_privl));
        LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
        LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif

        if (rpc->crpc_fini == NULL) {
                LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
        } else {
                (*rpc->crpc_fini) (rpc);
        }

        return;
}

static inline void
srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
                      int service, int nbulkiov, int bulklen,
                      void (*rpc_done)(srpc_client_rpc_t *),
                      void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
        LASSERT (nbulkiov <= LNET_MAX_IOV);

        memset(rpc, 0, offsetof(srpc_client_rpc_t,
                                crpc_bulk.bk_iovs[nbulkiov]));

        CFS_INIT_LIST_HEAD(&rpc->crpc_list);
        CFS_INIT_LIST_HEAD(&rpc->crpc_privl);
        swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc);
        spin_lock_init(&rpc->crpc_lock);
        atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */

        rpc->crpc_dest         = peer;
        rpc->crpc_priv         = priv;
        rpc->crpc_service      = service;
        rpc->crpc_bulk.bk_len  = bulklen;
        rpc->crpc_bulk.bk_niov = nbulkiov;
        rpc->crpc_done         = rpc_done;
        rpc->crpc_fini         = rpc_fini;
        rpc->crpc_reqstmdh     =
        rpc->crpc_replymdh     =
        rpc->crpc_bulk.bk_mdh  = LNET_INVALID_HANDLE;

        /* no event is expected at this point */
        rpc->crpc_bulkev.ev_fired  =
        rpc->crpc_reqstev.ev_fired =
        rpc->crpc_replyev.ev_fired = 1;

        rpc->crpc_reqstmsg.msg_magic   = SRPC_MSG_MAGIC;
        rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
        rpc->crpc_reqstmsg.msg_type    = srpc_service2request(service);
        return;
}

static inline const char *
swi_state2str (int state)
{
#define STATE2STR(x) case x: return #x
        switch (state) {
                default:
                        LBUG();
                STATE2STR(SWI_STATE_NEWBORN);
                STATE2STR(SWI_STATE_REPLY_SUBMITTED);
                STATE2STR(SWI_STATE_REPLY_SENT);
                STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
                STATE2STR(SWI_STATE_REQUEST_SENT);
                STATE2STR(SWI_STATE_REPLY_RECEIVED);
                STATE2STR(SWI_STATE_BULK_STARTED);
                STATE2STR(SWI_STATE_BULK_ERRORED);
                STATE2STR(SWI_STATE_DONE);
        }
#undef STATE2STR
}

#define UNUSED(x)       ( (void)(x) )

#ifndef __KERNEL__

int stt_poll_interval(void);
int sfw_session_removed(void);

int stt_check_events(void);
int swi_check_events(void);
int srpc_check_event(int timeout);

int lnet_selftest_init(void);
void lnet_selftest_fini(void);
int selftest_wait_events(void);

#else

#define selftest_wait_events()    cfs_pause(cfs_time_seconds(1))

#endif

#define lst_wait_until(cond, lock, fmt, a...)                           \
do {                                                                    \
        int __I = 2;                                                    \
        while (!(cond)) {                                               \
                __I++;                                                  \
                CDEBUG(((__I & (-__I)) == __I) ? D_WARNING :            \
                                                 D_NET,     /* 2**n? */ \
                       fmt, ## a);                                      \
                spin_unlock(&(lock));                                   \
                                                                        \
                selftest_wait_events();                                 \
                                                                        \
                spin_lock(&(lock));                                     \
        }                                                               \
} while (0)
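
/*
 * Usage sketch: spin (dropping the lock around each poll) until a condition
 * protected by that lock becomes true, logging progressively less often.
 * The lock must already be held on entry, e.g.:
 *
 *     spin_lock(&sv->sv_lock);
 *     lst_wait_until(sv->sv_nposted_msg == 0, sv->sv_lock,
 *                    "waiting for %d posted buffers to drain\n",
 *                    sv->sv_nposted_msg);
 *     spin_unlock(&sv->sv_lock);
 */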

static inline void
srpc_wait_service_shutdown (srpc_service_t *sv)
{
        int i = 2;

        spin_lock(&sv->sv_lock);
        LASSERT (sv->sv_shuttingdown);
        spin_unlock(&sv->sv_lock);

        while (srpc_finish_service(sv) == 0) {
                i++;
                CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
                        "Waiting for %s service to shutdown...\n",
                        sv->sv_name);
                selftest_wait_events();
        }
}

#endif /* __SELFTEST_SELFTEST_H__ */