/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
 *   Author: Isaac Huang <isaac@clusterfs.com>
 */
#ifndef __SELFTEST_SELFTEST_H__
#define __SELFTEST_SELFTEST_H__
#ifndef __KERNEL__
#include <liblustre.h>          /* userland spinlock_t and atomic_t */
#endif

#include <libcfs/kp30.h>
#include <libcfs/libcfs.h>
#include <lnet/lnet.h>
#include <lnet/lib-lnet.h>
#include <lnet/lib-types.h>
#include <lnet/lnetst.h>

#include "rpc.h"                /* srpc_msg_t, srpc_counters_t */
#include "timer.h"              /* stt_timer_t */
#ifndef MADE_WITHOUT_COMPROMISE
#define MADE_WITHOUT_COMPROMISE
#endif
#define SWI_STATE_NEWBORN               0
#define SWI_STATE_REPLY_SUBMITTED       1
#define SWI_STATE_REPLY_SENT            2
#define SWI_STATE_REQUEST_SUBMITTED     3
#define SWI_STATE_REQUEST_SENT          4
#define SWI_STATE_REPLY_RECEIVED        5
#define SWI_STATE_BULK_STARTED          6
#define SWI_STATE_DONE                  10
/* forward refs */
struct swi_workitem;
struct srpc_service;
struct sfw_test_unit;
struct sfw_test_instance;
/*
 * A workitem is deferred work with these semantics:
 * - a workitem always runs in thread context.
 * - a workitem can be concurrent with other workitems but is strictly
 *   serialized with respect to itself.
 * - no CPU affinity, a workitem does not necessarily run on the same CPU
 *   that schedules it. However, this might change in the future.
 * - if a workitem is scheduled again before it has a chance to run, it
 *   runs only once.
 * - if a workitem is scheduled while it runs, it runs again after it
 *   completes; this ensures that events occurring while other events are
 *   being processed receive due attention. This behavior also allows a
 *   workitem to reschedule itself.
 *
 * Usage notes:
 * - a workitem can sleep, but it should be aware of how that sleep might
 *   affect other workitems.
 * - a workitem runs inside a kernel thread so there's no user space to access.
 * - do not use a workitem if the scheduling latency can't be tolerated.
 *
 * When wi_action returns non-zero, it means the workitem has either been
 * freed or reused and the workitem scheduler won't touch it any more.
 */
typedef int (*swi_action_t) (struct swi_workitem *);
typedef struct swi_workitem {
        struct list_head wi_list;        /* chain on runq */
        int              wi_state;       /* SWI_STATE_* */
        swi_action_t     wi_action;      /* action to run */
        void            *wi_data;        /* arbitrary data for wi_action */
        unsigned int     wi_running:1;   /* action is being executed */
        unsigned int     wi_scheduled:1; /* on the scheduler's runq */
} swi_workitem_t;
static inline void
swi_init_workitem (swi_workitem_t *wi, void *data, swi_action_t action)
{
        CFS_INIT_LIST_HEAD(&wi->wi_list);

        wi->wi_running   = 0;
        wi->wi_scheduled = 0;
        wi->wi_data      = data;
        wi->wi_action    = action;
        wi->wi_state     = SWI_STATE_NEWBORN;
}
#define SWI_RESCHED    128     /* # workitem scheduler loops before reschedule */
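/*
 * Typical use (illustrative sketch, names are hypothetical): embed a
 * swi_workitem_t in the object that owns the deferred work, initialize it
 * once with swi_init_workitem(&obj->wi, obj, my_action), and call
 * swi_schedule_workitem(&obj->wi) whenever there is new work.  The action
 * later runs in a scheduler thread; it returns 0 to stay owned by the
 * scheduler, or non-zero once it has freed or reused the workitem.
 */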
/* services below SRPC_FRAMEWORK_SERVICE_MAX_ID are framework
 * services, e.g. create/modify session.
 */
#define SRPC_SERVICE_DEBUG              0
#define SRPC_SERVICE_MAKE_SESSION       1
#define SRPC_SERVICE_REMOVE_SESSION     2
#define SRPC_SERVICE_BATCH              3
#define SRPC_SERVICE_TEST               4
#define SRPC_SERVICE_QUERY_STAT         5
#define SRPC_SERVICE_JOIN               6
#define SRPC_FRAMEWORK_SERVICE_MAX_ID   10
/* other services start from SRPC_FRAMEWORK_SERVICE_MAX_ID+1 */
#define SRPC_SERVICE_BRW                11
#define SRPC_SERVICE_PING               12
#define SRPC_SERVICE_MAX_ID             12
#define SRPC_REQUEST_PORTAL             50
/* a lazy portal for framework RPC requests */
#define SRPC_FRAMEWORK_REQUEST_PORTAL   51
/* all reply/bulk RDMAs go to this portal */
#define SRPC_RDMA_PORTAL                52
static inline srpc_msg_type_t
srpc_service2request (int service)
{
        switch (service) {
        default:
                LBUG ();
        case SRPC_SERVICE_DEBUG:
                return SRPC_MSG_DEBUG_REQST;

        case SRPC_SERVICE_MAKE_SESSION:
                return SRPC_MSG_MKSN_REQST;

        case SRPC_SERVICE_REMOVE_SESSION:
                return SRPC_MSG_RMSN_REQST;

        case SRPC_SERVICE_BATCH:
                return SRPC_MSG_BATCH_REQST;

        case SRPC_SERVICE_TEST:
                return SRPC_MSG_TEST_REQST;

        case SRPC_SERVICE_QUERY_STAT:
                return SRPC_MSG_STAT_REQST;

        case SRPC_SERVICE_BRW:
                return SRPC_MSG_BRW_REQST;

        case SRPC_SERVICE_PING:
                return SRPC_MSG_PING_REQST;

        case SRPC_SERVICE_JOIN:
                return SRPC_MSG_JOIN_REQST;
        }
}
static inline srpc_msg_type_t
srpc_service2reply (int service)
{
        return srpc_service2request(service) + 1;
}
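/*
 * Note: srpc_service2reply() relies on the srpc_msg_type_t layout (defined
 * alongside srpc_msg_t) keeping each *_REPLY value immediately after its
 * matching *_REQST value, so "request + 1" yields the reply type.
 */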
typedef enum {
        SRPC_BULK_REQ_RCVD   = 0, /* passive bulk request (PUT sink/GET source) received */
        SRPC_BULK_PUT_SENT   = 1, /* active bulk PUT sent (source) */
        SRPC_BULK_GET_RPLD   = 2, /* active bulk GET replied (sink) */
        SRPC_REPLY_RCVD      = 3, /* incoming reply received */
        SRPC_REPLY_SENT      = 4, /* outgoing reply sent */
        SRPC_REQUEST_RCVD    = 5, /* incoming request received */
        SRPC_REQUEST_SENT    = 6, /* outgoing request sent */
} srpc_event_type_t;
/* RPC event, filled in by the LNet event handler */
typedef struct {
        srpc_event_type_t ev_type;   /* what's up */
        lnet_event_kind_t ev_lnet;   /* LNet event type */
        int               ev_fired;  /* LNet event fired? */
        int               ev_status; /* LNet event status */
        void             *ev_data;   /* owning server/client RPC */
} srpc_event_t;
typedef struct {
        int              bk_len;     /* len of bulk data */
        lnet_handle_md_t bk_mdh;
        int              bk_sink;    /* sink/source */
        int              bk_niov;    /* # iov in bk_iovs */
#ifdef __KERNEL__
        lnet_kiov_t      bk_iovs[0];
#else
        cfs_page_t     **bk_pages;
        lnet_md_iovec_t  bk_iovs[0];
#endif
} srpc_bulk_t; /* bulk descriptor */
typedef struct srpc_peer {
        struct list_head stp_list;     /* chain on peer hash */
        struct list_head stp_rpcq;     /* q of non-control RPCs */
        struct list_head stp_ctl_rpcq; /* q of control RPCs */
        spinlock_t       stp_lock;     /* serialize */
        lnet_nid_t       stp_nid;      /* peer's NID */
        int              stp_credits;  /* available credits */
} srpc_peer_t;
/* message buffer descriptor */
typedef struct {
        struct list_head  buf_list; /* chain on srpc_service::*_msgq */
        srpc_msg_t        buf_msg;
        lnet_handle_md_t  buf_mdh;
        lnet_nid_t        buf_self;
        lnet_process_id_t buf_peer;
} srpc_buffer_t;
/* server-side state of an RPC */
typedef struct srpc_server_rpc {
        struct list_head     srpc_list;     /* chain on srpc_service::*_rpcq */
        struct srpc_service *srpc_service;
        swi_workitem_t       srpc_wi;
        srpc_event_t         srpc_ev;       /* bulk/reply event */
        lnet_nid_t           srpc_self;
        lnet_process_id_t    srpc_peer;
        srpc_msg_t           srpc_replymsg;
        lnet_handle_md_t     srpc_replymdh;
        srpc_buffer_t       *srpc_reqstbuf;
        srpc_bulk_t         *srpc_bulk;

        int                  srpc_status;
        void               (*srpc_done)(struct srpc_server_rpc *);
} srpc_server_rpc_t;
/* client-side state of an RPC */
typedef struct srpc_client_rpc {
        struct list_head     crpc_list;      /* chain on user's lists */
        struct list_head     crpc_privl;     /* chain on srpc_peer_t::*rpcq */
        spinlock_t           crpc_lock;      /* serialize */
        int                  crpc_service;   /* service id of this RPC */
        atomic_t             crpc_refcount;
        int                  crpc_timeout;   /* # seconds to wait for reply */
        stt_timer_t          crpc_timer;
        swi_workitem_t       crpc_wi;
        lnet_process_id_t    crpc_dest;
        srpc_peer_t         *crpc_peer;

        void               (*crpc_done)(struct srpc_client_rpc *);
        void               (*crpc_fini)(struct srpc_client_rpc *);
        int                  crpc_status;    /* completion status */
        void                *crpc_priv;      /* caller data */

        unsigned int         crpc_aborted:1; /* being given up */
        unsigned int         crpc_closed:1;  /* completed */

        /* RPC events */
        srpc_event_t         crpc_bulkev;    /* bulk event */
        srpc_event_t         crpc_reqstev;   /* request event */
        srpc_event_t         crpc_replyev;   /* reply event */

        /* bulk, request(reqst), and reply exchanged on wire */
        srpc_msg_t           crpc_reqstmsg;
        srpc_msg_t           crpc_replymsg;
        lnet_handle_md_t     crpc_reqstmdh;
        lnet_handle_md_t     crpc_replymdh;
        srpc_bulk_t          crpc_bulk;      /* must be last: flexible bk_iovs[] */
} srpc_client_rpc_t;
#define srpc_client_rpc_size(rpc)                                       \
offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
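/*
 * srpc_client_rpc_size() gives the footprint of an RPC together with its
 * trailing bk_iovs[] flexible array: the offset of the element just past
 * the last iov.  For example, an RPC with bk_niov == 2 occupies
 * offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[2]) bytes.
 */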
#define srpc_client_rpc_addref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)++\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        atomic_inc(&(rpc)->crpc_refcount);                              \
} while (0)
#define srpc_client_rpc_decref(rpc)                                     \
do {                                                                    \
        CDEBUG(D_NET, "RPC[%p] -> %s (%d)--\n",                         \
               (rpc), libcfs_id2str((rpc)->crpc_dest),                  \
               atomic_read(&(rpc)->crpc_refcount));                     \
        LASSERT(atomic_read(&(rpc)->crpc_refcount) > 0);                \
        if (atomic_dec_and_test(&(rpc)->crpc_refcount))                 \
                srpc_destroy_client_rpc(rpc);                           \
} while (0)
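/*
 * Callers that keep an RPC around are expected to pair
 * srpc_client_rpc_addref() with srpc_client_rpc_decref(); the RPC is
 * destroyed when the count drops to zero (see srpc_destroy_client_rpc()
 * below).
 */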
#define srpc_event_pending(rpc)   ((rpc)->crpc_bulkev.ev_fired == 0 ||  \
                                   (rpc)->crpc_reqstev.ev_fired == 0 || \
                                   (rpc)->crpc_replyev.ev_fired == 0)
typedef struct srpc_service {
        int                sv_id;            /* service id */
        const char        *sv_name;          /* human readable name */
        int                sv_nprune;        /* # posted RPC to be pruned */
        int                sv_concur;        /* max # concurrent RPCs */

        spinlock_t         sv_lock;          /* serialize */
        int                sv_shuttingdown;
        srpc_event_t       sv_ev;            /* LNet event */
        int                sv_nposted_msg;   /* # posted message buffers */
        struct list_head   sv_free_rpcq;     /* free RPC descriptors */
        struct list_head   sv_active_rpcq;   /* in-flight RPCs */
        struct list_head   sv_posted_msgq;   /* posted message buffers */
        struct list_head   sv_blocked_msgq;  /* blocked for RPC descriptor */

        /* Service callbacks:
         * - sv_handler: process incoming RPC request
         * - sv_bulk_ready: notify bulk data
         */
        int                (*sv_handler) (srpc_server_rpc_t *);
        int                (*sv_bulk_ready) (srpc_server_rpc_t *, int);
} srpc_service_t;
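/*
 * Usage sketch (inferred from the declarations below): a service fills in
 * sv_id, sv_name, sv_concur and the two callbacks, registers itself with
 * srpc_add_service(), and posts receive buffers with
 * srpc_service_add_buffers(); the remaining fields are managed by the RPC
 * layer itself.
 */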
#define SFW_POST_BUFFERS         8
#define SFW_SERVICE_CONCURRENCY  (SFW_POST_BUFFERS/2)
typedef struct {
        struct list_head  sn_list;    /* chain on fw_zombie_sessions */
        lst_sid_t         sn_id;      /* unique identifier */
        unsigned int      sn_timeout; /* # seconds' inactivity to expire */
        int               sn_timer_active;
        stt_timer_t       sn_timer;
        struct list_head  sn_batches; /* list of batches */
        char              sn_name[LST_NAME_SIZE];
        atomic_t          sn_brw_errors;
        atomic_t          sn_ping_errors;
} sfw_session_t;
#define sfw_sid_equal(sid0, sid1)     ((sid0).ses_nid == (sid1).ses_nid && \
                                       (sid0).ses_stamp == (sid1).ses_stamp)
typedef struct {
        struct list_head  bat_list;    /* chain on sn_batches */
        lst_bid_t         bat_id;      /* batch id */
        int               bat_error;   /* error code of batch */
        sfw_session_t    *bat_session; /* batch's session */
        atomic_t          bat_nactive; /* # of active tests */
        struct list_head  bat_tests;   /* test instances */
} sfw_batch_t;
typedef struct {
        int  (*tso_init)(struct sfw_test_instance *tsi); /* initialize test client */
        void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test client */
        int  (*tso_prep_rpc)(struct sfw_test_unit *tsu,
                             lnet_process_id_t dest,
                             srpc_client_rpc_t **rpc);   /* prepare a test rpc */
        void (*tso_done_rpc)(struct sfw_test_unit *tsu,
                             srpc_client_rpc_t *rpc);    /* complete a test rpc */
} sfw_test_client_ops_t;
typedef struct sfw_test_instance {
        struct list_head        tsi_list;            /* chain on batch */
        int                     tsi_service;         /* test type */
        sfw_batch_t            *tsi_batch;           /* batch */
        sfw_test_client_ops_t  *tsi_ops;             /* test client operations */

        /* public parameters for all test units */
        int                     tsi_is_client:1;     /* is test client */
        int                     tsi_stoptsu_onerr:1; /* stop tsu on error */
        int                     tsi_concur;          /* concurrency */
        int                     tsi_loop;            /* loop count */

        /* status of test instance */
        spinlock_t              tsi_lock;            /* serialize */
        int                     tsi_stopping:1;      /* test is stopping */
        atomic_t                tsi_nactive;         /* # of active test units */
        struct list_head        tsi_units;           /* test units */
        struct list_head        tsi_free_rpcs;       /* free rpcs */
        struct list_head        tsi_active_rpcs;     /* active rpcs */

        union {
                test_bulk_req_t bulk;                /* bulk parameter */
                test_ping_req_t ping;                /* ping parameter */
        } tsi_u;
} sfw_test_instance_t;
/* XXX: trailing (CFS_PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at
 * the end of pages are not used */
#define SFW_MAX_CONCUR     LST_MAX_CONCUR
#define SFW_ID_PER_PAGE    (CFS_PAGE_SIZE / sizeof(lnet_process_id_t))
#define SFW_MAX_NDESTS     (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n)    (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
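/*
 * sfw_id_pages(n) is the usual round-up division: the number of pages needed
 * to hold n destination ids.  For instance, assuming a 4096-byte CFS_PAGE_SIZE
 * and a 16-byte lnet_process_id_t, SFW_ID_PER_PAGE would be 256 and
 * sfw_id_pages(300) would be 2.
 */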
typedef struct sfw_test_unit {
        struct list_head        tsu_list;     /* chain on lst_test_instance */
        lnet_process_id_t       tsu_dest;     /* id of dest node */
        int                     tsu_loop;     /* loop count of the test */
        sfw_test_instance_t    *tsu_instance; /* pointer to test instance */
        void                   *tsu_private;  /* private data */
        swi_workitem_t          tsu_worker;   /* workitem of the test unit */
} sfw_test_unit_t;
typedef struct {
        struct list_head        tsc_list;        /* chain on fw_tests */
        srpc_service_t         *tsc_srv_service; /* test service */
        sfw_test_client_ops_t  *tsc_cli_ops;     /* ops of test client */
} sfw_test_case_t;
srpc_client_rpc_t *
sfw_create_rpc(lnet_process_id_t peer, int service, int nbulkiov, int bulklen,
               void (*done) (srpc_client_rpc_t *), void *priv);
int sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
                        int nblk, int blklen, srpc_client_rpc_t **rpc);
void sfw_abort_rpc(srpc_client_rpc_t *rpc);
void sfw_post_rpc(srpc_client_rpc_t *rpc);
void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
void sfw_unpack_message(srpc_msg_t *msg);
void sfw_free_pages(srpc_server_rpc_t *rpc);
void sfw_add_bulk_page(srpc_bulk_t *bk, cfs_page_t *pg, int i);
int sfw_alloc_pages(srpc_server_rpc_t *rpc, int npages, int sink);
srpc_client_rpc_t *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
                       int nbulkiov, int bulklen,
                       void (*rpc_done)(srpc_client_rpc_t *),
                       void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
void srpc_post_rpc(srpc_client_rpc_t *rpc);
void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
void srpc_free_bulk(srpc_bulk_t *bk);
srpc_bulk_t *srpc_alloc_bulk(int npages, int sink);
int srpc_send_rpc(swi_workitem_t *wi);
int srpc_send_reply(srpc_server_rpc_t *rpc);
int srpc_add_service(srpc_service_t *sv);
int srpc_remove_service(srpc_service_t *sv);
void srpc_shutdown_service(srpc_service_t *sv);
int srpc_finish_service(srpc_service_t *sv);
int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
void srpc_get_counters(srpc_counters_t *cnt);
void srpc_set_counters(const srpc_counters_t *cnt);
void swi_kill_workitem(swi_workitem_t *wi);
void swi_schedule_workitem(swi_workitem_t *wi);
void swi_schedule_serial_workitem(swi_workitem_t *wi);
int swi_startup(void);
int sfw_startup(void);
int srpc_startup(void);
void swi_shutdown(void);
void sfw_shutdown(void);
void srpc_shutdown(void);
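/*
 * Likely bring-up order (inferred from the names above, not spelled out in
 * this header): the workitem and RPC layers are started before the
 * framework, i.e. swi_startup() and srpc_startup() before sfw_startup(),
 * with shutdown in the reverse order.
 */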
static inline void
srpc_destroy_client_rpc (srpc_client_rpc_t *rpc)
{
        LASSERT (rpc != NULL);
        LASSERT (!srpc_event_pending(rpc));
        LASSERT (list_empty(&rpc->crpc_privl));
        LASSERT (atomic_read(&rpc->crpc_refcount) == 0);
#ifndef __KERNEL__
        LASSERT (rpc->crpc_bulk.bk_pages == NULL);
#endif

        if (rpc->crpc_fini == NULL) {
                LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
        } else {
                (*rpc->crpc_fini) (rpc);
        }
}
static inline void
srpc_init_client_rpc (srpc_client_rpc_t *rpc, lnet_process_id_t peer,
                      int service, int nbulkiov, int bulklen,
                      void (*rpc_done)(srpc_client_rpc_t *),
                      void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
        LASSERT (nbulkiov <= LNET_MAX_IOV);

        memset(rpc, 0, offsetof(srpc_client_rpc_t,
                                crpc_bulk.bk_iovs[nbulkiov]));

        CFS_INIT_LIST_HEAD(&rpc->crpc_list);
        CFS_INIT_LIST_HEAD(&rpc->crpc_privl);
        swi_init_workitem(&rpc->crpc_wi, rpc, srpc_send_rpc);
        spin_lock_init(&rpc->crpc_lock);
        atomic_set(&rpc->crpc_refcount, 1); /* 1 ref for caller */

        rpc->crpc_dest         = peer;
        rpc->crpc_priv         = priv;
        rpc->crpc_service      = service;
        rpc->crpc_bulk.bk_len  = bulklen;
        rpc->crpc_bulk.bk_niov = nbulkiov;
        rpc->crpc_done         = rpc_done;
        rpc->crpc_fini         = rpc_fini;
        rpc->crpc_reqstmdh     =
        rpc->crpc_replymdh     =
        rpc->crpc_bulk.bk_mdh  = LNET_INVALID_HANDLE;

        /* no event is expected at this point */
        rpc->crpc_bulkev.ev_fired  =
        rpc->crpc_reqstev.ev_fired =
        rpc->crpc_replyev.ev_fired = 1;

        rpc->crpc_reqstmsg.msg_magic   = SRPC_MSG_MAGIC;
        rpc->crpc_reqstmsg.msg_version = SRPC_MSG_VERSION;
        rpc->crpc_reqstmsg.msg_type    = srpc_service2request(service);
}
static inline const char *
swi_state2str (int state)
{
#define STATE2STR(x) case x: return #x
        switch (state) {
        default:
                LBUG();
        STATE2STR(SWI_STATE_NEWBORN);
        STATE2STR(SWI_STATE_REPLY_SUBMITTED);
        STATE2STR(SWI_STATE_REPLY_SENT);
        STATE2STR(SWI_STATE_REQUEST_SUBMITTED);
        STATE2STR(SWI_STATE_REQUEST_SENT);
        STATE2STR(SWI_STATE_REPLY_RECEIVED);
        STATE2STR(SWI_STATE_BULK_STARTED);
        STATE2STR(SWI_STATE_DONE);
        }
#undef STATE2STR
}
#define UNUSED(x)       ( (void)(x) )
#ifndef __KERNEL__

/* userland only: poll-based event processing */
int stt_poll_interval(void);
int sfw_session_removed(void);

int stt_check_events(void);
int swi_check_events(void);
int srpc_check_event(int timeout);

int lnet_selftest_init(void);
void lnet_selftest_fini(void);
int selftest_wait_events(void);

#else

#define selftest_wait_events()    cfs_pause(cfs_time_seconds(1))

#endif
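/*
 * lst_wait_until() spins on "cond", dropping "lock" around each poll of the
 * event queues.  The (__I & (-__I)) == __I test is true only when the
 * iteration counter is a power of two, so the debug message is promoted to
 * D_WARNING at exponentially spaced intervals instead of on every loop.
 */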
#define lst_wait_until(cond, lock, fmt, a...)                           \
do {                                                                    \
        int __I = 2;                                                    \
        while (!(cond)) {                                               \
                __I++;                                                  \
                CDEBUG(((__I & (-__I)) == __I) ? D_WARNING :            \
                       D_NET,                                           \
                       fmt, ## a);                                      \
                spin_unlock(&(lock));                                   \
                selftest_wait_events();                                 \
                spin_lock(&(lock));                                     \
        }                                                               \
} while (0)
static inline void
srpc_wait_service_shutdown (srpc_service_t *sv)
{
        int i = 2;

        spin_lock(&sv->sv_lock);
        LASSERT (sv->sv_shuttingdown);
        spin_unlock(&sv->sv_lock);

        while (srpc_finish_service(sv) == 0) {
                i++;
                CDEBUG (((i & -i) == i) ? D_WARNING : D_NET,
                        "Waiting for %s service to shutdown...\n",
                        sv->sv_name);
                selftest_wait_events();
        }
}
#endif /* __SELFTEST_SELFTEST_H__ */