1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
5 * Use is subject to license terms.
7 * Copyright (c) 2012, 2017, Intel Corporation.
12 * This file is part of Lustre, http://www.lustre.org/
14 * Author: Isaac Huang <isaac@clusterfs.com>
15 * Author: Liang Zhen <liangzhen@clusterfs.com>
/* All CDEBUG/CERROR output from this file is tagged as LNet. */
19 #define DEBUG_SUBSYSTEM S_LNET
/* Sentinel "no session" ID: wildcard NID plus stamp -1. */
23 struct lst_session_id LST_INVALID_SID = { .ses_nid = LNET_ANY_NID, .ses_stamp = -1};
/* Idle seconds before a test session expires (0 == never); read-only. */
25 static int session_timeout = 100;
26 module_param(session_timeout, int, 0444);
27 MODULE_PARM_DESC(session_timeout, "test session timeout in seconds (100 by default, 0 == never)");
/* Seconds before an outgoing test RPC times out (0 == never); writable. */
29 static int rpc_timeout = 64;
30 module_param(rpc_timeout, int, 0644);
31 MODULE_PARM_DESC(rpc_timeout, "rpc timeout in seconds (64 by default, 0 == never)");
/* Byte-swap helpers used when a message arrives from a peer of the
 * opposite endianness: each sfw_unpack_* macro __swab*s()'s every wire
 * field of the named structure in place.  (The do { ... } while (0)
 * wrappers around the swab statements fall outside this excerpt, so no
 * comments are interleaved between the backslash-continued lines.)
 * The trailing sfw_test_active()/sfw_batch_active() macros test whether
 * a test instance / batch still has active units. */
33 #define sfw_unpack_id(id) \
35 __swab64s(&(id).nid); \
36 __swab32s(&(id).pid); \
39 #define sfw_unpack_sid(sid) \
41 __swab64s(&(sid).ses_nid); \
42 __swab64s(&(sid).ses_stamp); \
45 #define sfw_unpack_fw_counters(fc) \
47 __swab32s(&(fc).running_ms); \
48 __swab32s(&(fc).active_batches); \
49 __swab32s(&(fc).zombie_sessions); \
50 __swab32s(&(fc).brw_errors); \
51 __swab32s(&(fc).ping_errors); \
54 #define sfw_unpack_rpc_counters(rc) \
56 __swab32s(&(rc).errors); \
57 __swab32s(&(rc).rpcs_sent); \
58 __swab32s(&(rc).rpcs_rcvd); \
59 __swab32s(&(rc).rpcs_dropped); \
60 __swab32s(&(rc).rpcs_expired); \
61 __swab64s(&(rc).bulk_get); \
62 __swab64s(&(rc).bulk_put); \
65 #define sfw_unpack_lnet_counters(lc) \
67 __swab32s(&(lc).lcc_errors); \
68 __swab32s(&(lc).lcc_msgs_max); \
69 __swab32s(&(lc).lcc_msgs_alloc); \
70 __swab32s(&(lc).lcc_send_count); \
71 __swab32s(&(lc).lcc_recv_count); \
72 __swab32s(&(lc).lcc_drop_count); \
73 __swab32s(&(lc).lcc_route_count); \
74 __swab64s(&(lc).lcc_send_length); \
75 __swab64s(&(lc).lcc_recv_length); \
76 __swab64s(&(lc).lcc_drop_length); \
77 __swab64s(&(lc).lcc_route_length); \
80 #define sfw_test_active(t) (atomic_read(&(t)->tsi_nactive) != 0)
81 #define sfw_batch_active(b) (atomic_read(&(b)->bat_nactive) != 0)
/* Global singleton state of the selftest framework: recyclable client
 * RPCs, sessions being torn down, registered test cases, the current
 * session and the server RPC currently being handled.  The code below
 * guards it with sfw_data.fw_lock; the fw_lock/fw_shuttingdown/
 * fw_nzombies members and the closing "} sfw_data;" are outside this
 * excerpt. */
83 static struct smoketest_framework {
84 /* RPCs to be recycled */
85 struct list_head fw_zombie_rpcs;
86 /* stopping sessions */
87 struct list_head fw_zombie_sessions;
88 /* registered test cases */
89 struct list_head fw_tests;
90 /* # zombie sessions */
95 struct sfw_session *fw_session;
96 /* shutdown in progress */
99 struct srpc_server_rpc *fw_active_srpc;
/* Framework services registered with the RPC layer (dispatched in
 * sfw_handle_server_rpc() below). */
102 static struct srpc_service sfw_services[] = {
103 { .sv_id = SRPC_SERVICE_DEBUG, .sv_name = "debug", },
104 { .sv_id = SRPC_SERVICE_QUERY_STAT, .sv_name = "query stats", },
105 { .sv_id = SRPC_SERVICE_MAKE_SESSION, .sv_name = "make session", },
106 { .sv_id = SRPC_SERVICE_REMOVE_SESSION, .sv_name = "remove session", },
107 { .sv_id = SRPC_SERVICE_BATCH, .sv_name = "batch service", },
108 { .sv_id = SRPC_SERVICE_TEST, .sv_name = "test service", },
/* Forward declarations needed by session teardown paths. */
113 static int sfw_stop_batch(struct sfw_batch *tsb, int force);
114 static void sfw_destroy_session(struct sfw_session *sn);
/* Look up a registered test case by its service id.  Only test-service
 * ids (above the framework service range) are legal, per the asserts.
 * Walks sfw_data.fw_tests; the match/NULL return paths fall outside
 * this excerpt. */
116 static inline struct sfw_test_case *
117 sfw_find_test_case(int id)
119 struct sfw_test_case *tsc;
121 LASSERT(id <= SRPC_SERVICE_MAX_ID);
122 LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
124 list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
125 if (tsc->tsc_srv_service->sv_id == id)
/* Register a test case (server service + client ops) with the
 * framework.  Fails with -EEXIST if the service id is already taken;
 * otherwise allocates an sfw_test_case and appends it to
 * sfw_data.fw_tests.  The allocation-failure path and returns fall
 * outside this excerpt. */
133 sfw_register_test(struct srpc_service *service,
134 struct sfw_test_client_ops *cliops)
136 struct sfw_test_case *tsc;
138 if (sfw_find_test_case(service->sv_id) != NULL) {
139 CERROR("Failed to register test %s (%d): rc = %d\n",
140 service->sv_name, service->sv_id, -EEXIST);
144 LIBCFS_ALLOC(tsc, sizeof(*tsc));
148 tsc->tsc_cli_ops = cliops;
149 tsc->tsc_srv_service = service;
151 list_add_tail(&tsc->tsc_list, &sfw_data.fw_tests);
/* (Re)arm the current session's expiry timer for sn_timeout seconds
 * from now.  No-op when there is no session or timeouts are disabled.
 * NB: &sn->sn_timer is only an address computation, so taking it
 * before the NULL check on sn is safe. */
156 sfw_add_session_timer(void)
158 struct sfw_session *sn = sfw_data.fw_session;
159 struct stt_timer *timer = &sn->sn_timer;
161 LASSERT(!sfw_data.fw_shuttingdown);
163 if (sn == NULL || sn->sn_timeout == 0)
166 LASSERT(!sn->sn_timer_active);
168 sn->sn_timer_active = 1;
169 timer->stt_expires = ktime_get_real_seconds() + sn->sn_timeout;
170 stt_add_timer(timer);
/* Disarm the current session's expiry timer.  Returns -EBUSY if the
 * timer has already fired and we are racing with
 * sfw_session_expired(); the success return falls outside this
 * excerpt. */
174 sfw_del_session_timer(void)
176 struct sfw_session *sn = sfw_data.fw_session;
178 if (sn == NULL || !sn->sn_timer_active)
181 LASSERT(sn->sn_timeout != 0);
183 if (stt_del_timer(&sn->sn_timer)) { /* timer defused */
184 sn->sn_timer_active = 0;
188 return -EBUSY; /* racing with sfw_session_expired() */
191 /* called with sfw_data.fw_lock held */
193 sfw_deactivate_session(void)
194 __must_hold(&sfw_data.fw_lock)
196 struct sfw_session *sn = sfw_data.fw_session;
198 struct sfw_batch *tsb;
199 struct sfw_test_case *tsc;
204 LASSERT(!sn->sn_timer_active);
/* Detach the session and park it on the zombie list; it is destroyed
 * below once all its batches have stopped. */
206 sfw_data.fw_session = NULL;
207 atomic_inc(&sfw_data.fw_nzombies);
208 list_add(&sn->sn_list, &sfw_data.fw_zombie_sessions);
/* fw_lock is dropped while aborting services: srpc_abort_service()
 * may block/take other locks. */
210 spin_unlock(&sfw_data.fw_lock);
212 list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
213 srpc_abort_service(tsc->tsc_srv_service);
216 spin_lock(&sfw_data.fw_lock);
218 list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
219 if (sfw_batch_active(tsb)) {
221 sfw_stop_batch(tsb, 1);
/* If any batch was still active we return here; the last batch to
 * stop (sfw_test_unit_done()) finishes destroying the zombie. */
226 return; /* wait for active batches to stop */
228 list_del_init(&sn->sn_list);
229 spin_unlock(&sfw_data.fw_lock);
231 sfw_destroy_session(sn);
233 spin_lock(&sfw_data.fw_lock);
/* Timer callback (stt_timer, armed in sfw_add_session_timer()): the
 * session went idle for sn_timeout seconds, so deactivate it.  Runs
 * only while the session is current, per the asserts. */
237 sfw_session_expired(void *data)
239 struct sfw_session *sn = data;
241 spin_lock(&sfw_data.fw_lock);
243 LASSERT(sn->sn_timer_active);
244 LASSERT(sn == sfw_data.fw_session);
246 CWARN("Session expired! sid: %s-%llu, name: %s\n",
247 libcfs_nidstr(&sn->sn_id.ses_nid),
248 sn->sn_id.ses_stamp, &sn->sn_name[0]);
250 sn->sn_timer_active = 0;
251 sfw_deactivate_session();
253 spin_unlock(&sfw_data.fw_lock);
/* Initialize a freshly allocated session: zero it, set up its lists,
 * counters, name (truncated to sizeof(sn_name)), ID (nid widened from
 * the legacy 4-byte form), feature mask, timeout and expiry timer.
 * The refcount starts at 1 for the caller; the timer is prepared but
 * not armed here. */
257 sfw_init_session(struct sfw_session *sn, struct lst_sid sid,
258 unsigned features, const char *name)
260 struct stt_timer *timer = &sn->sn_timer;
262 memset(sn, 0, sizeof(struct sfw_session));
263 INIT_LIST_HEAD(&sn->sn_list);
264 INIT_LIST_HEAD(&sn->sn_batches);
265 refcount_set(&sn->sn_refcount, 1); /* +1 for caller */
266 atomic_set(&sn->sn_brw_errors, 0);
267 atomic_set(&sn->sn_ping_errors, 0);
268 strscpy(&sn->sn_name[0], name, sizeof(sn->sn_name));
270 sn->sn_timer_active = 0;
271 sn->sn_id.ses_stamp = sid.ses_stamp;
272 lnet_nid4_to_nid(sid.ses_nid, &sn->sn_id.ses_nid);
273 sn->sn_features = features;
274 sn->sn_timeout = session_timeout;
275 sn->sn_started = ktime_get();
277 timer->stt_data = sn;
278 timer->stt_func = sfw_session_expired;
279 INIT_LIST_HEAD(&timer->stt_list);
282 /* completion handler for incoming framework RPCs */
/* Logs the outcome and releases the bulk descriptor, if any was
 * attached (e.g. by sfw_alloc_pages() for ADD_TEST requests). */
284 sfw_server_rpc_done(struct srpc_server_rpc *rpc)
286 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
287 int status = rpc->srpc_status;
290 "Incoming framework RPC done: service %s, peer %s, status %s:%d\n",
291 sv->sv_name, libcfs_id2str(rpc->srpc_peer),
292 swi_state2str(rpc->srpc_wi.swi_state), status);
294 if (rpc->srpc_bulk) {
295 srpc_free_bulk(rpc->srpc_bulk);
296 rpc->srpc_bulk = NULL;
/* Finalizer for outgoing framework (bulk-less) client RPCs: instead of
 * freeing the RPC it is parked on fw_zombie_rpcs so sfw_create_rpc()
 * can recycle it.  Only fully quiesced RPCs (no bulk, unlinked,
 * refcount 0) may get here, per the asserts. */
301 sfw_client_rpc_fini(struct srpc_client_rpc *rpc)
303 LASSERT(rpc->crpc_bulk.bk_niov == 0);
304 LASSERT(list_empty(&rpc->crpc_list));
305 LASSERT(atomic_read(&rpc->crpc_refcount) == 0);
307 CDEBUG(D_NET, "Outgoing framework RPC done: "
308 "service %d, peer %s, status %s:%d:%d\n",
309 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
310 swi_state2str(rpc->crpc_wi.swi_state),
311 rpc->crpc_aborted, rpc->crpc_status);
313 spin_lock(&sfw_data.fw_lock);
315 /* my callers must finish all RPCs before shutting me down */
316 LASSERT(!sfw_data.fw_shuttingdown);
317 list_add(&rpc->crpc_list, &sfw_data.fw_zombie_rpcs);
319 spin_unlock(&sfw_data.fw_lock);
/* Find a batch by ID in the current session's batch list; the
 * found/not-found return paths fall outside this excerpt. */
322 static struct sfw_batch *
323 sfw_find_batch(struct lst_bid bid)
325 struct sfw_session *sn = sfw_data.fw_session;
326 struct sfw_batch *bat;
330 list_for_each_entry(bat, &sn->sn_batches, bat_list) {
331 if (bat->bat_id.bat_id == bid.bat_id)
/* Find-or-create: return the existing batch for this ID, or allocate
 * an empty one and link it into the current session.  The early
 * return for an existing batch, the bat_id assignment and the
 * allocation-failure path fall outside this excerpt. */
338 static struct sfw_batch *
339 sfw_bid2batch(struct lst_bid bid)
341 struct sfw_session *sn = sfw_data.fw_session;
342 struct sfw_batch *bat;
346 bat = sfw_find_batch(bid);
350 LIBCFS_ALLOC(bat, sizeof(*bat));
355 bat->bat_session = sn;
357 atomic_set(&bat->bat_nactive, 0);
358 INIT_LIST_HEAD(&bat->bat_tests);
360 list_add_tail(&bat->bat_list, &sn->sn_batches);
/* Convert the session's ID to the legacy wire format (4-byte NID).
 * Starts from the "invalid" sentinel, so a NULL sn presumably yields
 * that sentinel — the NULL check between lines 366 and 369 is outside
 * this excerpt (callers such as sfw_debug_session() do pass NULL). */
364 static struct lst_sid get_old_sid(struct sfw_session *sn)
366 struct lst_sid sid = { .ses_nid = LNET_NID_ANY, .ses_stamp = -1 };
369 sid.ses_stamp = sn->sn_id.ses_stamp;
370 sid.ses_nid = lnet_nid_to_nid4(&sn->sn_id.ses_nid);
/* Handle a QUERY_STAT request: validate the requested session ID
 * (EINVAL for wildcard NID, ESRCH for a stale/absent session), then
 * fill in LNet, RPC and framework counters.  NB: reply status codes
 * in this file are positive errno values carried in the message body. */
377 sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply)
379 struct sfw_session *sn = sfw_data.fw_session;
380 struct sfw_counters *cnt = &reply->str_fw;
381 struct sfw_batch *bat;
383 reply->str_sid = get_old_sid(sn);
385 if (request->str_sid.ses_nid == LNET_NID_ANY) {
386 reply->str_status = EINVAL;
390 if (sn == NULL || !sfw_sid_equal(request->str_sid, sn->sn_id)) {
391 reply->str_status = ESRCH;
395 lnet_counters_get_common(&reply->str_lnet);
396 srpc_get_counters(&reply->str_rpc);
398 /* send over the msecs since the session was started
399 * - with 32 bits to send, this is ~49 days */
400 cnt->running_ms = ktime_ms_delta(ktime_get(), sn->sn_started);
401 cnt->brw_errors = atomic_read(&sn->sn_brw_errors);
402 cnt->ping_errors = atomic_read(&sn->sn_ping_errors);
403 cnt->zombie_sessions = atomic_read(&sfw_data.fw_nzombies);
/* A batch counts as active while any of its test units are running. */
405 cnt->active_batches = 0;
406 list_for_each_entry(bat, &sn->sn_batches, bat_list) {
407 if (atomic_read(&bat->bat_nactive) > 0)
408 cnt->active_batches++;
411 reply->str_status = 0;
/* Handle a MAKE_SESSION request.  Outcomes (reply status is a positive
 * errno): EINVAL for a wildcard NID; same-ID request just bumps the
 * existing session's refcount; EBUSY when a different session exists
 * and mksn_force is clear (the current session's name is returned to
 * the console); EPROTO for unknown feature bits; otherwise a new
 * session replaces the old one. */
416 sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
418 struct sfw_session *sn = sfw_data.fw_session;
419 struct srpc_msg *msg = container_of(request, struct srpc_msg,
420 msg_body.mksn_reqst);
423 if (request->mksn_sid.ses_nid == LNET_NID_ANY) {
424 reply->mksn_sid = get_old_sid(sn);
425 reply->mksn_status = EINVAL;
430 reply->mksn_status = 0;
431 reply->mksn_sid = get_old_sid(sn);
432 reply->mksn_timeout = sn->sn_timeout;
/* Console retried/extended an existing session: just take a ref. */
434 if (sfw_sid_equal(request->mksn_sid, sn->sn_id)) {
435 refcount_inc(&sn->sn_refcount);
439 if (!request->mksn_force) {
440 reply->mksn_status = EBUSY;
441 cplen = strscpy(&reply->mksn_name[0], &sn->sn_name[0],
442 sizeof(reply->mksn_name));
449 /* reject the request if it requires unknown features
450 * NB: old version will always accept all features because it's not
451 * aware of struct srpc_msg::msg_ses_feats, it's a defect but it's also
452 * harmless because it will return zero feature to console, and it's
453 * console's responsibility to make sure all nodes in a session have
454 * same feature mask. */
455 if ((msg->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
456 reply->mksn_status = EPROTO;
460 /* brand new or create by force */
461 LIBCFS_ALLOC(sn, sizeof(*sn));
463 CERROR("dropping RPC mksn under memory pressure: rc = %d\n",
468 sfw_init_session(sn, request->mksn_sid,
469 msg->msg_ses_feats, &request->mksn_name[0]);
/* Swap in the new session under fw_lock; any old session is moved to
 * the zombie list by sfw_deactivate_session(). */
471 spin_lock(&sfw_data.fw_lock);
473 sfw_deactivate_session();
474 LASSERT(sfw_data.fw_session == NULL);
475 sfw_data.fw_session = sn;
477 spin_unlock(&sfw_data.fw_lock);
479 reply->mksn_status = 0;
480 reply->mksn_sid = get_old_sid(sn);
481 reply->mksn_timeout = sn->sn_timeout;
/* Handle a REMOVE_SESSION request: EINVAL for a wildcard NID, ESRCH
 * when no session exists, EBUSY on an ID mismatch.  Otherwise drop one
 * reference; the session is only deactivated when the last reference
 * goes (multiple consoles may share it). */
486 sfw_remove_session(struct srpc_rmsn_reqst *request,
487 struct srpc_rmsn_reply *reply)
489 struct sfw_session *sn = sfw_data.fw_session;
491 reply->rmsn_sid = get_old_sid(sn);
493 if (request->rmsn_sid.ses_nid == LNET_NID_ANY) {
494 reply->rmsn_status = EINVAL;
498 if (sn == NULL || !sfw_sid_equal(request->rmsn_sid, sn->sn_id)) {
499 reply->rmsn_status = (sn == NULL) ? ESRCH : EBUSY;
503 if (!refcount_dec_and_test(&sn->sn_refcount)) {
504 reply->rmsn_status = 0;
508 spin_lock(&sfw_data.fw_lock);
509 sfw_deactivate_session();
510 spin_unlock(&sfw_data.fw_lock);
512 reply->rmsn_status = 0;
/* Session gone: report the invalid sentinel SID back. */
513 reply->rmsn_sid = get_old_sid(NULL);
514 LASSERT(sfw_data.fw_session == NULL);
/* Handle a DEBUG request: report ESRCH plus the invalid SID when no
 * session exists (that branch's condition falls outside this excerpt),
 * otherwise return the session's SID, timeout and name. */
519 sfw_debug_session(struct srpc_debug_reqst *request,
520 struct srpc_debug_reply *reply)
522 struct sfw_session *sn = sfw_data.fw_session;
526 reply->dbg_status = ESRCH;
527 reply->dbg_sid = get_old_sid(NULL);
531 reply->dbg_status = 0;
532 reply->dbg_sid = get_old_sid(sn);
533 reply->dbg_timeout = sn->sn_timeout;
534 cplen = strscpy(reply->dbg_name, &sn->sn_name[0],
535 sizeof(reply->dbg_name));
/* Finalizer for test client RPCs: mark the workitem done and park the
 * RPC on the instance's free list so sfw_create_test_rpc() can reuse
 * it. */
543 sfw_test_rpc_fini(struct srpc_client_rpc *rpc)
545 struct sfw_test_unit *tsu = rpc->crpc_priv;
546 struct sfw_test_instance *tsi = tsu->tsu_instance;
548 /* Called with hold of tsi->tsi_lock */
549 LASSERT(list_empty(&rpc->crpc_list));
550 rpc->crpc_wi.swi_state = SWI_STATE_DONE;
551 list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
/* Compute how many request buffers (per CPT) the test's server
 * service needs: bounded by the service's total workitems and the
 * test's loop count, with a floor of SFW_TEST_WI_MIN and
 * SFW_TEST_WI_EXTRA headroom. */
555 sfw_test_buffers(struct sfw_test_instance *tsi)
557 struct sfw_test_case *tsc;
558 struct srpc_service *svc;
561 LASSERT(tsi != NULL);
562 tsc = sfw_find_test_case(tsi->tsi_service);
563 LASSERT(tsc != NULL);
564 svc = tsc->tsc_srv_service;
565 LASSERT(svc != NULL);
567 nbuf = min(svc->sv_wi_total, tsi->tsi_loop) / svc->sv_ncpts;
568 return max(SFW_TEST_WI_MIN, nbuf + SFW_TEST_WI_EXTRA);
/* Prepare a test instance for running: for a client instance just wire
 * up the client ops; for a server instance reserve request buffers on
 * the service (the server branch between lines 586 and 590 is outside
 * this excerpt). */
572 sfw_load_test(struct sfw_test_instance *tsi)
574 struct sfw_test_case *tsc;
575 struct srpc_service *svc;
579 LASSERT(tsi != NULL);
580 tsc = sfw_find_test_case(tsi->tsi_service);
581 nbuf = sfw_test_buffers(tsi);
582 LASSERT(tsc != NULL);
583 svc = tsc->tsc_srv_service;
585 if (tsi->tsi_is_client) {
586 tsi->tsi_ops = tsc->tsc_cli_ops;
590 rc = srpc_service_add_buffers(svc, nbuf);
592 CWARN("Failed to reserve enough buffers: "
593 "service %s, %d needed: %d\n", svc->sv_name, nbuf, rc);
594 /* NB: this error handler is not strictly correct, because
595 * it may release more buffers than already allocated,
596 * but it doesn't matter because request portal should
597 * be lazy portal and will grow buffers if necessary. */
598 srpc_service_remove_buffers(svc, nbuf);
602 CDEBUG(D_NET, "Reserved %d buffers for test %s\n",
603 nbuf * (srpc_serv_is_framework(svc) ?
604 1 : cfs_cpt_number(cfs_cpt_tab)), svc->sv_name);
/* Undo sfw_load_test(): client instances have nothing to release
 * (early return outside this excerpt); server instances give back the
 * buffers reserved at load time. */
609 sfw_unload_test(struct sfw_test_instance *tsi)
611 struct sfw_test_case *tsc;
613 LASSERT(tsi != NULL);
614 tsc = sfw_find_test_case(tsi->tsi_service);
615 LASSERT(tsc != NULL);
617 if (tsi->tsi_is_client)
620 /* shrink buffers, because request portal is lazy portal
621 * which can grow buffers at runtime so we may leave
622 * some buffers behind, but never mind... */
623 srpc_service_remove_buffers(tsc->tsc_srv_service,
624 sfw_test_buffers(tsi));
/* Tear down a fully stopped test instance: run the client fini hook,
 * free every test unit and every cached (free-list) RPC, release
 * server buffers via sfw_unload_test(), then free the instance
 * itself.  Must only be called when nothing is active, per the
 * asserts. */
628 sfw_destroy_test_instance(struct sfw_test_instance *tsi)
630 struct srpc_client_rpc *rpc;
631 struct sfw_test_unit *tsu;
633 if (!tsi->tsi_is_client)
636 tsi->tsi_ops->tso_fini(tsi);
638 LASSERT(!tsi->tsi_stopping);
639 LASSERT(list_empty(&tsi->tsi_active_rpcs));
640 LASSERT(!sfw_test_active(tsi));
642 while (!list_empty(&tsi->tsi_units)) {
643 tsu = list_first_entry(&tsi->tsi_units,
644 struct sfw_test_unit, tsu_list);
645 list_del(&tsu->tsu_list);
646 LIBCFS_FREE(tsu, sizeof(*tsu));
649 while (!list_empty(&tsi->tsi_free_rpcs)) {
650 rpc = list_first_entry(&tsi->tsi_free_rpcs,
651 struct srpc_client_rpc, crpc_list);
652 list_del(&rpc->crpc_list);
/* Make sure the RPC's workitem can't run after the RPC is freed. */
653 swi_cancel_workitem(&rpc->crpc_wi);
654 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
658 sfw_unload_test(tsi);
659 LIBCFS_FREE(tsi, sizeof(*tsi));
/* Destroy an inactive, unlinked batch by destroying each of its test
 * instances and then freeing the batch itself. */
663 sfw_destroy_batch(struct sfw_batch *tsb)
665 struct sfw_test_instance *tsi;
667 LASSERT(!sfw_batch_active(tsb));
668 LASSERT(list_empty(&tsb->bat_list));
670 while (!list_empty(&tsb->bat_tests)) {
671 tsi = list_first_entry(&tsb->bat_tests,
672 struct sfw_test_instance, tsi_list);
673 list_del_init(&tsi->tsi_list);
674 sfw_destroy_test_instance(tsi);
677 LIBCFS_FREE(tsb, sizeof(*tsb));
/* Final teardown of a (zombie) session: destroy every batch, free the
 * session and drop the zombie count.  The session must already be
 * unlinked and must not be the current one, per the asserts. */
681 sfw_destroy_session(struct sfw_session *sn)
683 struct sfw_batch *batch;
685 LASSERT(list_empty(&sn->sn_list));
686 LASSERT(sn != sfw_data.fw_session);
688 while (!list_empty(&sn->sn_batches)) {
689 batch = list_first_entry(&sn->sn_batches,
690 struct sfw_batch, bat_list);
691 list_del_init(&batch->bat_list);
692 sfw_destroy_batch(batch);
695 LIBCFS_FREE(sn, sizeof(*sn));
696 atomic_dec(&sfw_data.fw_nzombies);
/* Byte-swap an ADD_TEST request that arrived from an opposite-endian
 * peer.  Only the test-parameter union needs per-test handling: BRW
 * requests use the v0 layout unless LST_FEAT_BULK_LEN is negotiated
 * (then v1), PING requests swap their own fields. */
700 sfw_unpack_addtest_req(struct srpc_msg *msg)
702 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
704 LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST);
705 LASSERT(req->tsr_is_client);
707 if (msg->msg_magic == SRPC_MSG_MAGIC)
708 return; /* no flipping needed */
710 LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
712 if (req->tsr_service == SRPC_SERVICE_BRW) {
713 if ((msg->msg_ses_feats & LST_FEAT_BULK_LEN) == 0) {
714 struct test_bulk_req *bulk = &req->tsr_u.bulk_v0;
716 __swab32s(&bulk->blk_opc);
717 __swab32s(&bulk->blk_npg);
718 __swab32s(&bulk->blk_flags);
721 struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1;
723 __swab16s(&bulk->blk_opc);
724 __swab16s(&bulk->blk_flags);
725 __swab32s(&bulk->blk_offset);
726 __swab32s(&bulk->blk_len);
732 if (req->tsr_service == SRPC_SERVICE_PING) {
733 struct test_ping_req *ping = &req->tsr_u.ping;
735 __swab32s(&ping->png_size);
736 __swab32s(&ping->png_flags);
/* Build a test instance from an ADD_TEST request and attach it to the
 * batch.  Server instances are just linked in; client instances also
 * read the destination ID list from the request's bulk pages and
 * create tsr_concur test units per destination.  On any failure the
 * partially built instance is destroyed (line 834). */
744 sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
746 struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg;
747 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
748 struct srpc_bulk *bk = rpc->srpc_bulk;
749 int ndest = req->tsr_ndest;
750 struct sfw_test_unit *tsu;
751 struct sfw_test_instance *tsi;
755 LIBCFS_ALLOC(tsi, sizeof(*tsi));
757 CERROR("Can't allocate test instance for batch: %llu: rc = %d\n",
758 tsb->bat_id.bat_id, -ENOMEM);
762 spin_lock_init(&tsi->tsi_lock);
763 atomic_set(&tsi->tsi_nactive, 0);
764 INIT_LIST_HEAD(&tsi->tsi_units);
765 INIT_LIST_HEAD(&tsi->tsi_free_rpcs);
766 INIT_LIST_HEAD(&tsi->tsi_active_rpcs);
768 tsi->tsi_stopping = 0;
769 tsi->tsi_batch = tsb;
770 tsi->tsi_loop = req->tsr_loop;
771 tsi->tsi_concur = req->tsr_concur;
772 tsi->tsi_service = req->tsr_service;
773 tsi->tsi_is_client = !!(req->tsr_is_client);
774 tsi->tsi_stoptsu_onerr = !!(req->tsr_stop_onerr);
776 rc = sfw_load_test(tsi);
778 LIBCFS_FREE(tsi, sizeof(*tsi));
782 LASSERT(!sfw_batch_active(tsb));
784 if (!tsi->tsi_is_client) {
785 /* it's test server, just add it to tsb */
786 list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
/* Client instance: the bulk buffer must be big enough to hold all
 * ndest packed destination IDs. */
791 LASSERT(bk->bk_niov * SFW_ID_PER_PAGE >= (unsigned int)ndest);
792 LASSERT((unsigned int)bk->bk_len >=
793 sizeof(struct lnet_process_id_packed) * ndest);
795 sfw_unpack_addtest_req(msg);
796 memcpy(&tsi->tsi_u, &req->tsr_u, sizeof(tsi->tsi_u));
798 for (i = 0; i < ndest; i++) {
799 struct lnet_process_id_packed *dests;
800 struct lnet_process_id_packed id;
803 dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].bv_page);
804 LASSERT(dests != NULL); /* my pages are within KVM always */
805 id = dests[i % SFW_ID_PER_PAGE];
/* Byte-swap the packed ID if the peer is opposite-endian (the swab
 * call between lines 806 and 809 is outside this excerpt). */
806 if (msg->msg_magic != SRPC_MSG_MAGIC)
809 for (j = 0; j < tsi->tsi_concur; j++) {
810 LIBCFS_ALLOC(tsu, sizeof(*tsu));
813 CERROR("Can't allocate tsu for %d: rc = %d\n",
814 tsi->tsi_service, rc);
818 tsu->tsu_dest.nid = id.nid;
819 tsu->tsu_dest.pid = id.pid;
820 tsu->tsu_instance = tsi;
821 tsu->tsu_private = NULL;
822 list_add_tail(&tsu->tsu_list, &tsi->tsi_units);
826 rc = tsi->tsi_ops->tso_init(tsi);
828 list_add_tail(&tsi->tsi_list, &tsb->bat_tests);
834 sfw_destroy_test_instance(tsi);
/* Called when one test unit finishes.  When the last unit of the
 * instance finishes, clear the stopping flag; when the last instance
 * of the batch finishes AND the session is already a zombie (replaced
 * or expired), check whether any sibling batch is still active — if
 * not, this is the last batch and the zombie session is destroyed
 * here. */
839 sfw_test_unit_done(struct sfw_test_unit *tsu)
841 struct sfw_test_instance *tsi = tsu->tsu_instance;
842 struct sfw_batch *tsb = tsi->tsi_batch;
843 struct sfw_session *sn = tsb->bat_session;
845 LASSERT(sfw_test_active(tsi));
847 if (!atomic_dec_and_test(&tsi->tsi_nactive))
850 /* the test instance is done */
851 spin_lock(&tsi->tsi_lock);
853 tsi->tsi_stopping = 0;
855 spin_unlock(&tsi->tsi_lock);
857 spin_lock(&sfw_data.fw_lock);
859 if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
860 sn == sfw_data.fw_session) { /* sn also active */
861 spin_unlock(&sfw_data.fw_lock);
865 LASSERT(!list_empty(&sn->sn_list)); /* I'm a zombie! */
867 list_for_each_entry(tsb, &sn->sn_batches, bat_list) {
868 if (sfw_batch_active(tsb)) {
869 spin_unlock(&sfw_data.fw_lock);
874 list_del_init(&sn->sn_list);
875 spin_unlock(&sfw_data.fw_lock);
877 sfw_destroy_session(sn);
/* Completion handler for a test RPC: hand the result to the client
 * ops, unlink the RPC from the active list, then either reschedule the
 * test unit's workitem for another iteration (line 907) or — when the
 * batch is stopping, the loop count is exhausted, or an error occurred
 * with stop-on-error set — retire the unit via sfw_test_unit_done(). */
881 sfw_test_rpc_done(struct srpc_client_rpc *rpc)
883 struct sfw_test_unit *tsu = rpc->crpc_priv;
884 struct sfw_test_instance *tsi = tsu->tsu_instance;
887 tsi->tsi_ops->tso_done_rpc(tsu, rpc);
889 spin_lock(&tsi->tsi_lock);
891 LASSERT(sfw_test_active(tsi));
892 LASSERT(!list_empty(&rpc->crpc_list));
894 list_del_init(&rpc->crpc_list);
896 /* batch is stopping or loop is done or get error */
897 if (tsi->tsi_stopping || tsu->tsu_loop == 0 ||
898 (rpc->crpc_status != 0 && tsi->tsi_stoptsu_onerr))
901 /* dec ref for poster */
902 srpc_client_rpc_decref(rpc);
904 spin_unlock(&tsi->tsi_lock);
907 swi_schedule_workitem(&tsu->tsu_worker);
911 sfw_test_unit_done(tsu);
/* Get a client RPC for a test unit: reuse one from the instance's
 * free list when possible (bulk layout must match — nblk asserted
 * equal), otherwise allocate a fresh one.  A recycled RPC has its
 * workitem cancelled and is re-initialized in place.  Returns the RPC
 * via *rpcpp; the -ENOMEM path logs at line 948. */
915 sfw_create_test_rpc(struct sfw_test_unit *tsu, struct lnet_process_id peer,
916 unsigned features, int nblk, int blklen,
917 struct srpc_client_rpc **rpcpp)
919 struct srpc_client_rpc *rpc = NULL;
920 struct sfw_test_instance *tsi = tsu->tsu_instance;
922 spin_lock(&tsi->tsi_lock);
924 LASSERT(sfw_test_active(tsi));
926 if (!list_empty(&tsi->tsi_free_rpcs)) {
927 /* pick request from buffer */
928 rpc = list_first_entry(&tsi->tsi_free_rpcs,
929 struct srpc_client_rpc, crpc_list);
930 LASSERT(nblk == rpc->crpc_bulk.bk_niov);
931 list_del_init(&rpc->crpc_list);
934 spin_unlock(&tsi->tsi_lock);
937 rpc = srpc_create_client_rpc(peer, tsi->tsi_service, nblk,
938 blklen, sfw_test_rpc_done,
939 sfw_test_rpc_fini, tsu);
/* Recycled RPC: make sure its old workitem is fully quiesced before
 * reinitializing it for the new request. */
941 swi_cancel_workitem(&rpc->crpc_wi);
942 srpc_init_client_rpc(rpc, peer, tsi->tsi_service, nblk,
943 blklen, sfw_test_rpc_done,
944 sfw_test_rpc_fini, tsu);
948 CERROR("Can't create rpc for test %d: rc = %d\n",
949 tsi->tsi_service, -ENOMEM);
/* Session features ride along on every test request. */
953 rpc->crpc_reqstmsg.msg_ses_feats = features;
/* Workitem body for one test unit: ask the client ops to prepare the
 * next RPC, then post it unless the unit has been retired (workitem
 * already DONE) or the instance is stopping — in the latter case the
 * prepared RPC is returned to the free list.  tsu_loop is decremented
 * near line 988; when a unit cannot continue it falls through to
 * sfw_test_unit_done(). */
960 sfw_run_test(struct swi_workitem *wi)
962 struct sfw_test_unit *tsu = container_of(wi, struct sfw_test_unit,
964 struct sfw_test_instance *tsi = tsu->tsu_instance;
965 struct srpc_client_rpc *rpc = NULL;
967 if (tsi->tsi_ops->tso_prep_rpc(tsu, tsu->tsu_dest, &rpc) != 0) {
968 LASSERT(rpc == NULL);
969 wi->swi_state = SWI_STATE_DONE;
973 LASSERT(rpc != NULL);
975 spin_lock(&tsi->tsi_lock);
/* Raced with retirement: someone already marked this unit done. */
976 if (wi->swi_state == SWI_STATE_DONE) {
977 spin_unlock(&tsi->tsi_lock);
981 if (tsi->tsi_stopping) {
982 wi->swi_state = SWI_STATE_DONE;
983 list_add(&rpc->crpc_list, &tsi->tsi_free_rpcs);
984 spin_unlock(&tsi->tsi_lock);
988 if (tsu->tsu_loop > 0)
991 list_add_tail(&rpc->crpc_list, &tsi->tsi_active_rpcs);
992 wi->swi_state = SWI_STATE_RUNNING;
993 spin_unlock(&tsi->tsi_lock);
995 spin_lock(&rpc->crpc_lock);
/* Module parameter sets the per-RPC timeout. */
996 rpc->crpc_timeout = rpc_timeout;
998 spin_unlock(&rpc->crpc_lock);
1003 * No one can schedule me now since:
1004 * - previous RPC, if any, has done and
1005 * - no new RPC is initiated.
1006 * - my batch is still active; no one can run it again now.
1007 * Cancel pending schedules and prevent future schedule attempts:
1009 sfw_test_unit_done(tsu);
/* Start a batch: for every client test instance, mark the batch and
 * instance active, give each unit its loop budget, and kick off one
 * workitem per unit on the workqueue of the destination NID's CPT
 * (queue selection spans lines 1039-1041, partly outside this
 * excerpt).  Running an already-active batch is a no-op. */
1013 sfw_run_batch(struct sfw_batch *tsb)
1015 struct swi_workitem *wi;
1016 struct sfw_test_unit *tsu;
1017 struct sfw_test_instance *tsi;
1019 if (sfw_batch_active(tsb)) {
1020 CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
1021 tsb->bat_id.bat_id, atomic_read(&tsb->bat_nactive));
1025 list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
1026 if (!tsi->tsi_is_client) /* skip server instances */
1029 LASSERT(!tsi->tsi_stopping);
1030 LASSERT(!sfw_test_active(tsi));
1032 atomic_inc(&tsb->bat_nactive);
1034 list_for_each_entry(tsu, &tsi->tsi_units, tsu_list) {
1035 atomic_inc(&tsi->tsi_nactive);
1036 tsu->tsu_loop = tsi->tsi_loop;
1037 wi = &tsu->tsu_worker;
1038 swi_init_workitem(wi, sfw_run_test,
1039 lst_test_wq[lnet_cpt_of_nid(
1042 swi_schedule_workitem(wi);
/* Stop a running batch: flag every active client instance as stopping
 * so no new RPCs are posted; with force set (the check near line 1072
 * is outside this excerpt) also abort the RPCs already in flight with
 * -EINTR.  Stopping an inactive batch is a no-op. */
1050 sfw_stop_batch(struct sfw_batch *tsb, int force)
1052 struct sfw_test_instance *tsi;
1053 struct srpc_client_rpc *rpc;
1055 if (!sfw_batch_active(tsb)) {
1056 CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
1060 list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
1061 spin_lock(&tsi->tsi_lock);
1063 if (!tsi->tsi_is_client ||
1064 !sfw_test_active(tsi) || tsi->tsi_stopping) {
1065 spin_unlock(&tsi->tsi_lock);
1069 tsi->tsi_stopping = 1;
1072 spin_unlock(&tsi->tsi_lock);
1076 /* abort launched rpcs in the test */
1077 list_for_each_entry(rpc, &tsi->tsi_active_rpcs, crpc_list) {
1078 spin_lock(&rpc->crpc_lock);
1080 srpc_abort_rpc(rpc, -EINTR);
1082 spin_unlock(&rpc->crpc_lock);
1085 spin_unlock(&tsi->tsi_lock);
/* Report how active a batch is: testidx 0 reports the batch-wide
 * active count, a positive testidx reports the count for the
 * testidx'th instance (index walk and error returns fall outside this
 * excerpt). */
1092 sfw_query_batch(struct sfw_batch *tsb, int testidx,
1093 struct srpc_batch_reply *reply)
1095 struct sfw_test_instance *tsi;
1101 reply->bar_active = atomic_read(&tsb->bat_nactive);
1105 list_for_each_entry(tsi, &tsb->bat_tests, tsi_list) {
1109 reply->bar_active = atomic_read(&tsi->tsi_nactive);
/* Attach a bulk descriptor of 'len' bytes (bounded by LNET_MTU) to a
 * server RPC, configured as a sink per the last srpc_init_bulk()
 * argument; used to receive the destination-ID list of an ADD_TEST. */
1117 sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int len,
1120 LASSERT(rpc->srpc_bulk == NULL);
1121 LASSERT(len > 0 && len <= LNET_MTU);
1123 rpc->srpc_bulk = srpc_alloc_bulk(cpt, len);
1124 if (rpc->srpc_bulk == NULL)
1127 srpc_init_bulk(rpc->srpc_bulk, 0, len, sink);
/* Handle an ADD_TEST request.  Validates the request parameters
 * (EINVAL), the session and test service (ENOENT), finds/creates the
 * batch (dropping the RPC on -ENOMEM) and rejects an active batch
 * (EBUSY).  For a client test the destination list arrives by bulk:
 * on first pass srpc_bulk is NULL, so a sink bulk is allocated and
 * the RPC is resumed later via sfw_bulk_ready().  Finally the test
 * instance is created and the result reported as a positive errno. */
1133 sfw_add_test(struct srpc_server_rpc *rpc)
1135 struct sfw_session *sn = sfw_data.fw_session;
1136 struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
1137 struct srpc_test_reqst *request;
1139 struct sfw_batch *bat;
1141 request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
1142 reply->tsr_sid = get_old_sid(sn);
1144 if (request->tsr_loop == 0 || request->tsr_concur == 0 ||
1145 request->tsr_sid.ses_nid == LNET_NID_ANY ||
1146 request->tsr_ndest > SFW_MAX_NDESTS ||
1147 (request->tsr_is_client && request->tsr_ndest == 0) ||
1148 request->tsr_concur > SFW_MAX_CONCUR ||
1149 request->tsr_service > SRPC_SERVICE_MAX_ID ||
1150 request->tsr_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID) {
1151 reply->tsr_status = EINVAL;
1155 if (sn == NULL || !sfw_sid_equal(request->tsr_sid, sn->sn_id) ||
1156 sfw_find_test_case(request->tsr_service) == NULL) {
1157 reply->tsr_status = ENOENT;
1161 bat = sfw_bid2batch(request->tsr_bid);
1163 CERROR("dropping RPC %s from %s under memory pressure: rc = %d\n",
1164 rpc->srpc_scd->scd_svc->sv_name,
1165 libcfs_id2str(rpc->srpc_peer), -ENOMEM);
1169 if (sfw_batch_active(bat)) {
1170 reply->tsr_status = EBUSY;
1174 if (request->tsr_is_client && rpc->srpc_bulk == NULL) {
1175 /* rpc will be resumed later in sfw_bulk_ready */
1178 len = sizeof(struct lnet_process_id_packed) *
1181 return sfw_alloc_pages(rpc, CFS_CPT_ANY, len, 1);
1184 rc = sfw_add_test_instance(bat, rpc);
1185 CDEBUG(rc == 0 ? D_NET : D_WARNING,
1186 "%s test: sv %d %s, loop %d, concur %d, ndest %d\n",
1187 rc == 0 ? "Added" : "Failed to add", request->tsr_service,
1188 request->tsr_is_client ? "client" : "server",
1189 request->tsr_loop, request->tsr_concur, request->tsr_ndest);
1191 reply->tsr_status = (rc < 0) ? -rc : rc;
/* Handle a BATCH request: validate the session (ESRCH) and batch ID
 * (ENOENT), then dispatch the opcode to run/stop/query.  An unknown
 * opcode drops the RPC with -EINVAL; otherwise the result is reported
 * as a positive errno in the reply. */
1196 sfw_control_batch(struct srpc_batch_reqst *request,
1197 struct srpc_batch_reply *reply)
1199 struct sfw_session *sn = sfw_data.fw_session;
1200 struct sfw_batch *bat;
1203 reply->bar_sid = get_old_sid(sn);
1205 if (sn == NULL || !sfw_sid_equal(request->bar_sid, sn->sn_id)) {
1206 reply->bar_status = ESRCH;
1210 bat = sfw_find_batch(request->bar_bid);
1212 reply->bar_status = ENOENT;
1216 switch (request->bar_opc) {
1217 case SRPC_BATCH_OPC_RUN:
1218 rc = sfw_run_batch(bat);
1221 case SRPC_BATCH_OPC_STOP:
1222 rc = sfw_stop_batch(bat, request->bar_arg);
1225 case SRPC_BATCH_OPC_QUERY:
1226 rc = sfw_query_batch(bat, request->bar_testidx, reply);
1230 return -EINVAL; /* drop it */
1233 reply->bar_status = (rc < 0) ? -rc : rc;
/* Entry point for every incoming framework RPC.  Drops the RPC during
 * shutdown or when racing with session expiry, pins the session timer
 * off while handling, byte-swaps the request, enforces the session
 * feature mask (EPROTO on mismatch, except for MAKE_SESSION/DEBUG
 * which may arrive before a session exists), dispatches by service id,
 * then re-arms the timer and installs sfw_server_rpc_done() as the
 * completion callback.  Only one framework RPC is handled at a time
 * (fw_active_srpc). */
1238 sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
1240 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
1241 struct srpc_msg *reply = &rpc->srpc_replymsg;
1242 struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
1243 unsigned features = LST_FEATS_MASK;
1246 LASSERT(sfw_data.fw_active_srpc == NULL);
1247 LASSERT(sv->sv_id <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
1249 spin_lock(&sfw_data.fw_lock);
1251 if (sfw_data.fw_shuttingdown) {
1252 spin_unlock(&sfw_data.fw_lock);
1256 /* Remove timer to avoid racing with it or expiring active session */
1257 if (sfw_del_session_timer() != 0) {
1258 CERROR("dropping RPC %s from %s: racing with expiry timer: rc = %d\n",
1259 sv->sv_name, libcfs_id2str(rpc->srpc_peer), -EAGAIN);
1260 spin_unlock(&sfw_data.fw_lock);
1264 sfw_data.fw_active_srpc = rpc;
1265 spin_unlock(&sfw_data.fw_lock);
1267 sfw_unpack_message(request);
1268 LASSERT(request->msg_type == srpc_service2request(sv->sv_id));
1270 /* rpc module should have checked this */
1271 LASSERT(request->msg_version == SRPC_MSG_VERSION);
1273 if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
1274 sv->sv_id != SRPC_SERVICE_DEBUG) {
1275 struct sfw_session *sn = sfw_data.fw_session;
1278 sn->sn_features != request->msg_ses_feats) {
1279 CNETERR("Features of framework RPC don't match features of current session: %x/%x\n",
1280 request->msg_ses_feats, sn->sn_features);
1281 reply->msg_body.reply.status = EPROTO;
1282 reply->msg_body.reply.sid.ses_stamp = sn->sn_id.ses_stamp;
1283 reply->msg_body.reply.sid.ses_nid =
1284 lnet_nid_to_nid4(&sn->sn_id.ses_nid);
1288 } else if ((request->msg_ses_feats & ~LST_FEATS_MASK) != 0) {
1289 /* NB: at this point, old version will ignore features and
1290 * create new session anyway, so console should be able
1292 reply->msg_body.reply.status = EPROTO;
1296 switch (sv->sv_id) {
1299 case SRPC_SERVICE_TEST:
1300 rc = sfw_add_test(rpc);
1303 case SRPC_SERVICE_BATCH:
1304 rc = sfw_control_batch(&request->msg_body.bat_reqst,
1305 &reply->msg_body.bat_reply);
1308 case SRPC_SERVICE_QUERY_STAT:
1309 rc = sfw_get_stats(&request->msg_body.stat_reqst,
1310 &reply->msg_body.stat_reply);
1313 case SRPC_SERVICE_DEBUG:
1314 rc = sfw_debug_session(&request->msg_body.dbg_reqst,
1315 &reply->msg_body.dbg_reply);
1318 case SRPC_SERVICE_MAKE_SESSION:
1319 rc = sfw_make_session(&request->msg_body.mksn_reqst,
1320 &reply->msg_body.mksn_reply);
1323 case SRPC_SERVICE_REMOVE_SESSION:
1324 rc = sfw_remove_session(&request->msg_body.rmsn_reqst,
1325 &reply->msg_body.rmsn_reply);
/* Echo the session feature mask back to the peer. */
1329 if (sfw_data.fw_session != NULL)
1330 features = sfw_data.fw_session->sn_features;
1332 reply->msg_ses_feats = features;
1333 rpc->srpc_done = sfw_server_rpc_done;
1334 spin_lock(&sfw_data.fw_lock);
1336 if (!sfw_data.fw_shuttingdown)
1337 sfw_add_session_timer();
1339 sfw_data.fw_active_srpc = NULL;
1340 spin_unlock(&sfw_data.fw_lock);
/* Resume an ADD_TEST RPC after its bulk transfer (the destination-ID
 * list) completes.  Drops the RPC if the transfer failed (the status
 * check near line 1358 is outside this excerpt), during shutdown, or
 * when racing with session expiry; otherwise re-enters sfw_add_test()
 * with srpc_bulk now populated and re-arms the session timer. */
1345 sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
1347 struct srpc_service *sv = rpc->srpc_scd->scd_svc;
1350 LASSERT(rpc->srpc_bulk != NULL);
1351 LASSERT(sv->sv_id == SRPC_SERVICE_TEST);
1352 LASSERT(sfw_data.fw_active_srpc == NULL);
1353 LASSERT(rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst.tsr_is_client);
1355 spin_lock(&sfw_data.fw_lock);
1358 CERROR("Bulk transfer failed for RPC: service %s, peer %s, status %d: rc = %d\n",
1359 sv->sv_name, libcfs_id2str(rpc->srpc_peer), status, -EIO);
1360 spin_unlock(&sfw_data.fw_lock);
1364 if (sfw_data.fw_shuttingdown) {
1365 spin_unlock(&sfw_data.fw_lock);
1369 if (sfw_del_session_timer() != 0) {
1370 CERROR("dropping RPC %s from %s: racing with expiry timer: rc = %d\n",
1371 sv->sv_name, libcfs_id2str(rpc->srpc_peer), -EAGAIN);
1372 spin_unlock(&sfw_data.fw_lock);
1376 sfw_data.fw_active_srpc = rpc;
1377 spin_unlock(&sfw_data.fw_lock);
1379 rc = sfw_add_test(rpc);
1381 spin_lock(&sfw_data.fw_lock);
1383 if (!sfw_data.fw_shuttingdown)
1384 sfw_add_session_timer();
1386 sfw_data.fw_active_srpc = NULL;
1387 spin_unlock(&sfw_data.fw_lock);
/* Create an outgoing framework client RPC.  Bulk-less RPCs are
 * recycled from fw_zombie_rpcs when available (workitem cancelled,
 * then re-initialized in place); otherwise a fresh RPC is allocated.
 * NB: recycled/bulk-less RPCs get sfw_client_rpc_fini as finalizer so
 * they return to the zombie list, while bulk RPCs get none.  The
 * session feature mask is stamped on the request. */
1391 struct srpc_client_rpc *
1392 sfw_create_rpc(struct lnet_process_id peer, int service,
1393 unsigned features, int nbulkiov, int bulklen,
1394 void (*done)(struct srpc_client_rpc *), void *priv)
1396 struct srpc_client_rpc *rpc = NULL;
1398 spin_lock(&sfw_data.fw_lock);
1400 LASSERT(!sfw_data.fw_shuttingdown);
1401 LASSERT(service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
1403 if (nbulkiov == 0 && !list_empty(&sfw_data.fw_zombie_rpcs)) {
1404 rpc = list_first_entry(&sfw_data.fw_zombie_rpcs,
1405 struct srpc_client_rpc, crpc_list);
1406 list_del(&rpc->crpc_list);
1408 spin_unlock(&sfw_data.fw_lock);
1411 /* Ensure that rpc is done */
1412 swi_cancel_workitem(&rpc->crpc_wi);
1413 srpc_init_client_rpc(rpc, peer, service, 0, 0,
1414 done, sfw_client_rpc_fini, priv);
1416 rpc = srpc_create_client_rpc(peer, service,
1417 nbulkiov, bulklen, done,
1418 nbulkiov != 0 ? NULL :
1419 sfw_client_rpc_fini,
1423 if (rpc != NULL) /* "session" is concept in framework */
1424 rpc->crpc_reqstmsg.msg_ses_feats = features;
/*
 * Byte-swap an incoming framework message in place when it arrived from a
 * peer of opposite endianness.  A message whose magic already equals
 * SRPC_MSG_MAGIC is in host order and is left untouched; otherwise every
 * field of the type-specific body is swabbed according to msg_type.
 *
 * NOTE(review): the return type line, the per-branch closing "return;"
 * statements, and the final catch-all (likely LBUG for unknown types) are
 * elided in this extract.
 */
1430 sfw_unpack_message(struct srpc_msg *msg)
1432 if (msg->msg_magic == SRPC_MSG_MAGIC)
1433 return; /* no flipping needed */
1435 /* srpc module should guarantee I wouldn't get crap */
1436 LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
1438 if (msg->msg_type == SRPC_MSG_STAT_REQST) {
1439 struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst;
1441 __swab32s(&req->str_type);
1442 __swab64s(&req->str_rpyid);
1443 sfw_unpack_sid(req->str_sid);
1447 if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
1448 struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
1450 __swab32s(&rep->str_status);
1451 sfw_unpack_sid(rep->str_sid);
1452 sfw_unpack_fw_counters(rep->str_fw);
1453 sfw_unpack_rpc_counters(rep->str_rpc);
1454 sfw_unpack_lnet_counters(rep->str_lnet);
1458 if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
1459 struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst;
1461 __swab64s(&req->mksn_rpyid);
1462 __swab32s(&req->mksn_force);
1463 sfw_unpack_sid(req->mksn_sid);
1467 if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
1468 struct srpc_mksn_reply *rep = &msg->msg_body.mksn_reply;
1470 __swab32s(&rep->mksn_status);
1471 __swab32s(&rep->mksn_timeout);
1472 sfw_unpack_sid(rep->mksn_sid);
1476 if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
1477 struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst;
1479 __swab64s(&req->rmsn_rpyid);
1480 sfw_unpack_sid(req->rmsn_sid);
1484 if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
1485 struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply;
1487 __swab32s(&rep->rmsn_status);
1488 sfw_unpack_sid(rep->rmsn_sid);
1492 if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
1493 struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst;
1495 __swab64s(&req->dbg_rpyid);
1496 __swab32s(&req->dbg_flags);
1497 sfw_unpack_sid(req->dbg_sid);
1501 if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
1502 struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply;
1504 __swab32s(&rep->dbg_nbatch);
1505 __swab32s(&rep->dbg_timeout);
1506 sfw_unpack_sid(rep->dbg_sid);
1510 if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
1511 struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst;
1513 __swab32s(&req->bar_opc);
1514 __swab64s(&req->bar_rpyid);
1515 __swab32s(&req->bar_testidx);
1516 __swab32s(&req->bar_arg);
1517 sfw_unpack_sid(req->bar_sid);
1518 __swab64s(&req->bar_bid.bat_id);
1522 if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
1523 struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
1525 __swab32s(&rep->bar_status);
1526 sfw_unpack_sid(rep->bar_sid);
1530 if (msg->msg_type == SRPC_MSG_TEST_REQST) {
1531 struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
1533 __swab64s(&req->tsr_rpyid);
1534 __swab64s(&req->tsr_bulkid);
1535 __swab32s(&req->tsr_loop);
1536 __swab32s(&req->tsr_ndest);
1537 __swab32s(&req->tsr_concur);
1538 __swab32s(&req->tsr_service);
1539 sfw_unpack_sid(req->tsr_sid);
1540 __swab64s(&req->tsr_bid.bat_id);
1544 if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
1545 struct srpc_test_reply *rep = &msg->msg_body.tes_reply;
1547 __swab32s(&rep->tsr_status);
1548 sfw_unpack_sid(rep->tsr_sid);
1552 if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
1553 struct srpc_join_reqst *req = &msg->msg_body.join_reqst;
1555 __swab64s(&req->join_rpyid);
1556 sfw_unpack_sid(req->join_sid);
1560 if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
1561 struct srpc_join_reply *rep = &msg->msg_body.join_reply;
1563 __swab32s(&rep->join_status);
1564 __swab32s(&rep->join_timeout);
1565 sfw_unpack_sid(rep->join_sid);
/*
 * Abort an in-flight framework client RPC with -EINTR.  The RPC must be
 * referenced (refcount > 0) and must target a framework service.  Takes
 * crpc_lock around srpc_abort_rpc() as that call requires.
 * NOTE(review): the return type line and braces are elided in this extract.
 */
1573 sfw_abort_rpc(struct srpc_client_rpc *rpc)
1575 LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
1576 LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
1578 spin_lock(&rpc->crpc_lock);
1579 srpc_abort_rpc(rpc, -EINTR);
1580 spin_unlock(&rpc->crpc_lock);
/*
 * Post a freshly created framework client RPC: sets its timeout from the
 * rpc_timeout module parameter and submits it under crpc_lock.  The RPC
 * must not already be closed, aborted, queued, and the framework must not
 * be shutting down.
 * NOTE(review): the actual srpc_post_rpc(rpc) call (between setting the
 * timeout and the unlock) appears elided in this extract — confirm
 * against the upstream file.
 */
1584 sfw_post_rpc(struct srpc_client_rpc *rpc)
1586 spin_lock(&rpc->crpc_lock);
1588 LASSERT(!rpc->crpc_closed);
1589 LASSERT(!rpc->crpc_aborted);
1590 LASSERT(list_empty(&rpc->crpc_list));
1591 LASSERT(!sfw_data.fw_shuttingdown);
1593 rpc->crpc_timeout = rpc_timeout;
1596 spin_unlock(&rpc->crpc_lock);
/*
 * Table of built-in framework services registered by sfw_startup() and
 * torn down by sfw_shutdown().  Iterated with an sv_name == NULL sentinel
 * check; NOTE(review): the terminating sentinel entry and closing brace
 * are not visible in this extract.
 */
1600 static struct srpc_service sfw_services[] = {
1601 { .sv_id = SRPC_SERVICE_DEBUG, .sv_name = "debug", },
1602 { .sv_id = SRPC_SERVICE_QUERY_STAT, .sv_name = "query stats", },
1603 { .sv_id = SRPC_SERVICE_MAKE_SESSION, .sv_name = "make session", },
1604 { .sv_id = SRPC_SERVICE_REMOVE_SESSION, .sv_name = "remove session", },
1605 { .sv_id = SRPC_SERVICE_BATCH, .sv_name = "batch service", },
1606 { .sv_id = SRPC_SERVICE_TEST, .sv_name = "test service", },
/*
 * NOTE(review): interior of sfw_startup() — the function signature line
 * is elided just above this point in the extract, as are various braces,
 * return statements and error-path lines.  Validates module parameters,
 * initialises the global sfw_data state, registers the built-in brw/ping
 * test cases, then adds every test-case and framework service and
 * reserves their RPC buffers.
 */
1613 struct srpc_service *sv;
1614 struct sfw_test_case *tsc;
/* reject negative timeouts from module parameters */
1617 if (session_timeout < 0) {
1618 CERROR("Session timeout must be non-negative: %d: rc = %d\n",
1619 session_timeout, -EINVAL);
1623 if (rpc_timeout < 0) {
1624 CERROR("RPC timeout must be non-negative: %d: rc = %d\n",
1625 rpc_timeout, -EINVAL);
/* zero means "never expire" for both timeouts — warn, don't fail */
1629 if (session_timeout == 0)
1630 CWARN("Zero session_timeout specified - test sessions never expire.\n");
1632 if (rpc_timeout == 0)
1633 CWARN("Zero rpc_timeout specified - test RPC never expire.\n");
/* reset all global framework state */
1635 memset(&sfw_data, 0, sizeof(struct smoketest_framework));
1637 sfw_data.fw_session = NULL;
1638 sfw_data.fw_active_srpc = NULL;
1639 spin_lock_init(&sfw_data.fw_lock);
1640 atomic_set(&sfw_data.fw_nzombies, 0);
1641 INIT_LIST_HEAD(&sfw_data.fw_tests);
1642 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
1643 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
/* register the built-in bulk-read/write and ping test cases */
1645 brw_init_test_service();
1646 rc = sfw_register_test(&brw_test_service, &brw_test_client);
1649 ping_init_test_client();
1650 ping_init_test_service();
1651 rc = sfw_register_test(&ping_test_service, &ping_test_client);
/* add the per-test-case server services */
1655 list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
1656 sv = tsc->tsc_srv_service;
1658 rc = srpc_add_service(sv);
1659 LASSERT(rc != -EBUSY);
1661 CWARN("Failed to add %s service: %d\n",
/* add the built-in framework services; the table ends at a NULL name */
1667 for (i = 0; ; i++) {
1668 sv = &sfw_services[i];
1669 if (sv->sv_name == NULL)
1672 sv->sv_bulk_ready = NULL;
1673 sv->sv_handler = sfw_handle_server_rpc;
1674 sv->sv_wi_total = SFW_FRWK_WI_MAX;
/* only the TEST service receives bulk data */
1675 if (sv->sv_id == SRPC_SERVICE_TEST)
1676 sv->sv_bulk_ready = sfw_bulk_ready;
1678 rc = srpc_add_service(sv);
1679 LASSERT(rc != -EBUSY);
1681 CWARN("Failed to add %s service: %d\n", sv->sv_name,
1686 /* about to sfw_shutdown, no need to add buffer */
/* pre-reserve one RPC buffer per work item for this service */
1690 rc = srpc_service_add_buffers(sv, sv->sv_wi_total);
1692 CWARN("Failed to reserve enough buffers: service %s, %d needed: %d\n",
1693 sv->sv_name, sv->sv_wi_total, rc);
/*
 * NOTE(review): interior of sfw_shutdown() — the function signature line
 * is elided just above this point in the extract.  Tear-down sequence:
 * flag shutdown, wait for the active server RPC / session timer / zombie
 * sessions, remove all services, free recycled client RPCs, then wait
 * for each service to finish shutting down and free the test cases.
 */
1706 struct srpc_service *sv;
1707 struct sfw_test_case *tsc;
1710 spin_lock(&sfw_data.fw_lock);
/* stop accepting new RPCs, then wait (dropping fw_lock while sleeping)
 * for the currently active server RPC to finish */
1712 sfw_data.fw_shuttingdown = 1;
1713 lst_wait_until(sfw_data.fw_active_srpc == NULL, sfw_data.fw_lock,
1714 "waiting for active RPC to finish.\n");
/* if the session timer couldn't be cancelled it is already running;
 * wait for it to destroy the session itself */
1716 if (sfw_del_session_timer() != 0)
1717 lst_wait_until(sfw_data.fw_session == NULL, sfw_data.fw_lock,
1718 "waiting for session timer to explode.\n");
1720 sfw_deactivate_session();
1721 lst_wait_until(atomic_read(&sfw_data.fw_nzombies) == 0,
1723 "waiting for %d zombie sessions to die.\n",
1724 atomic_read(&sfw_data.fw_nzombies));
1726 spin_unlock(&sfw_data.fw_lock);
/* remove the built-in framework services (NULL-name sentinel ends the
 * table) */
1728 for (i = 0; ; i++) {
1729 sv = &sfw_services[i];
1730 if (sv->sv_name == NULL)
1733 srpc_shutdown_service(sv);
1734 srpc_remove_service(sv);
/* remove the per-test-case services */
1737 list_for_each_entry(tsc, &sfw_data.fw_tests, tsc_list) {
1738 sv = tsc->tsc_srv_service;
1739 srpc_shutdown_service(sv);
1740 srpc_remove_service(sv);
/* free every recycled client RPC still on the zombie list */
1743 while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
1744 struct srpc_client_rpc *rpc;
1746 rpc = list_first_entry(&sfw_data.fw_zombie_rpcs,
1747 struct srpc_client_rpc, crpc_list);
1748 list_del(&rpc->crpc_list);
1750 LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
/* wait for the framework services to complete shutdown */
1753 for (i = 0; ; i++) {
1754 sv = &sfw_services[i];
1755 if (sv->sv_name == NULL)
1758 srpc_wait_service_shutdown(sv);
/* wait for test-case services, then free the test case records */
1761 while (!list_empty(&sfw_data.fw_tests)) {
1762 tsc = list_first_entry(&sfw_data.fw_tests,
1763 struct sfw_test_case, tsc_list);
1765 srpc_wait_service_shutdown(tsc->tsc_srv_service);
1767 list_del(&tsc->tsc_list);
1768 LIBCFS_FREE(tsc, sizeof(*tsc));