1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
4 * Copyright (C) 2001, 2002 Cluster File Systems, Inc.
5 * Author: Isaac Huang <isaac@clusterfs.com>
9 #define DEBUG_SUBSYSTEM S_LNET
23 #define SRPC_PEER_HASH_SIZE 101 /* # peer lists */
24 #define SRPC_PEER_CREDITS 16 /* >= most LNDs' default peer credits */
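/* NB each peer starts with SRPC_PEER_CREDITS send credits: srpc_check_sends()
 * consumes one credit per RPC it dispatches and srpc_client_rpc_done() returns
 * it, so at most SRPC_PEER_CREDITS RPCs are in flight to any one peer; the
 * rest wait on the peer's stp_ctl_rpcq/stp_rpcq. */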
26 struct smoketest_rpc {
27 spinlock_t rpc_glock; /* global lock */
28 srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
29 struct list_head *rpc_peers; /* hash table of known peers */
30 lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
31 srpc_state_t rpc_state;
32 srpc_counters_t rpc_counters;
33 __u64 rpc_matchbits; /* matchbits counter */
37 int srpc_handle_rpc (swi_workitem_t *wi);
39 void srpc_get_counters (srpc_counters_t *cnt)
41 spin_lock(&srpc_data.rpc_glock);
42 *cnt = srpc_data.rpc_counters;
43 spin_unlock(&srpc_data.rpc_glock);
46 void srpc_set_counters (const srpc_counters_t *cnt)
48 spin_lock(&srpc_data.rpc_glock);
49 srpc_data.rpc_counters = *cnt;
50 spin_unlock(&srpc_data.rpc_glock);
54 srpc_add_bulk_page (srpc_bulk_t *bk, cfs_page_t *pg, int i)
56 LASSERT (i >= 0 && i < bk->bk_niov);
59 bk->bk_iovs[i].kiov_offset = 0;
60 bk->bk_iovs[i].kiov_page = pg;
61 bk->bk_iovs[i].kiov_len = CFS_PAGE_SIZE;
63 LASSERT (bk->bk_pages != NULL);
66 bk->bk_iovs[i].iov_len = CFS_PAGE_SIZE;
67 bk->bk_iovs[i].iov_base = cfs_page_address(pg);
73 srpc_free_bulk (srpc_bulk_t *bk)
80 LASSERT (bk->bk_pages != NULL);
83 for (i = 0; i < bk->bk_niov; i++) {
85 pg = bk->bk_iovs[i].kiov_page;
89 if (pg == NULL) break;
95 LIBCFS_FREE(bk->bk_pages, sizeof(cfs_page_t *) * bk->bk_niov);
97 LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
102 srpc_alloc_bulk (int npages, int sink)
108 LASSERT (npages > 0 && npages <= LNET_MAX_IOV);
110 LIBCFS_ALLOC(bk, offsetof(srpc_bulk_t, bk_iovs[npages]));
112 CERROR ("Can't allocate descriptor for %d pages\n", npages);
116 memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[npages]));
118 bk->bk_niov = npages;
119 bk->bk_len = npages * CFS_PAGE_SIZE;
121 LIBCFS_ALLOC(pages, sizeof(cfs_page_t *) * npages);
123 LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[npages]));
124 CERROR ("Can't allocate page array for %d pages\n", npages);
128 memset(pages, 0, sizeof(cfs_page_t *) * npages);
129 bk->bk_pages = pages;
134 for (i = 0; i < npages; i++) {
135 cfs_page_t *pg = cfs_alloc_page(CFS_ALLOC_STD);
138 CERROR ("Can't allocate page %d of %d\n", i, npages);
143 srpc_add_bulk_page(bk, pg, i);
150 static inline struct list_head *
151 srpc_nid2peerlist (lnet_nid_t nid)
153 unsigned int hash = ((unsigned int)nid) % SRPC_PEER_HASH_SIZE;
155 return &srpc_data.rpc_peers[hash];
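/* NB only the low 32 bits of the NID feed the hash above; peers are spread
 * over the SRPC_PEER_HASH_SIZE (101) buckets and are never removed until
 * srpc_shutdown() empties the table. */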
158 static inline srpc_peer_t *
159 srpc_create_peer (lnet_nid_t nid)
163 LASSERT (nid != LNET_NID_ANY);
165 LIBCFS_ALLOC(peer, sizeof(srpc_peer_t));
167 CERROR ("Failed to allocate peer structure for %s\n",
168 libcfs_nid2str(nid));
172 memset(peer, 0, sizeof(srpc_peer_t));
174 peer->stp_credits = SRPC_PEER_CREDITS;
176 spin_lock_init(&peer->stp_lock);
177 CFS_INIT_LIST_HEAD(&peer->stp_rpcq);
178 CFS_INIT_LIST_HEAD(&peer->stp_ctl_rpcq);
183 srpc_find_peer_locked (lnet_nid_t nid)
185 struct list_head *peer_list = srpc_nid2peerlist(nid);
188 LASSERT (nid != LNET_NID_ANY);
190 list_for_each_entry (peer, peer_list, stp_list) {
191 if (peer->stp_nid == nid)
199 srpc_nid2peer (lnet_nid_t nid)
202 srpc_peer_t *new_peer;
204 spin_lock(&srpc_data.rpc_glock);
205 peer = srpc_find_peer_locked(nid);
206 spin_unlock(&srpc_data.rpc_glock);
211 new_peer = srpc_create_peer(nid);
213 spin_lock(&srpc_data.rpc_glock);
215 peer = srpc_find_peer_locked(nid);
217 spin_unlock(&srpc_data.rpc_glock);
218 if (new_peer != NULL)
219 LIBCFS_FREE(new_peer, sizeof(srpc_peer_t));
224 if (new_peer == NULL) {
225 spin_unlock(&srpc_data.rpc_glock);
229 list_add_tail(&new_peer->stp_list, srpc_nid2peerlist(nid));
230 spin_unlock(&srpc_data.rpc_glock);
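/* NB srpc_nid2peer() allocates the new peer outside rpc_glock and then
 * rechecks the hash under the lock, freeing its own copy if another thread
 * won the race; this keeps the allocation out of the critical section. */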
239 spin_lock(&srpc_data.rpc_glock);
240 id = srpc_data.rpc_matchbits++;
241 spin_unlock(&srpc_data.rpc_glock);
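/* NB matchbits handed out here stay unique for the life of the node: the
 * counter is seeded at startup with the current time shifted left by 48 bits
 * (after a one second pause), so a restart begins a fresh, disjoint range. */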
246 srpc_init_server_rpc (srpc_server_rpc_t *rpc,
247 srpc_service_t *sv, srpc_buffer_t *buffer)
249 memset(rpc, 0, sizeof(*rpc));
250 swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc);
252 rpc->srpc_ev.ev_fired = 1; /* no event expected now */
254 rpc->srpc_service = sv;
255 rpc->srpc_reqstbuf = buffer;
256 rpc->srpc_peer = buffer->buf_peer;
257 rpc->srpc_self = buffer->buf_self;
258 rpc->srpc_replymdh = LNET_INVALID_HANDLE;
262 srpc_add_service (srpc_service_t *sv)
266 srpc_server_rpc_t *rpc;
268 LASSERT (sv->sv_concur > 0);
269 LASSERT (0 <= id && id <= SRPC_SERVICE_MAX_ID);
271 spin_lock(&srpc_data.rpc_glock);
273 LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
275 if (srpc_data.rpc_services[id] != NULL) {
276 spin_unlock(&srpc_data.rpc_glock);
280 srpc_data.rpc_services[id] = sv;
281 spin_unlock(&srpc_data.rpc_glock);
284 sv->sv_nposted_msg = 0;
285 sv->sv_shuttingdown = 0;
286 spin_lock_init(&sv->sv_lock);
287 CFS_INIT_LIST_HEAD(&sv->sv_free_rpcq);
288 CFS_INIT_LIST_HEAD(&sv->sv_active_rpcq);
289 CFS_INIT_LIST_HEAD(&sv->sv_posted_msgq);
290 CFS_INIT_LIST_HEAD(&sv->sv_blocked_msgq);
292 sv->sv_ev.ev_data = sv;
293 sv->sv_ev.ev_type = SRPC_REQUEST_RCVD;
295 for (i = 0; i < sv->sv_concur; i++) {
296 LIBCFS_ALLOC(rpc, sizeof(*rpc));
297 if (rpc == NULL) goto enomem;
299 list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
302 CDEBUG (D_NET, "Adding service: id %d, name %s, concurrency %d\n",
303 id, sv->sv_name, sv->sv_concur);
307 while (!list_empty(&sv->sv_free_rpcq)) {
308 rpc = list_entry(sv->sv_free_rpcq.next,
309 srpc_server_rpc_t, srpc_list);
310 list_del(&rpc->srpc_list);
311 LIBCFS_FREE(rpc, sizeof(*rpc));
314 spin_lock(&srpc_data.rpc_glock);
315 srpc_data.rpc_services[id] = NULL;
316 spin_unlock(&srpc_data.rpc_glock);
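/* A plausible usage sketch (identifiers here are hypothetical, not from this
 * file): a caller fills in sv_id, sv_name, sv_concur and sv_handler, registers
 * the service, then posts receive buffers for it:
 *
 *     rc = srpc_add_service(&my_sv);
 *     if (rc == 0)
 *             rc = srpc_service_add_buffers(&my_sv, my_sv.sv_concur);
 */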
321 srpc_remove_service (srpc_service_t *sv)
325 spin_lock(&srpc_data.rpc_glock);
327 if (srpc_data.rpc_services[id] != sv) {
328 spin_unlock(&srpc_data.rpc_glock);
332 srpc_data.rpc_services[id] = NULL;
333 spin_unlock(&srpc_data.rpc_glock);
338 srpc_post_passive_rdma(int portal, __u64 matchbits, void *buf,
339 int len, int options, lnet_process_id_t peer,
340 lnet_handle_md_t *mdh, srpc_event_t *ev)
344 lnet_handle_me_t meh;
346 rc = LNetMEAttach(portal, peer, matchbits, 0,
347 LNET_UNLINK, LNET_INS_AFTER, &meh);
349 CERROR ("LNetMEAttach failed: %d\n", rc);
350 LASSERT (rc == -ENOMEM);
358 md.options = options;
359 md.eq_handle = srpc_data.rpc_lnet_eq;
361 rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
363 CERROR ("LNetMDAttach failed: %d\n", rc);
364 LASSERT (rc == -ENOMEM);
366 rc = LNetMEUnlink(meh);
372 "Posted passive RDMA: peer %s, portal %d, matchbits "LPX64"\n",
373 libcfs_id2str(peer), portal, matchbits);
378 srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
379 int options, lnet_process_id_t peer, lnet_nid_t self,
380 lnet_handle_md_t *mdh, srpc_event_t *ev)
388 md.eq_handle = srpc_data.rpc_lnet_eq;
389 md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
390 md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
392 rc = LNetMDBind(md, LNET_UNLINK, mdh);
394 CERROR ("LNetMDBind failed: %d\n", rc);
395 LASSERT (rc == -ENOMEM);
399 /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
400 * they're only meaningful for MDs attached to an ME (i.e. passive buffers) */
402 if ((options & LNET_MD_OP_PUT) != 0) {
403 rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
404 portal, matchbits, 0, 0);
406 LASSERT ((options & LNET_MD_OP_GET) != 0);
408 rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
412 CERROR ("LNet%s(%s, %d, "LPD64") failed: %d\n",
413 ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
414 libcfs_id2str(peer), portal, matchbits, rc);
416 /* The forthcoming unlink event will complete this operation
417 * with failure, so fall through and return success here. */
419 rc = LNetMDUnlink(*mdh);
423 "Posted active RDMA: peer %s, portal %u, matchbits "LPX64"\n",
424 libcfs_id2str(peer), portal, matchbits);
430 srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
431 int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
436 if (service > SRPC_FRAMEWORK_SERVICE_MAX_ID)
437 portal = SRPC_REQUEST_PORTAL;
439 portal = SRPC_FRAMEWORK_REQUEST_PORTAL;
441 rc = srpc_post_active_rdma(portal, service, buf, len,
442 LNET_MD_OP_PUT, peer,
443 LNET_NID_ANY, mdh, ev);
448 srpc_post_passive_rqtbuf(int service, void *buf, int len,
449 lnet_handle_md_t *mdh, srpc_event_t *ev)
453 lnet_process_id_t any = {.nid = LNET_NID_ANY,
454 .pid = LNET_PID_ANY};
456 if (service > SRPC_FRAMEWORK_SERVICE_MAX_ID)
457 portal = SRPC_REQUEST_PORTAL;
459 portal = SRPC_FRAMEWORK_REQUEST_PORTAL;
461 rc = srpc_post_passive_rdma(portal, service, buf, len,
462 LNET_MD_OP_PUT, any, mdh, ev);
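/* NB framework services (id <= SRPC_FRAMEWORK_SERVICE_MAX_ID) use
 * SRPC_FRAMEWORK_REQUEST_PORTAL, which the startup code marks lazy with
 * LNetSetLazyPortal() so requests arriving before a buffer is posted are held
 * rather than dropped; test services use SRPC_REQUEST_PORTAL. */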
467 srpc_service_post_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
469 srpc_msg_t *msg = &buf->buf_msg;
472 LASSERT (!sv->sv_shuttingdown);
474 buf->buf_mdh = LNET_INVALID_HANDLE;
475 list_add(&buf->buf_list, &sv->sv_posted_msgq);
476 sv->sv_nposted_msg++;
477 spin_unlock(&sv->sv_lock);
479 rc = srpc_post_passive_rqtbuf(sv->sv_id, msg, sizeof(*msg),
480 &buf->buf_mdh, &sv->sv_ev);
482 /* At this point, an RPC (new or delayed) may have arrived in
483 * msg and its event handler has been called. So we must add
484 * buf to sv_posted_msgq _before_ dropping sv_lock */
486 spin_lock(&sv->sv_lock);
489 if (sv->sv_shuttingdown) {
490 spin_unlock(&sv->sv_lock);
492 /* srpc_shutdown_service might have tried to unlink me
493 * when my buf_mdh was still invalid */
494 LNetMDUnlink(buf->buf_mdh);
496 spin_lock(&sv->sv_lock);
501 sv->sv_nposted_msg--;
502 if (sv->sv_shuttingdown) return rc;
504 list_del(&buf->buf_list);
506 spin_unlock(&sv->sv_lock);
507 LIBCFS_FREE(buf, sizeof(*buf));
508 spin_lock(&sv->sv_lock);
513 srpc_service_add_buffers (srpc_service_t *sv, int nbuffer)
519 LASSERTF (nbuffer > 0,
520 "nbuffer must be positive: %d\n", nbuffer);
522 for (posted = 0; posted < nbuffer; posted++) {
523 LIBCFS_ALLOC(buf, sizeof(*buf));
524 if (buf == NULL) break;
526 spin_lock(&sv->sv_lock);
527 rc = srpc_service_post_buffer(sv, buf);
528 spin_unlock(&sv->sv_lock);
537 srpc_service_remove_buffers (srpc_service_t *sv, int nbuffer)
539 LASSERTF (nbuffer > 0,
540 "nbuffer must be positive: %d\n", nbuffer);
542 spin_lock(&sv->sv_lock);
544 LASSERT (sv->sv_nprune >= 0);
545 LASSERT (!sv->sv_shuttingdown);
547 sv->sv_nprune += nbuffer;
549 spin_unlock(&sv->sv_lock);
553 /* returns 1 if sv has finished, otherwise 0 */
555 srpc_finish_service (srpc_service_t *sv)
557 srpc_server_rpc_t *rpc;
560 spin_lock(&sv->sv_lock);
562 LASSERT (sv->sv_shuttingdown); /* srpc_shutdown_service called */
564 if (sv->sv_nposted_msg != 0 || !list_empty(&sv->sv_active_rpcq)) {
566 "waiting for %d posted buffers to unlink and "
567 "in-flight RPCs to die.\n",
570 if (!list_empty(&sv->sv_active_rpcq)) {
571 rpc = list_entry(sv->sv_active_rpcq.next,
572 srpc_server_rpc_t, srpc_list);
574 "Active RPC on shutdown: sv %s, peer %s, "
575 "wi %s scheduled %d running %d, "
576 "ev fired %d type %d status %d lnet %d\n",
577 sv->sv_name, libcfs_id2str(rpc->srpc_peer),
578 swi_state2str(rpc->srpc_wi.wi_state),
579 rpc->srpc_wi.wi_scheduled,
580 rpc->srpc_wi.wi_running,
581 rpc->srpc_ev.ev_fired,
582 rpc->srpc_ev.ev_type,
583 rpc->srpc_ev.ev_status,
584 rpc->srpc_ev.ev_lnet);
587 spin_unlock(&sv->sv_lock);
591 spin_unlock(&sv->sv_lock); /* no lock needed from now on */
596 if (!list_empty(&sv->sv_posted_msgq))
597 q = &sv->sv_posted_msgq;
598 else if (!list_empty(&sv->sv_blocked_msgq))
599 q = &sv->sv_blocked_msgq;
603 buf = list_entry(q->next, srpc_buffer_t, buf_list);
604 list_del(&buf->buf_list);
606 LIBCFS_FREE(buf, sizeof(*buf));
609 while (!list_empty(&sv->sv_free_rpcq)) {
610 rpc = list_entry(sv->sv_free_rpcq.next,
611 srpc_server_rpc_t, srpc_list);
612 list_del(&rpc->srpc_list);
613 LIBCFS_FREE(rpc, sizeof(*rpc));
619 /* called with sv->sv_lock held */
621 srpc_service_recycle_buffer (srpc_service_t *sv, srpc_buffer_t *buf)
623 if (sv->sv_shuttingdown) goto free;
625 if (sv->sv_nprune == 0) {
626 if (srpc_service_post_buffer(sv, buf) != 0)
627 CWARN ("Failed to post %s buffer\n", sv->sv_name);
633 spin_unlock(&sv->sv_lock);
634 LIBCFS_FREE(buf, sizeof(*buf));
635 spin_lock(&sv->sv_lock);
639 srpc_shutdown_service (srpc_service_t *sv)
641 srpc_server_rpc_t *rpc;
644 spin_lock(&sv->sv_lock);
646 CDEBUG (D_NET, "Shutting down service: id %d, name %s\n",
647 sv->sv_id, sv->sv_name);
649 sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
651 /* schedule in-flight RPCs to notice the shutdown */
652 list_for_each_entry (rpc, &sv->sv_active_rpcq, srpc_list) {
653 swi_schedule_workitem(&rpc->srpc_wi);
656 spin_unlock(&sv->sv_lock);
658 /* OK to traverse sv_posted_msgq without lock, since no one
659 * touches sv_posted_msgq now */
660 list_for_each_entry (buf, &sv->sv_posted_msgq, buf_list)
661 LNetMDUnlink(buf->buf_mdh);
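/* Shutdown protocol: srpc_shutdown_service() only flags the service and
 * unlinks its posted buffers; the caller then polls srpc_finish_service()
 * until it returns 1, i.e. until every posted buffer has unlinked and every
 * active RPC has completed. */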
667 srpc_send_request (srpc_client_rpc_t *rpc)
669 srpc_event_t *ev = &rpc->crpc_reqstev;
674 ev->ev_type = SRPC_REQUEST_SENT;
676 rc = srpc_post_active_rqtbuf(rpc->crpc_dest, rpc->crpc_service,
677 &rpc->crpc_reqstmsg, sizeof(srpc_msg_t),
678 &rpc->crpc_reqstmdh, ev);
680 LASSERT (rc == -ENOMEM);
681 ev->ev_fired = 1; /* no more event expected */
687 srpc_prepare_reply (srpc_client_rpc_t *rpc)
689 srpc_event_t *ev = &rpc->crpc_replyev;
690 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
695 ev->ev_type = SRPC_REPLY_RCVD;
697 *id = srpc_next_id();
699 rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, *id,
700 &rpc->crpc_replymsg, sizeof(srpc_msg_t),
701 LNET_MD_OP_PUT, rpc->crpc_dest,
702 &rpc->crpc_replymdh, ev);
704 LASSERT (rc == -ENOMEM);
705 ev->ev_fired = 1; /* no more event expected */
711 srpc_prepare_bulk (srpc_client_rpc_t *rpc)
713 srpc_bulk_t *bk = &rpc->crpc_bulk;
714 srpc_event_t *ev = &rpc->crpc_bulkev;
715 __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
719 LASSERT (bk->bk_niov <= LNET_MAX_IOV);
721 if (bk->bk_niov == 0) return 0; /* nothing to do */
723 opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
727 opt |= LNET_MD_IOVEC;
732 ev->ev_type = SRPC_BULK_REQ_RCVD;
734 *id = srpc_next_id();
736 rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, *id,
737 &bk->bk_iovs[0], bk->bk_niov, opt,
738 rpc->crpc_dest, &bk->bk_mdh, ev);
740 LASSERT (rc == -ENOMEM);
741 ev->ev_fired = 1; /* no more event expected */
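/* NB srpc_prepare_bulk() above only posts a passive buffer tagged with the
 * bulkid carried in the request; the server drives the actual transfer in
 * srpc_do_bulk() below, issuing LNetGet() when it is the data sink and
 * LNetPut() when it is the source. */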
747 srpc_do_bulk (srpc_server_rpc_t *rpc)
749 srpc_event_t *ev = &rpc->srpc_ev;
750 srpc_bulk_t *bk = rpc->srpc_bulk;
751 __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
755 LASSERT (bk != NULL);
757 opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
761 opt |= LNET_MD_IOVEC;
766 ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;
768 rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
769 &bk->bk_iovs[0], bk->bk_niov, opt,
770 rpc->srpc_peer, rpc->srpc_self,
773 ev->ev_fired = 1; /* no more event expected */
777 /* called with srpc_service_t::sv_lock held */
779 srpc_schedule_server_rpc (srpc_server_rpc_t *rpc)
781 srpc_service_t *sv = rpc->srpc_service;
783 if (sv->sv_id > SRPC_FRAMEWORK_SERVICE_MAX_ID)
784 swi_schedule_workitem(&rpc->srpc_wi);
785 else /* framework RPCs are handled one by one */
786 swi_schedule_serial_workitem(&rpc->srpc_wi);
791 /* only called from srpc_handle_rpc */
793 srpc_server_rpc_done (srpc_server_rpc_t *rpc, int status)
795 srpc_service_t *sv = rpc->srpc_service;
796 srpc_buffer_t *buffer;
798 LASSERT (status != 0 || rpc->srpc_wi.wi_state == SWI_STATE_DONE);
800 rpc->srpc_status = status;
802 CDEBUG (status == 0 ? D_NET : D_NETERROR,
803 "Server RPC done: service %s, peer %s, status %s:%d\n",
804 sv->sv_name, libcfs_id2str(rpc->srpc_peer),
805 swi_state2str(rpc->srpc_wi.wi_state), status);
808 spin_lock(&srpc_data.rpc_glock);
809 srpc_data.rpc_counters.rpcs_dropped++;
810 spin_unlock(&srpc_data.rpc_glock);
813 if (rpc->srpc_done != NULL)
814 (*rpc->srpc_done) (rpc);
815 LASSERT (rpc->srpc_bulk == NULL);
817 spin_lock(&sv->sv_lock);
819 if (rpc->srpc_reqstbuf != NULL) {
820 /* NB might drop sv_lock in srpc_service_recycle_buffer, but
821 * sv won't go away because sv_active_rpcq is not empty */
822 srpc_service_recycle_buffer(sv, rpc->srpc_reqstbuf);
823 rpc->srpc_reqstbuf = NULL;
826 list_del(&rpc->srpc_list); /* from sv->sv_active_rpcq */
829 * No one can schedule me now since:
830 * - I'm not on sv_active_rpcq.
831 * - all LNet events have been fired.
832 * Cancel pending schedules and prevent future schedule attempts:
834 LASSERT (rpc->srpc_ev.ev_fired);
835 swi_kill_workitem(&rpc->srpc_wi);
837 if (!sv->sv_shuttingdown && !list_empty(&sv->sv_blocked_msgq)) {
838 buffer = list_entry(sv->sv_blocked_msgq.next,
839 srpc_buffer_t, buf_list);
840 list_del(&buffer->buf_list);
842 srpc_init_server_rpc(rpc, sv, buffer);
843 list_add_tail(&rpc->srpc_list, &sv->sv_active_rpcq);
844 srpc_schedule_server_rpc(rpc);
846 list_add(&rpc->srpc_list, &sv->sv_free_rpcq);
849 spin_unlock(&sv->sv_lock);
853 /* handles an incoming RPC */
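/* Server-side state machine: SWI_STATE_NEWBORN checks the message version,
 * dispatches to sv_handler and may start bulk; SWI_STATE_BULK_STARTED waits
 * for the bulk event and submits the reply; SWI_STATE_REPLY_SUBMITTED waits
 * for the reply event and completes via srpc_server_rpc_done(). */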
855 srpc_handle_rpc (swi_workitem_t *wi)
857 srpc_server_rpc_t *rpc = wi->wi_data;
858 srpc_service_t *sv = rpc->srpc_service;
859 srpc_event_t *ev = &rpc->srpc_ev;
862 LASSERT (wi == &rpc->srpc_wi);
864 spin_lock(&sv->sv_lock);
866 if (sv->sv_shuttingdown) {
867 spin_unlock(&sv->sv_lock);
869 if (rpc->srpc_bulk != NULL)
870 LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
871 LNetMDUnlink(rpc->srpc_replymdh);
873 if (ev->ev_fired) { /* no more event, OK to finish */
874 srpc_server_rpc_done(rpc, -ESHUTDOWN);
880 spin_unlock(&sv->sv_lock);
882 switch (wi->wi_state) {
885 case SWI_STATE_NEWBORN: {
887 srpc_generic_reply_t *reply;
889 msg = &rpc->srpc_reqstbuf->buf_msg;
890 reply = &rpc->srpc_replymsg.msg_body.reply;
892 if (msg->msg_version != SRPC_MSG_VERSION &&
893 msg->msg_version != __swab32(SRPC_MSG_VERSION)) {
894 CWARN ("Version mismatch: %u, %u expected, from %s\n",
895 msg->msg_version, SRPC_MSG_VERSION,
896 libcfs_id2str(rpc->srpc_peer));
897 reply->status = EPROTO;
900 rc = (*sv->sv_handler) (rpc);
901 LASSERT (reply->status == 0 || !rpc->srpc_bulk);
905 srpc_server_rpc_done(rpc, rc);
909 wi->wi_state = SWI_STATE_BULK_STARTED;
911 if (rpc->srpc_bulk != NULL) {
912 rc = srpc_do_bulk(rpc);
914 return 0; /* wait for bulk */
916 LASSERT (ev->ev_fired);
920 case SWI_STATE_BULK_STARTED:
921 LASSERT (rpc->srpc_bulk == NULL || ev->ev_fired);
923 if (rpc->srpc_bulk != NULL) {
926 if (sv->sv_bulk_ready != NULL)
927 rc = (*sv->sv_bulk_ready) (rpc, rc);
930 srpc_server_rpc_done(rpc, rc);
935 wi->wi_state = SWI_STATE_REPLY_SUBMITTED;
936 rc = srpc_send_reply(rpc);
938 return 0; /* wait for reply */
939 srpc_server_rpc_done(rpc, rc);
942 case SWI_STATE_REPLY_SUBMITTED:
943 LASSERT (ev->ev_fired);
945 wi->wi_state = SWI_STATE_DONE;
946 srpc_server_rpc_done(rpc, ev->ev_status);
954 srpc_client_rpc_expired (void *data)
956 srpc_client_rpc_t *rpc = data;
958 CWARN ("Client RPC expired: service %d, peer %s, timeout %d.\n",
959 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
962 spin_lock(&rpc->crpc_lock);
964 rpc->crpc_timeout = 0;
965 srpc_abort_rpc(rpc, -ETIMEDOUT);
967 spin_unlock(&rpc->crpc_lock);
969 spin_lock(&srpc_data.rpc_glock);
970 srpc_data.rpc_counters.rpcs_expired++;
971 spin_unlock(&srpc_data.rpc_glock);
976 srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
978 stt_timer_t *timer = &rpc->crpc_timer;
980 if (rpc->crpc_timeout == 0) return;
982 CFS_INIT_LIST_HEAD(&timer->stt_list);
983 timer->stt_data = rpc;
984 timer->stt_func = srpc_client_rpc_expired;
985 timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
986 cfs_time_current_sec());
987 stt_add_timer(timer);
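/* The timer planted above fires crpc_timeout seconds from now and runs
 * srpc_client_rpc_expired(), which aborts the RPC with -ETIMEDOUT; a zero
 * crpc_timeout means the RPC never expires. */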
992 * Called with rpc->crpc_lock held.
994 * Upon exit the RPC expiry timer is not queued and the handler is not
995 * running on any CPU. */
997 srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
999 /* timer not planted or already exploded */
1000 if (rpc->crpc_timeout == 0) return;
1002 /* timer successfully defused */
1003 if (stt_del_timer(&rpc->crpc_timer)) return;
1006 /* timer detonated, wait for it to explode */
1007 while (rpc->crpc_timeout != 0) {
1008 spin_unlock(&rpc->crpc_lock);
1012 spin_lock(&rpc->crpc_lock);
1015 LBUG(); /* impossible in single-threaded runtime */
1021 srpc_check_sends (srpc_peer_t *peer, int credits)
1023 struct list_head *q;
1024 srpc_client_rpc_t *rpc;
1026 LASSERT (credits >= 0);
1027 LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
1029 spin_lock(&peer->stp_lock);
1030 peer->stp_credits += credits;
1032 while (peer->stp_credits) {
1033 if (!list_empty(&peer->stp_ctl_rpcq))
1034 q = &peer->stp_ctl_rpcq;
1035 else if (!list_empty(&peer->stp_rpcq))
1036 q = &peer->stp_rpcq;
1040 peer->stp_credits--;
1042 rpc = list_entry(q->next, srpc_client_rpc_t, crpc_privl);
1043 list_del_init(&rpc->crpc_privl);
1044 srpc_client_rpc_decref(rpc); /* --ref for peer->*rpcq */
1046 swi_schedule_workitem(&rpc->crpc_wi);
1049 spin_unlock(&peer->stp_lock);
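/* NB the loop above always drains stp_ctl_rpcq before stp_rpcq, so framework
 * RPCs that change session state (queued that way by srpc_queue_rpc) go out
 * ahead of ordinary test RPCs to the same peer. */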
1054 srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
1056 swi_workitem_t *wi = &rpc->crpc_wi;
1057 srpc_peer_t *peer = rpc->crpc_peer;
1059 LASSERT (status != 0 || wi->wi_state == SWI_STATE_DONE);
1061 spin_lock(&rpc->crpc_lock);
1063 rpc->crpc_closed = 1;
1064 if (rpc->crpc_status == 0)
1065 rpc->crpc_status = status;
1067 srpc_del_client_rpc_timer(rpc);
1069 CDEBUG ((status == 0) ? D_NET : D_NETERROR,
1070 "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
1071 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
1072 swi_state2str(wi->wi_state), rpc->crpc_aborted, status);
1075 * No one can schedule me now since:
1076 * - RPC timer has been defused.
1077 * - all LNet events have been fired.
1078 * - crpc_closed has been set, preventing srpc_abort_rpc from scheduling me.
1080 * Cancel pending schedules and prevent future schedule attempts:
1082 LASSERT (!srpc_event_pending(rpc));
1083 swi_kill_workitem(wi);
1085 spin_unlock(&rpc->crpc_lock);
1087 (*rpc->crpc_done) (rpc);
1090 srpc_check_sends(peer, 1);
1094 /* sends an outgoing RPC */
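/* Client-side state machine: SWI_STATE_NEWBORN posts the passive reply and
 * bulk buffers and sends the request; SWI_STATE_REQUEST_SUBMITTED and
 * SWI_STATE_REQUEST_SENT wait for the request and reply events;
 * SWI_STATE_REPLY_RECEIVED waits for bulk (if any) and completes via
 * srpc_client_rpc_done(). Events may arrive in any order but are consumed in
 * this fixed order. */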
1096 srpc_send_rpc (swi_workitem_t *wi)
1099 srpc_client_rpc_t *rpc = wi->wi_data;
1100 srpc_msg_t *reply = &rpc->crpc_replymsg;
1101 int do_bulk = rpc->crpc_bulk.bk_niov > 0;
1103 LASSERT (rpc != NULL);
1104 LASSERT (wi == &rpc->crpc_wi);
1106 spin_lock(&rpc->crpc_lock);
1108 if (rpc->crpc_aborted) {
1109 spin_unlock(&rpc->crpc_lock);
1113 spin_unlock(&rpc->crpc_lock);
1115 switch (wi->wi_state) {
1118 case SWI_STATE_NEWBORN:
1119 LASSERT (!srpc_event_pending(rpc));
1121 rc = srpc_prepare_reply(rpc);
1123 srpc_client_rpc_done(rpc, rc);
1127 rc = srpc_prepare_bulk(rpc);
1130 wi->wi_state = SWI_STATE_REQUEST_SUBMITTED;
1131 rc = srpc_send_request(rpc);
1134 case SWI_STATE_REQUEST_SUBMITTED:
1135 /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
1136 * order; however, they're processed in a strict order:
1137 * rqt, rpy, and bulk. */
1138 if (!rpc->crpc_reqstev.ev_fired) break;
1140 rc = rpc->crpc_reqstev.ev_status;
1143 wi->wi_state = SWI_STATE_REQUEST_SENT;
1144 /* perhaps more events, fall thru */
1145 case SWI_STATE_REQUEST_SENT: {
1146 srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
1148 if (!rpc->crpc_replyev.ev_fired) break;
1150 rc = rpc->crpc_replyev.ev_status;
1153 if ((reply->msg_type != type &&
1154 reply->msg_type != __swab32(type)) ||
1155 (reply->msg_magic != SRPC_MSG_MAGIC &&
1156 reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
1157 CWARN ("Bad message from %s: type %u (%d expected),"
1158 " magic %u (%d expected).\n",
1159 libcfs_id2str(rpc->crpc_dest),
1160 reply->msg_type, type,
1161 reply->msg_magic, SRPC_MSG_MAGIC);
1166 if (do_bulk && reply->msg_body.reply.status != 0) {
1167 CWARN ("Remote error %d at %s, unlink bulk buffer in "
1168 "case peer didn't initiate bulk transfer\n",
1169 reply->msg_body.reply.status,
1170 libcfs_id2str(rpc->crpc_dest));
1171 LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
1174 wi->wi_state = SWI_STATE_REPLY_RECEIVED;
1176 case SWI_STATE_REPLY_RECEIVED:
1177 if (do_bulk && !rpc->crpc_bulkev.ev_fired) break;
1179 rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;
1181 /* Bulk buffer was unlinked due to remote error. Clear error
1182 * since reply buffer still contains valid data.
1183 * NB rpc->crpc_done shouldn't look into bulk data in case of remote error */
1185 if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
1186 rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
1189 wi->wi_state = SWI_STATE_DONE;
1190 srpc_client_rpc_done(rpc, rc);
1195 spin_lock(&rpc->crpc_lock);
1196 srpc_abort_rpc(rpc, rc);
1197 spin_unlock(&rpc->crpc_lock);
1201 if (rpc->crpc_aborted) {
1202 LNetMDUnlink(rpc->crpc_reqstmdh);
1203 LNetMDUnlink(rpc->crpc_replymdh);
1204 LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
1206 if (!srpc_event_pending(rpc)) {
1207 srpc_client_rpc_done(rpc, -EINTR);
1215 srpc_create_client_rpc (lnet_process_id_t peer, int service,
1216 int nbulkiov, int bulklen,
1217 void (*rpc_done)(srpc_client_rpc_t *),
1218 void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
1220 srpc_client_rpc_t *rpc;
1222 LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
1223 crpc_bulk.bk_iovs[nbulkiov]));
1227 srpc_init_client_rpc(rpc, peer, service, nbulkiov,
1228 bulklen, rpc_done, rpc_fini, priv);
1232 /* called with rpc->crpc_lock held */
1234 srpc_queue_rpc (srpc_peer_t *peer, srpc_client_rpc_t *rpc)
1236 int service = rpc->crpc_service;
1238 LASSERT (peer->stp_nid == rpc->crpc_dest.nid);
1239 LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
1241 rpc->crpc_peer = peer;
1243 spin_lock(&peer->stp_lock);
1245 /* Framework RPCs that alter session state shall take precedence
1246 * over test RPCs and framework query RPCs */
1247 if (service <= SRPC_FRAMEWORK_SERVICE_MAX_ID &&
1248 service != SRPC_SERVICE_DEBUG &&
1249 service != SRPC_SERVICE_QUERY_STAT)
1250 list_add_tail(&rpc->crpc_privl, &peer->stp_ctl_rpcq);
1252 list_add_tail(&rpc->crpc_privl, &peer->stp_rpcq);
1254 srpc_client_rpc_addref(rpc); /* ++ref for peer->*rpcq */
1255 spin_unlock(&peer->stp_lock);
1259 /* called with rpc->crpc_lock held */
1261 srpc_abort_rpc (srpc_client_rpc_t *rpc, int why)
1263 srpc_peer_t *peer = rpc->crpc_peer;
1267 if (rpc->crpc_aborted || /* already aborted */
1268 rpc->crpc_closed) /* callback imminent */
1272 "Aborting RPC: service %d, peer %s, state %s, why %d\n",
1273 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
1274 swi_state2str(rpc->crpc_wi.wi_state), why);
1276 rpc->crpc_aborted = 1;
1277 rpc->crpc_status = why;
1280 spin_lock(&peer->stp_lock);
1282 if (!list_empty(&rpc->crpc_privl)) { /* still queued */
1283 list_del_init(&rpc->crpc_privl);
1284 srpc_client_rpc_decref(rpc); /* --ref for peer->*rpcq */
1285 rpc->crpc_peer = NULL; /* no credit taken */
1288 spin_unlock(&peer->stp_lock);
1291 swi_schedule_workitem(&rpc->crpc_wi);
1295 /* called with rpc->crpc_lock held */
1297 srpc_post_rpc (srpc_client_rpc_t *rpc)
1301 LASSERT (!rpc->crpc_aborted);
1302 LASSERT (rpc->crpc_peer == NULL);
1303 LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
1304 LASSERT ((rpc->crpc_bulk.bk_len & ~CFS_PAGE_MASK) == 0);
1306 CDEBUG (D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
1307 libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
1310 srpc_add_client_rpc_timer(rpc);
1312 peer = srpc_nid2peer(rpc->crpc_dest.nid);
1314 srpc_abort_rpc(rpc, -ENOMEM);
1318 srpc_queue_rpc(peer, rpc);
1320 spin_unlock(&rpc->crpc_lock);
1321 srpc_check_sends(peer, 0);
1322 spin_lock(&rpc->crpc_lock);
1328 srpc_send_reply (srpc_server_rpc_t *rpc)
1330 srpc_event_t *ev = &rpc->srpc_ev;
1331 srpc_msg_t *msg = &rpc->srpc_replymsg;
1332 srpc_buffer_t *buffer = rpc->srpc_reqstbuf;
1333 srpc_service_t *sv = rpc->srpc_service;
1337 LASSERT (buffer != NULL);
1338 rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
1340 spin_lock(&sv->sv_lock);
1342 if (!sv->sv_shuttingdown &&
1343 sv->sv_id > SRPC_FRAMEWORK_SERVICE_MAX_ID) {
1344 /* Repost buffer before replying since test client
1345 * might send me another RPC once it gets the reply */
1346 if (srpc_service_post_buffer(sv, buffer) != 0)
1347 CWARN ("Failed to repost %s buffer\n", sv->sv_name);
1348 rpc->srpc_reqstbuf = NULL;
1351 spin_unlock(&sv->sv_lock);
1355 ev->ev_type = SRPC_REPLY_SENT;
1357 msg->msg_magic = SRPC_MSG_MAGIC;
1358 msg->msg_version = SRPC_MSG_VERSION;
1359 msg->msg_type = srpc_service2reply(sv->sv_id);
1361 rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
1362 sizeof(*msg), LNET_MD_OP_PUT,
1363 rpc->srpc_peer, rpc->srpc_self,
1364 &rpc->srpc_replymdh, ev);
1366 ev->ev_fired = 1; /* no more event expected */
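/* NB the reply is PUT actively to the rpyid the client embedded in its
 * request and posted passively in srpc_prepare_reply(); the server side needs
 * no matching ME, only the LNetMDBind() done in srpc_post_active_rdma(). */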
1370 /* When in the kernel this is always called with LNET_LOCK() held, and in thread context */
1372 srpc_lnet_ev_handler (lnet_event_t *ev)
1374 srpc_event_t *rpcev = ev->md.user_ptr;
1375 srpc_client_rpc_t *crpc;
1376 srpc_server_rpc_t *srpc;
1377 srpc_buffer_t *buffer;
1380 srpc_msg_type_t type;
1382 LASSERT (!in_interrupt());
1384 if (ev->status != 0) {
1385 spin_lock(&srpc_data.rpc_glock);
1386 srpc_data.rpc_counters.errors++;
1387 spin_unlock(&srpc_data.rpc_glock);
1390 rpcev->ev_lnet = ev->type;
1392 switch (rpcev->ev_type) {
1395 case SRPC_REQUEST_SENT:
1396 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
1397 spin_lock(&srpc_data.rpc_glock);
1398 srpc_data.rpc_counters.rpcs_sent++;
1399 spin_unlock(&srpc_data.rpc_glock);
1401 case SRPC_REPLY_RCVD:
1402 case SRPC_BULK_REQ_RCVD:
1403 crpc = rpcev->ev_data;
1405 LASSERT (rpcev == &crpc->crpc_reqstev ||
1406 rpcev == &crpc->crpc_replyev ||
1407 rpcev == &crpc->crpc_bulkev);
1409 spin_lock(&crpc->crpc_lock);
1411 LASSERT (rpcev->ev_fired == 0);
1412 rpcev->ev_fired = 1;
1413 rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
1414 -EINTR : ev->status;
1415 swi_schedule_workitem(&crpc->crpc_wi);
1417 spin_unlock(&crpc->crpc_lock);
1420 case SRPC_REQUEST_RCVD:
1421 sv = rpcev->ev_data;
1423 LASSERT (rpcev == &sv->sv_ev);
1425 spin_lock(&sv->sv_lock);
1427 LASSERT (ev->unlinked);
1428 LASSERT (ev->type == LNET_EVENT_PUT ||
1429 ev->type == LNET_EVENT_UNLINK);
1430 LASSERT (ev->type != LNET_EVENT_UNLINK ||
1431 sv->sv_shuttingdown);
1433 buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
1434 buffer->buf_peer = ev->initiator;
1435 buffer->buf_self = ev->target.nid;
1437 sv->sv_nposted_msg--;
1438 LASSERT (sv->sv_nposted_msg >= 0);
1440 if (sv->sv_shuttingdown) {
1441 /* Leave buffer on sv->sv_posted_msgq since
1442 * srpc_finish_service needs to traverse it. */
1443 spin_unlock(&sv->sv_lock);
1447 list_del(&buffer->buf_list); /* from sv->sv_posted_msgq */
1448 msg = &buffer->buf_msg;
1449 type = srpc_service2request(sv->sv_id);
1451 if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
1452 (msg->msg_type != type &&
1453 msg->msg_type != __swab32(type)) ||
1454 (msg->msg_magic != SRPC_MSG_MAGIC &&
1455 msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
1456 CERROR ("Dropping RPC (%s) from %s: "
1457 "status %d mlength %d type %u magic %u.\n",
1458 sv->sv_name, libcfs_id2str(ev->initiator),
1459 ev->status, ev->mlength,
1460 msg->msg_type, msg->msg_magic);
1462 /* NB srpc_service_recycle_buffer might drop sv_lock;
1463 * sv_nposted_msg++ serves as an implicit reference to prevent
1464 * sv from disappearing under me */
1465 sv->sv_nposted_msg++;
1466 srpc_service_recycle_buffer(sv, buffer);
1467 sv->sv_nposted_msg--;
1468 spin_unlock(&sv->sv_lock);
1470 if (ev->status == 0) { /* status!=0 counted already */
1471 spin_lock(&srpc_data.rpc_glock);
1472 srpc_data.rpc_counters.errors++;
1473 spin_unlock(&srpc_data.rpc_glock);
1478 if (!list_empty(&sv->sv_free_rpcq)) {
1479 srpc = list_entry(sv->sv_free_rpcq.next,
1480 srpc_server_rpc_t, srpc_list);
1481 list_del(&srpc->srpc_list);
1483 srpc_init_server_rpc(srpc, sv, buffer);
1484 list_add_tail(&srpc->srpc_list, &sv->sv_active_rpcq);
1485 srpc_schedule_server_rpc(srpc);
1487 list_add_tail(&buffer->buf_list, &sv->sv_blocked_msgq);
1490 spin_unlock(&sv->sv_lock);
1492 spin_lock(&srpc_data.rpc_glock);
1493 srpc_data.rpc_counters.rpcs_rcvd++;
1494 spin_unlock(&srpc_data.rpc_glock);
1497 case SRPC_BULK_GET_RPLD:
1498 LASSERT (ev->type == LNET_EVENT_SEND ||
1499 ev->type == LNET_EVENT_REPLY ||
1500 ev->type == LNET_EVENT_UNLINK);
1502 if (ev->type == LNET_EVENT_SEND &&
1503 ev->status == 0 && !ev->unlinked)
1504 break; /* wait for the final LNET_EVENT_REPLY */
1506 case SRPC_BULK_PUT_SENT:
1507 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
1508 spin_lock(&srpc_data.rpc_glock);
1510 if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
1511 srpc_data.rpc_counters.bulk_get += ev->mlength;
1513 srpc_data.rpc_counters.bulk_put += ev->mlength;
1515 spin_unlock(&srpc_data.rpc_glock);
1517 case SRPC_REPLY_SENT:
1518 srpc = rpcev->ev_data;
1519 sv = srpc->srpc_service;
1521 LASSERT (rpcev == &srpc->srpc_ev);
1523 spin_lock(&sv->sv_lock);
1524 rpcev->ev_fired = 1;
1525 rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
1526 -EINTR : ev->status;
1527 srpc_schedule_server_rpc(srpc);
1528 spin_unlock(&sv->sv_lock);
1538 srpc_check_event (int timeout)
1544 rc = LNetEQPoll(&srpc_data.rpc_lnet_eq, 1,
1545 timeout * 1000, &ev, &i);
1546 if (rc == 0) return 0;
1548 LASSERT (rc == -EOVERFLOW || rc == 1);
1550 /* We can't afford to miss any events... */
1551 if (rc == -EOVERFLOW) {
1552 CERROR ("Dropped an event!!!\n");
1556 srpc_lnet_ev_handler(&ev);
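/* NB srpc_check_event() appears to be the polling counterpart of the
 * LNET_EQ_HANDLER_NONE EQ allocated at startup (presumably the userspace
 * build): it polls with LNetEQPoll() and hands each event to
 * srpc_lnet_ev_handler() itself, whereas the kernel EQ calls that handler
 * directly. */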
1568 memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
1569 spin_lock_init(&srpc_data.rpc_glock);
1571 /* 1 second pause to avoid timestamp reuse */
1572 cfs_pause(cfs_time_seconds(1));
1573 srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
1575 srpc_data.rpc_state = SRPC_STATE_NONE;
1577 LIBCFS_ALLOC(srpc_data.rpc_peers,
1578 sizeof(struct list_head) * SRPC_PEER_HASH_SIZE);
1579 if (srpc_data.rpc_peers == NULL) {
1580 CERROR ("Failed to alloc peer hash.\n");
1584 for (i = 0; i < SRPC_PEER_HASH_SIZE; i++)
1585 CFS_INIT_LIST_HEAD(&srpc_data.rpc_peers[i]);
1588 rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
1590 if (the_lnet.ln_server_mode_flag)
1591 rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
1593 rc = LNetNIInit(getpid() | LNET_PID_USERFLAG);
1596 CERROR ("LNetNIInit() has failed: %d\n", rc);
1597 LIBCFS_FREE(srpc_data.rpc_peers,
1598 sizeof(struct list_head) * SRPC_PEER_HASH_SIZE);
1602 srpc_data.rpc_state = SRPC_STATE_NI_INIT;
1604 srpc_data.rpc_lnet_eq = LNET_EQ_NONE;
1606 rc = LNetEQAlloc(16, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
1608 rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &srpc_data.rpc_lnet_eq);
1611 CERROR("LNetEQAlloc() has failed: %d\n", rc);
1615 rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
1618 srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
1624 srpc_data.rpc_state = SRPC_STATE_WI_INIT;
1632 srpc_data.rpc_state = SRPC_STATE_RUNNING;
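/* NB the startup path advances rpc_state through NI_INIT, EQ_INIT, WI_INIT
 * and finally RUNNING; srpc_shutdown() below switches on the recorded state
 * and the cases fall through, so only the steps that actually completed get
 * undone. */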
1638 srpc_shutdown (void)
1644 state = srpc_data.rpc_state;
1645 srpc_data.rpc_state = SRPC_STATE_STOPPING;
1650 case SRPC_STATE_RUNNING:
1651 spin_lock(&srpc_data.rpc_glock);
1653 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
1654 srpc_service_t *sv = srpc_data.rpc_services[i];
1656 LASSERTF (sv == NULL,
1657 "service not empty: id %d, name %s\n",
1661 spin_unlock(&srpc_data.rpc_glock);
1665 case SRPC_STATE_WI_INIT:
1668 case SRPC_STATE_EQ_INIT:
1669 rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
1671 rc = LNetEQFree(srpc_data.rpc_lnet_eq);
1672 LASSERT (rc == 0); /* the EQ should have no user by now */
1674 case SRPC_STATE_NI_INIT:
1679 /* srpc_peer_t's are kept in the peer hash until shutdown */
1680 for (i = 0; i < SRPC_PEER_HASH_SIZE; i++) {
1683 while (!list_empty(&srpc_data.rpc_peers[i])) {
1684 peer = list_entry(srpc_data.rpc_peers[i].next,
1685 srpc_peer_t, stp_list);
1686 list_del(&peer->stp_list);
1688 LASSERT (list_empty(&peer->stp_rpcq));
1689 LASSERT (list_empty(&peer->stp_ctl_rpcq));
1690 LASSERT (peer->stp_credits == SRPC_PEER_CREDITS);
1692 LIBCFS_FREE(peer, sizeof(srpc_peer_t));
1696 LIBCFS_FREE(srpc_data.rpc_peers,
1697 sizeof(struct list_head) * SRPC_PEER_HASH_SIZE);