/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/selftest/rpc.c
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 *
 * 2012-05-13: Liang Zhen <liang@whamcloud.com>
 * - percpt data for service to improve smp performance
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "selftest.h"

typedef enum {
        SRPC_STATE_NONE,
        SRPC_STATE_NI_INIT,
        SRPC_STATE_EQ_INIT,
        SRPC_STATE_RUNNING,
        SRPC_STATE_STOPPING,
} srpc_state_t;
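/* Global state of the RPC module: a single instance shared by all
 * services and client RPCs, protected by rpc_glock. rpc_matchbits seeds
 * the unique match bits handed out by srpc_next_id(). */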
static struct smoketest_rpc {
        spinlock_t       rpc_glock;     /* global lock */
        srpc_service_t  *rpc_services[SRPC_SERVICE_MAX_ID + 1];
        lnet_handle_eq_t rpc_lnet_eq;   /* _the_ LNet event queue */
        srpc_state_t     rpc_state;
        srpc_counters_t  rpc_counters;
        __u64            rpc_matchbits; /* matchbits counter */
} srpc_data;

static inline int
srpc_serv_portal(int svc_id)
{
        return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
               SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
}

int srpc_handle_rpc(swi_workitem_t *wi);

void srpc_get_counters(srpc_counters_t *cnt)
{
        spin_lock(&srpc_data.rpc_glock);
        *cnt = srpc_data.rpc_counters;
        spin_unlock(&srpc_data.rpc_glock);
}

void srpc_set_counters(const srpc_counters_t *cnt)
{
        spin_lock(&srpc_data.rpc_glock);
        srpc_data.rpc_counters = *cnt;
        spin_unlock(&srpc_data.rpc_glock);
}

static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int off, int nob)
{
        LASSERT(off < PAGE_SIZE);
        LASSERT(nob > 0 && nob <= PAGE_SIZE);

        bk->bk_iovs[i].kiov_offset = off;
        bk->bk_iovs[i].kiov_page   = pg;
        bk->bk_iovs[i].kiov_len    = nob;
        return nob;
}

void
srpc_free_bulk(srpc_bulk_t *bk)
{
        int i;
        struct page *pg;

        LASSERT(bk != NULL);

        for (i = 0; i < bk->bk_niov; i++) {
                pg = bk->bk_iovs[i].kiov_page;
                if (pg == NULL)
                        break;

                __free_page(pg);
        }

        LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
}
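/* Allocate a bulk descriptor of bulk_npg pages on CPT cpt; the first
 * page is filled starting at bulk_off. On any page allocation failure
 * the partially built descriptor is freed and NULL is returned. */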
srpc_bulk_t *
srpc_alloc_bulk(int cpt, unsigned bulk_off, unsigned bulk_npg,
                unsigned bulk_len, int sink)
{
        srpc_bulk_t *bk;
        int i;

        LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);

        LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
                         offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
        if (bk == NULL) {
                CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
                return NULL;
        }

        memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
        bk->bk_sink = sink;
        bk->bk_len  = bulk_len;
        bk->bk_niov = bulk_npg;

        for (i = 0; i < bulk_npg; i++) {
                struct page *pg;
                int nob;

                pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_KERNEL);
                if (pg == NULL) {
                        CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
                        srpc_free_bulk(bk);
                        return NULL;
                }

                nob = min_t(unsigned, bulk_off + bulk_len, PAGE_SIZE) -
                      bulk_off;
                srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
                bulk_len -= nob;
                bulk_off = 0;
        }

        return bk;
}

static inline __u64
srpc_next_id(void)
{
        __u64 id;

        spin_lock(&srpc_data.rpc_glock);
        id = srpc_data.rpc_matchbits++;
        spin_unlock(&srpc_data.rpc_glock);
        return id;
}
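/* (Re)initialize a server-side RPC to handle the request that arrived
 * in buffer. Framework RPCs run on the serial scheduler; test RPCs run
 * on the per-CPT test scheduler. */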
static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
                     struct srpc_service_cd *scd,
                     struct srpc_buffer *buffer)
{
        memset(rpc, 0, sizeof(*rpc));
        swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
                          srpc_serv_is_framework(scd->scd_svc) ?
                          lst_sched_serial : lst_sched_test[scd->scd_cpt]);

        rpc->srpc_ev.ev_fired = 1; /* no event expected now */

        rpc->srpc_scd      = scd;
        rpc->srpc_reqstbuf = buffer;
        rpc->srpc_peer     = buffer->buf_peer;
        rpc->srpc_self     = buffer->buf_self;
        LNetInvalidateHandle(&rpc->srpc_replymdh);
}

static void
srpc_service_fini(struct srpc_service *svc)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        struct srpc_buffer *buf;
        struct list_head *q;
        int i;

        if (svc->sv_cpt_data == NULL)
                return;

        cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
                while (1) {
                        if (!list_empty(&scd->scd_buf_posted))
                                q = &scd->scd_buf_posted;
                        else if (!list_empty(&scd->scd_buf_blocked))
                                q = &scd->scd_buf_blocked;
                        else
                                break;

                        while (!list_empty(q)) {
                                buf = list_entry(q->next,
                                                 struct srpc_buffer,
                                                 buf_list);
                                list_del(&buf->buf_list);
                                LIBCFS_FREE(buf, sizeof(*buf));
                        }
                }

                LASSERT(list_empty(&scd->scd_rpc_active));

                while (!list_empty(&scd->scd_rpc_free)) {
                        rpc = list_entry(scd->scd_rpc_free.next,
                                         struct srpc_server_rpc,
                                         srpc_list);
                        list_del(&rpc->srpc_list);
                        LIBCFS_FREE(rpc, sizeof(*rpc));
                }
        }

        cfs_percpt_free(svc->sv_cpt_data);
        svc->sv_cpt_data = NULL;
}

static int
srpc_service_nrpcs(struct srpc_service *svc)
{
        int nrpcs = svc->sv_wi_total / svc->sv_ncpts;

        return srpc_serv_is_framework(svc) ?
               max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
}

int srpc_add_buffer(struct swi_workitem *wi);
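/* Set up per-CPT service data for svc: allocate the percpt array,
 * initialize locks and queues, and pre-allocate srpc_service_nrpcs()
 * server RPC descriptors per partition (framework services use
 * partition 0 only). */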
static int
srpc_service_init(struct srpc_service *svc)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        int nrpcs;
        int i;
        int j;

        svc->sv_shuttingdown = 0;

        svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
                                            sizeof(struct srpc_service_cd));
        if (svc->sv_cpt_data == NULL)
                return -ENOMEM;

        svc->sv_ncpts = srpc_serv_is_framework(svc) ?
                        1 : cfs_cpt_number(lnet_cpt_table());
        nrpcs = srpc_service_nrpcs(svc);

        cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
                scd->scd_cpt = i;
                scd->scd_svc = svc;
                spin_lock_init(&scd->scd_lock);
                INIT_LIST_HEAD(&scd->scd_rpc_free);
                INIT_LIST_HEAD(&scd->scd_rpc_active);
                INIT_LIST_HEAD(&scd->scd_buf_posted);
                INIT_LIST_HEAD(&scd->scd_buf_blocked);

                scd->scd_ev.ev_data = scd;
                scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;

                /* NB: don't use lst_sched_serial for adding buffers;
                 * see details in srpc_service_add_buffers() */
                swi_init_workitem(&scd->scd_buf_wi, scd,
                                  srpc_add_buffer, lst_sched_test[i]);

                if (i != 0 && srpc_serv_is_framework(svc)) {
                        /* NB: a framework service only needs srpc_service_cd
                         * for one partition, but we allocate for all to
                         * simplify the implementation; it wastes a little
                         * memory but nobody should care */
                        continue;
                }

                for (j = 0; j < nrpcs; j++) {
                        LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
                                         i, sizeof(*rpc));
                        if (rpc == NULL) {
                                srpc_service_fini(svc);
                                return -ENOMEM;
                        }
                        list_add(&rpc->srpc_list, &scd->scd_rpc_free);
                }
        }

        return 0;
}

int
srpc_add_service(struct srpc_service *sv)
{
        int id = sv->sv_id;

        LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);

        if (srpc_service_init(sv) != 0)
                return -ENOMEM;

        spin_lock(&srpc_data.rpc_glock);

        LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

        if (srpc_data.rpc_services[id] != NULL) {
                spin_unlock(&srpc_data.rpc_glock);
                goto failed;
        }

        srpc_data.rpc_services[id] = sv;
        spin_unlock(&srpc_data.rpc_glock);

        CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
        return 0;

failed:
        srpc_service_fini(sv);
        return -EBUSY;
}

int
srpc_remove_service(srpc_service_t *sv)
{
        int id = sv->sv_id;

        spin_lock(&srpc_data.rpc_glock);

        if (srpc_data.rpc_services[id] != sv) {
                spin_unlock(&srpc_data.rpc_glock);
                return -ENOENT;
        }

        srpc_data.rpc_services[id] = NULL;
        spin_unlock(&srpc_data.rpc_glock);
        return 0;
}
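/* Post a passive buffer: attach an ME on portal matching (peer,
 * matchbits), then attach an MD over buf so an incoming PUT or GET can
 * land in it. Events are delivered to ev via the module's single EQ. */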
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
                       int len, int options, lnet_process_id_t peer,
                       lnet_handle_md_t *mdh, srpc_event_t *ev)
{
        int rc;
        lnet_md_t md;
        lnet_handle_me_t meh;

        rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
                          local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
        if (rc != 0) {
                CERROR("LNetMEAttach failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                return -ENOMEM;
        }

        md.threshold = 1;
        md.user_ptr  = ev;
        md.start     = buf;
        md.length    = len;
        md.options   = options;
        md.eq_handle = srpc_data.rpc_lnet_eq;

        rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
        if (rc != 0) {
                CERROR("LNetMDAttach failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);

                rc = LNetMEUnlink(meh);
                LASSERT(rc == 0);
                return -ENOMEM;
        }

        CDEBUG(D_NET,
               "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
               libcfs_id2str(peer), portal, matchbits);
        return 0;
}

static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
                      int options, lnet_process_id_t peer, lnet_nid_t self,
                      lnet_handle_md_t *mdh, srpc_event_t *ev)
{
        int rc;
        lnet_md_t md;

        md.user_ptr  = ev;
        md.start     = buf;
        md.length    = len;
        md.eq_handle = srpc_data.rpc_lnet_eq;
        md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
        md.options   = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);

        rc = LNetMDBind(md, LNET_UNLINK, mdh);
        if (rc != 0) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                return -ENOMEM;
        }

        /* This is kind of an abuse of the LNET_MD_OP_{PUT,GET} options:
         * they're only meaningful for MDs attached to an ME (i.e. passive
         * buffers) */
        if ((options & LNET_MD_OP_PUT) != 0) {
                rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
                             portal, matchbits, 0, 0);
        } else {
                LASSERT((options & LNET_MD_OP_GET) != 0);

                rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
        }

        if (rc != 0) {
                CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
                       ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
                       libcfs_id2str(peer), portal, matchbits, rc);

                /* The forthcoming unlink event will complete this operation
                 * with failure, so fall through and return success here. */
                rc = LNetMDUnlink(*mdh);
                LASSERT(rc == 0);
        }

        CDEBUG(D_NET,
               "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
               libcfs_id2str(peer), portal, matchbits);
        return 0;
}

static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
                         lnet_handle_md_t *mdh, srpc_event_t *ev)
{
        lnet_process_id_t any = {0};

        any.nid = LNET_NID_ANY;
        any.pid = LNET_PID_ANY;

        return srpc_post_passive_rdma(srpc_serv_portal(service),
                                      local, service, buf, len,
                                      LNET_MD_OP_PUT, any, mdh, ev);
}
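/* Post one request buffer on the service portal. Called and returns
 * with scd->scd_lock held, but drops it around the LNet calls; callers
 * must tolerate that window. */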
static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
        struct srpc_service *sv = scd->scd_svc;
        struct srpc_msg *msg = &buf->buf_msg;
        int rc;

        LNetInvalidateHandle(&buf->buf_mdh);
        list_add(&buf->buf_list, &scd->scd_buf_posted);
        scd->scd_buf_nposted++;
        spin_unlock(&scd->scd_lock);

        rc = srpc_post_passive_rqtbuf(sv->sv_id,
                                      !srpc_serv_is_framework(sv),
                                      msg, sizeof(*msg), &buf->buf_mdh,
                                      &scd->scd_ev);

        /* At this point, an RPC (new or delayed) may have arrived in
         * msg and its event handler has been called. So we must add
         * buf to scd_buf_posted _before_ dropping scd_lock */

        spin_lock(&scd->scd_lock);

        if (rc == 0) {
                if (!sv->sv_shuttingdown)
                        return 0;

                spin_unlock(&scd->scd_lock);
                /* srpc_shutdown_service might have tried to unlink me
                 * when my buf_mdh was still invalid */
                LNetMDUnlink(buf->buf_mdh);
                spin_lock(&scd->scd_lock);
                return 0;
        }

        scd->scd_buf_nposted--;
        if (sv->sv_shuttingdown)
                return rc; /* don't allow to change scd_buf_posted */

        list_del(&buf->buf_list);
        spin_unlock(&scd->scd_lock);

        LIBCFS_FREE(buf, sizeof(*buf));

        spin_lock(&scd->scd_lock);
        return rc;
}
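/* Workitem callback: convert scd_buf_adjust credits into posted buffers.
 * On failure the error is recorded in scd_buf_err/scd_buf_err_stamp so
 * the event handler can re-enable buffer growth later. */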
int
srpc_add_buffer(struct swi_workitem *wi)
{
        struct srpc_service_cd *scd = wi->swi_workitem.wi_data;
        struct srpc_buffer *buf;
        int rc = 0;

        /* This is called by workitem scheduler threads; these threads
         * have CPT affinity set, so buffers will be posted on the
         * CPT-local list of the portal */
        spin_lock(&scd->scd_lock);

        while (scd->scd_buf_adjust > 0 &&
               !scd->scd_svc->sv_shuttingdown) {
                scd->scd_buf_adjust--; /* consume it */
                scd->scd_buf_posting++;

                spin_unlock(&scd->scd_lock);

                LIBCFS_ALLOC(buf, sizeof(*buf));
                if (buf == NULL) {
                        CERROR("Failed to add new buf to service: %s\n",
                               scd->scd_svc->sv_name);
                        spin_lock(&scd->scd_lock);
                        rc = -ENOMEM;
                        break;
                }

                spin_lock(&scd->scd_lock);
                if (scd->scd_svc->sv_shuttingdown) {
                        spin_unlock(&scd->scd_lock);
                        LIBCFS_FREE(buf, sizeof(*buf));

                        spin_lock(&scd->scd_lock);
                        rc = -ESHUTDOWN;
                        break;
                }

                rc = srpc_service_post_buffer(scd, buf);
                if (rc != 0)
                        break; /* buf has been freed inside */

                LASSERT(scd->scd_buf_posting > 0);
                scd->scd_buf_posting--;
                scd->scd_buf_total++;
                scd->scd_buf_low = MAX(2, scd->scd_buf_total / 4);
        }

        if (rc != 0) {
                scd->scd_buf_err_stamp = cfs_time_current_sec();
                scd->scd_buf_err = rc;

                LASSERT(scd->scd_buf_posting > 0);
                scd->scd_buf_posting--;
        }

        spin_unlock(&scd->scd_lock);
        return 0;
}
int
srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
{
        struct srpc_service_cd *scd;
        int rc = 0;
        int i;

        LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);

                scd->scd_buf_err = 0;
                scd->scd_buf_err_stamp = 0;
                scd->scd_buf_posting = 0;
                scd->scd_buf_adjust = nbuffer;
                /* start to post buffers */
                swi_schedule_workitem(&scd->scd_buf_wi);
                spin_unlock(&scd->scd_lock);

                /* a framework service only posts buffers on one partition */
                if (srpc_serv_is_framework(sv))
                        break;
        }

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);
                /*
                 * NB: srpc_service_add_buffers() can be called from the
                 * thread context of lst_sched_serial, and sleeping in a WI
                 * scheduler thread is normally forbidden because it blocks
                 * that scheduler thread from doing anything else; worse, it
                 * could deadlock if it waits on the result of another WI of
                 * the same scheduler. However, it is safe here because
                 * scd_buf_wi is scheduled by a thread in a different WI
                 * scheduler (lst_sched_test), so there is no deadlock risk,
                 * though this could block all WIs pending on
                 * lst_sched_serial for a moment, which is not good but not
                 * fatal.
                 */
                lst_wait_until(scd->scd_buf_err != 0 ||
                               (scd->scd_buf_adjust == 0 &&
                                scd->scd_buf_posting == 0),
                               scd->scd_lock, "waiting for adding buffer\n");

                if (scd->scd_buf_err != 0 && rc == 0)
                        rc = scd->scd_buf_err;

                spin_unlock(&scd->scd_lock);
        }

        return rc;
}
void
srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
{
        struct srpc_service_cd *scd;
        int num;
        int i;

        LASSERT(!sv->sv_shuttingdown);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);

                num = scd->scd_buf_total + scd->scd_buf_posting;
                scd->scd_buf_adjust -= min(nbuffer, num);

                spin_unlock(&scd->scd_lock);
        }
}

/* returns 1 if sv has finished, otherwise 0 */
int
srpc_finish_service(struct srpc_service *sv)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        int i;

        LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);
                if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
                        spin_unlock(&scd->scd_lock);
                        return 0;
                }

                if (scd->scd_buf_nposted > 0) {
                        CDEBUG(D_NET,
                               "waiting for %d posted buffers to unlink\n",
                               scd->scd_buf_nposted);
                        spin_unlock(&scd->scd_lock);
                        return 0;
                }

                if (list_empty(&scd->scd_rpc_active)) {
                        spin_unlock(&scd->scd_lock);
                        continue;
                }

                rpc = list_entry(scd->scd_rpc_active.next,
                                 struct srpc_server_rpc, srpc_list);
                CNETERR("Active RPC %p on shutdown: sv %s, peer %s, "
                        "wi %s scheduled %d running %d, "
                        "ev fired %d type %d status %d lnet %d\n",
                        rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
                        swi_state2str(rpc->srpc_wi.swi_state),
                        rpc->srpc_wi.swi_workitem.wi_scheduled,
                        rpc->srpc_wi.swi_workitem.wi_running,
                        rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
                        rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
                spin_unlock(&scd->scd_lock);
                return 0;
        }

        /* no lock needed from now on */
        srpc_service_fini(sv);
        return 1;
}
/* called with scd->scd_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
__must_hold(&scd->scd_lock)
{
        if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
                if (srpc_service_post_buffer(scd, buf) != 0) {
                        CWARN("Failed to post %s buffer\n",
                              scd->scd_svc->sv_name);
                }
                return;
        }

        /* service is shutting down, or we want to recycle some buffers */
        scd->scd_buf_total--;

        if (scd->scd_buf_adjust < 0) {
                scd->scd_buf_adjust++;
                if (scd->scd_buf_adjust < 0 &&
                    scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
                        CDEBUG(D_INFO,
                               "Try to recycle %d buffers but nothing left\n",
                               scd->scd_buf_adjust);
                        scd->scd_buf_adjust = 0;
                }
        }

        spin_unlock(&scd->scd_lock);
        LIBCFS_FREE(buf, sizeof(*buf));
        spin_lock(&scd->scd_lock);
}
void
srpc_abort_service(struct srpc_service *sv)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        int i;

        CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
               sv->sv_id, sv->sv_name);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);

                /* schedule in-flight RPCs to notice the abort, NB:
                 * racing with incoming RPCs; a complete fix should make
                 * test RPCs carry the session ID in their headers */
                list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
                        rpc->srpc_aborted = 1;
                        swi_schedule_workitem(&rpc->srpc_wi);
                }

                spin_unlock(&scd->scd_lock);
        }
}

void
srpc_shutdown_service(srpc_service_t *sv)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        srpc_buffer_t *buf;
        int i;

        CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
               sv->sv_id, sv->sv_name);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
                spin_lock(&scd->scd_lock);

        sv->sv_shuttingdown = 1; /* i.e. no new active RPC */

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
                spin_unlock(&scd->scd_lock);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);

                /* schedule in-flight RPCs to notice the shutdown */
                list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
                        swi_schedule_workitem(&rpc->srpc_wi);

                spin_unlock(&scd->scd_lock);

                /* OK to traverse scd_buf_posted without lock, since no one
                 * touches scd_buf_posted now */
                list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
                        LNetMDUnlink(buf->buf_mdh);
        }
}
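/* Client-side posting helpers. Each arms one leg of an RPC (request
 * PUT, passive reply buffer, passive bulk buffer) and marks the matching
 * srpc_event pending until the LNet event arrives; on -ENOMEM the event
 * is marked fired since no completion will follow. */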
static int
srpc_send_request(srpc_client_rpc_t *rpc)
{
        srpc_event_t *ev = &rpc->crpc_reqstev;
        int rc;

        ev->ev_fired = 0;
        ev->ev_data  = rpc;
        ev->ev_type  = SRPC_REQUEST_SENT;

        rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
                                   rpc->crpc_service, &rpc->crpc_reqstmsg,
                                   sizeof(srpc_msg_t), LNET_MD_OP_PUT,
                                   rpc->crpc_dest, LNET_NID_ANY,
                                   &rpc->crpc_reqstmdh, ev);
        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1; /* no more event expected */
        }
        return rc;
}

static int
srpc_prepare_reply(srpc_client_rpc_t *rpc)
{
        srpc_event_t *ev = &rpc->crpc_replyev;
        __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
        int rc;

        ev->ev_fired = 0;
        ev->ev_data  = rpc;
        ev->ev_type  = SRPC_REPLY_RCVD;

        *id = srpc_next_id();

        rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
                                    &rpc->crpc_replymsg, sizeof(srpc_msg_t),
                                    LNET_MD_OP_PUT, rpc->crpc_dest,
                                    &rpc->crpc_replymdh, ev);
        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1; /* no more event expected */
        }
        return rc;
}

static int
srpc_prepare_bulk(srpc_client_rpc_t *rpc)
{
        srpc_bulk_t *bk = &rpc->crpc_bulk;
        srpc_event_t *ev = &rpc->crpc_bulkev;
        __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
        int rc;
        int opt;

        LASSERT(bk->bk_niov <= LNET_MAX_IOV);

        if (bk->bk_niov == 0)
                return 0; /* nothing to do */

        opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
        opt |= LNET_MD_KIOV;

        ev->ev_fired = 0;
        ev->ev_data  = rpc;
        ev->ev_type  = SRPC_BULK_REQ_RCVD;

        *id = srpc_next_id();

        rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
                                    &bk->bk_iovs[0], bk->bk_niov, opt,
                                    rpc->crpc_dest, &bk->bk_mdh, ev);
        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1; /* no more event expected */
        }
        return rc;
}
static int
srpc_do_bulk(srpc_server_rpc_t *rpc)
{
        srpc_event_t *ev = &rpc->srpc_ev;
        srpc_bulk_t *bk = rpc->srpc_bulk;
        __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
        int rc;
        int opt;

        LASSERT(bk != NULL);

        opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
        opt |= LNET_MD_KIOV;

        ev->ev_fired = 0;
        ev->ev_data  = rpc;
        ev->ev_type  = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;

        rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
                                   &bk->bk_iovs[0], bk->bk_niov, opt,
                                   rpc->srpc_peer, rpc->srpc_self,
                                   &bk->bk_mdh, ev);
        if (rc != 0)
                ev->ev_fired = 1; /* no more event expected */
        return rc;
}

/* only called from srpc_handle_rpc */
static void
srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
{
        struct srpc_service_cd *scd = rpc->srpc_scd;
        struct srpc_service *sv = scd->scd_svc;
        srpc_buffer_t *buffer;

        LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);

        rpc->srpc_status = status;

        CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
                     "Server RPC %p done: service %s, peer %s, status %s:%d\n",
                     rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
                     swi_state2str(rpc->srpc_wi.swi_state), status);

        if (status != 0) {
                spin_lock(&srpc_data.rpc_glock);
                srpc_data.rpc_counters.rpcs_dropped++;
                spin_unlock(&srpc_data.rpc_glock);
        }

        if (rpc->srpc_done != NULL)
                (*rpc->srpc_done) (rpc);
        LASSERT(rpc->srpc_bulk == NULL);

        spin_lock(&scd->scd_lock);

        if (rpc->srpc_reqstbuf != NULL) {
                /* NB: srpc_service_recycle_buffer might drop scd_lock, but
                 * sv can't go away because scd_rpc_active is not empty */
                srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
                rpc->srpc_reqstbuf = NULL;
        }

        list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */

        /*
         * No one can schedule me now since:
         * - I'm not on scd_rpc_active.
         * - all LNet events have been fired.
         * Cancel pending schedules and prevent future schedule attempts:
         */
        LASSERT(rpc->srpc_ev.ev_fired);
        swi_exit_workitem(&rpc->srpc_wi);

        if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
                buffer = list_entry(scd->scd_buf_blocked.next,
                                    srpc_buffer_t, buf_list);
                list_del(&buffer->buf_list);

                srpc_init_server_rpc(rpc, scd, buffer);
                list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
                swi_schedule_workitem(&rpc->srpc_wi);
        } else {
                list_add(&rpc->srpc_list, &scd->scd_rpc_free);
        }

        spin_unlock(&scd->scd_lock);
}
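/* Server-side RPC state machine, driven by srpc_lnet_ev_handler() and
 * scheduler wakeups:
 *
 *   SWI_STATE_NEWBORN          unpack request, run service handler,
 *                              optionally start bulk transfer
 *   SWI_STATE_BULK_STARTED     bulk done (if any), submit reply
 *   SWI_STATE_REPLY_SUBMITTED  reply sent, finish the RPC
 *
 * Returns 1 when the RPC is finished, 0 when it must wait for events. */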
/* handles an incoming RPC */
int
srpc_handle_rpc(swi_workitem_t *wi)
{
        struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
        struct srpc_service_cd *scd = rpc->srpc_scd;
        struct srpc_service *sv = scd->scd_svc;
        srpc_event_t *ev = &rpc->srpc_ev;
        int rc = 0;

        LASSERT(wi == &rpc->srpc_wi);

        spin_lock(&scd->scd_lock);

        if (sv->sv_shuttingdown || rpc->srpc_aborted) {
                spin_unlock(&scd->scd_lock);

                if (rpc->srpc_bulk != NULL)
                        LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
                LNetMDUnlink(rpc->srpc_replymdh);

                if (ev->ev_fired) { /* no more event, OK to finish */
                        srpc_server_rpc_done(rpc, -ESHUTDOWN);
                        return 1;
                }
                return 0;
        }

        spin_unlock(&scd->scd_lock);

        switch (wi->swi_state) {
        default:
                LBUG();
        case SWI_STATE_NEWBORN: {
                srpc_msg_t *msg;
                srpc_generic_reply_t *reply;

                msg = &rpc->srpc_reqstbuf->buf_msg;
                reply = &rpc->srpc_replymsg.msg_body.reply;

                if (msg->msg_magic == 0) {
                        /* moaned already in srpc_lnet_ev_handler */
                        srpc_server_rpc_done(rpc, EBADMSG);
                        return 1;
                }

                srpc_unpack_msg_hdr(msg);
                if (msg->msg_version != SRPC_MSG_VERSION) {
                        CWARN("Version mismatch: %u, %u expected, from %s\n",
                              msg->msg_version, SRPC_MSG_VERSION,
                              libcfs_id2str(rpc->srpc_peer));
                        reply->status = EPROTO;
                        /* drop through and send reply */
                } else {
                        reply->status = 0;
                        rc = (*sv->sv_handler)(rpc);
                        LASSERT(reply->status == 0 || !rpc->srpc_bulk);
                        if (rc != 0) {
                                srpc_server_rpc_done(rpc, rc);
                                return 1;
                        }
                }

                wi->swi_state = SWI_STATE_BULK_STARTED;

                if (rpc->srpc_bulk != NULL) {
                        rc = srpc_do_bulk(rpc);
                        if (rc == 0)
                                return 0; /* wait for bulk */

                        LASSERT(ev->ev_fired);
                        ev->ev_status = rc;
                }
        }
        case SWI_STATE_BULK_STARTED:
                LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired);

                if (rpc->srpc_bulk != NULL) {
                        rc = ev->ev_status;

                        if (sv->sv_bulk_ready != NULL)
                                rc = (*sv->sv_bulk_ready) (rpc, rc);

                        if (rc != 0) {
                                srpc_server_rpc_done(rpc, rc);
                                return 1;
                        }
                }

                wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
                rc = srpc_send_reply(rpc);
                if (rc == 0)
                        return 0; /* wait for reply */
                srpc_server_rpc_done(rpc, rc);
                return 1;

        case SWI_STATE_REPLY_SUBMITTED:
                if (!ev->ev_fired) {
                        CERROR("RPC %p: bulk %p, service %d\n",
                               rpc, rpc->srpc_bulk, sv->sv_id);
                        CERROR("Event: status %d, type %d, lnet %d\n",
                               ev->ev_status, ev->ev_type, ev->ev_lnet);
                        LASSERT(ev->ev_fired);
                }

                wi->swi_state = SWI_STATE_DONE;
                srpc_server_rpc_done(rpc, ev->ev_status);
                return 1;
        }

        return 0;
}
static void
srpc_client_rpc_expired(void *data)
{
        srpc_client_rpc_t *rpc = data;

        CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
              rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
              rpc->crpc_timeout);

        spin_lock(&rpc->crpc_lock);

        rpc->crpc_timeout = 0;
        srpc_abort_rpc(rpc, -ETIMEDOUT);

        spin_unlock(&rpc->crpc_lock);

        spin_lock(&srpc_data.rpc_glock);
        srpc_data.rpc_counters.rpcs_expired++;
        spin_unlock(&srpc_data.rpc_glock);
}

static void
srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
{
        stt_timer_t *timer = &rpc->crpc_timer;

        if (rpc->crpc_timeout == 0)
                return;

        INIT_LIST_HEAD(&timer->stt_list);
        timer->stt_data    = rpc;
        timer->stt_func    = srpc_client_rpc_expired;
        timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
                                          cfs_time_current_sec());
        stt_add_timer(timer);
}

/*
 * Called with rpc->crpc_lock held.
 *
 * Upon exit the RPC expiry timer is not queued and the handler is not
 * running on any CPU.
 */
static void
srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
{
        /* timer not planted or already exploded */
        if (rpc->crpc_timeout == 0)
                return;

        /* timer successfully defused */
        if (stt_del_timer(&rpc->crpc_timer))
                return;

        /* timer detonated, wait for it to explode */
        while (rpc->crpc_timeout != 0) {
                spin_unlock(&rpc->crpc_lock);

                schedule();

                spin_lock(&rpc->crpc_lock);
        }
}

static void
srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
{
        swi_workitem_t *wi = &rpc->crpc_wi;

        LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);

        spin_lock(&rpc->crpc_lock);

        rpc->crpc_closed = 1;
        if (rpc->crpc_status == 0)
                rpc->crpc_status = status;

        srpc_del_client_rpc_timer(rpc);

        CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
                     "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
                     rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
                     swi_state2str(wi->swi_state), rpc->crpc_aborted, status);

        /*
         * No one can schedule me now since:
         * - RPC timer has been defused.
         * - all LNet events have been fired.
         * - crpc_closed has been set, preventing srpc_abort_rpc from
         *   scheduling me.
         * Cancel pending schedules and prevent future schedule attempts:
         */
        LASSERT(!srpc_event_pending(rpc));
        swi_exit_workitem(wi);

        spin_unlock(&rpc->crpc_lock);

        (*rpc->crpc_done)(rpc);
}
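/* Client-side RPC state machine, driven by srpc_lnet_ev_handler() and
 * the workitem scheduler. The reply and bulk buffers are posted
 * passively before the request PUT is sent, so the server can respond
 * as soon as it sees the request; events may arrive in any order but
 * are processed strictly as request, reply, then bulk. */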
/* sends an outgoing RPC */
int
srpc_send_rpc(swi_workitem_t *wi)
{
        int rc = 0;
        srpc_client_rpc_t *rpc;
        srpc_msg_t *reply;
        int do_bulk;

        LASSERT(wi != NULL);

        rpc = wi->swi_workitem.wi_data;

        LASSERT(rpc != NULL);
        LASSERT(wi == &rpc->crpc_wi);

        reply = &rpc->crpc_replymsg;
        do_bulk = rpc->crpc_bulk.bk_niov > 0;

        spin_lock(&rpc->crpc_lock);

        if (rpc->crpc_aborted) {
                spin_unlock(&rpc->crpc_lock);
                goto abort;
        }

        spin_unlock(&rpc->crpc_lock);

        switch (wi->swi_state) {
        default:
                LBUG();
        case SWI_STATE_NEWBORN:
                LASSERT(!srpc_event_pending(rpc));

                rc = srpc_prepare_reply(rpc);
                if (rc != 0) {
                        srpc_client_rpc_done(rpc, rc);
                        return 1;
                }

                rc = srpc_prepare_bulk(rpc);
                if (rc != 0)
                        break;

                wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
                rc = srpc_send_request(rpc);
                break;

        case SWI_STATE_REQUEST_SUBMITTED:
                /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
                 * order; however, they're processed in a strict order:
                 * rqt, rpy, and bulk. */
                if (!rpc->crpc_reqstev.ev_fired)
                        break;

                rc = rpc->crpc_reqstev.ev_status;
                if (rc != 0)
                        break;

                wi->swi_state = SWI_STATE_REQUEST_SENT;
                /* perhaps more events, fall thru */
        case SWI_STATE_REQUEST_SENT: {
                srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);

                if (!rpc->crpc_replyev.ev_fired)
                        break;

                rc = rpc->crpc_replyev.ev_status;
                if (rc != 0)
                        break;

                srpc_unpack_msg_hdr(reply);
                if (reply->msg_type != type ||
                    (reply->msg_magic != SRPC_MSG_MAGIC &&
                     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
                        CWARN("Bad message from %s: type %u (%d expected), "
                              "magic %u (%d expected).\n",
                              libcfs_id2str(rpc->crpc_dest),
                              reply->msg_type, type,
                              reply->msg_magic, SRPC_MSG_MAGIC);
                        rc = -EBADMSG;
                        break;
                }

                if (do_bulk && reply->msg_body.reply.status != 0) {
                        CWARN("Remote error %d at %s, unlink bulk buffer in "
                              "case peer didn't initiate bulk transfer\n",
                              reply->msg_body.reply.status,
                              libcfs_id2str(rpc->crpc_dest));
                        LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
                }

                wi->swi_state = SWI_STATE_REPLY_RECEIVED;
        }
        case SWI_STATE_REPLY_RECEIVED:
                if (do_bulk && !rpc->crpc_bulkev.ev_fired)
                        break;

                rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;

                /* Bulk buffer was unlinked due to remote error. Clear error
                 * since reply buffer still contains valid data.
                 * NB rpc->crpc_done shouldn't look into bulk data in case of
                 * remote error */
                if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
                    rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
                        rc = 0;

                wi->swi_state = SWI_STATE_DONE;
                srpc_client_rpc_done(rpc, rc);
                return 1;
        }

        if (rc != 0) {
                spin_lock(&rpc->crpc_lock);
                srpc_abort_rpc(rpc, rc);
                spin_unlock(&rpc->crpc_lock);
        }

abort:
        if (rpc->crpc_aborted) {
                LNetMDUnlink(rpc->crpc_reqstmdh);
                LNetMDUnlink(rpc->crpc_replymdh);
                LNetMDUnlink(rpc->crpc_bulk.bk_mdh);

                if (!srpc_event_pending(rpc)) {
                        srpc_client_rpc_done(rpc, -EINTR);
                        return 1;
                }
        }
        return 0;
}
srpc_client_rpc_t *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
                       int nbulkiov, int bulklen,
                       void (*rpc_done)(srpc_client_rpc_t *),
                       void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
{
        srpc_client_rpc_t *rpc;

        LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
                                   crpc_bulk.bk_iovs[nbulkiov]));
        if (rpc == NULL)
                return NULL;

        srpc_init_client_rpc(rpc, peer, service, nbulkiov,
                             bulklen, rpc_done, rpc_fini, priv);
        return rpc;
}

/* called with rpc->crpc_lock held */
void
srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
{
        LASSERT(why != 0);

        if (rpc->crpc_aborted || /* already aborted */
            rpc->crpc_closed)    /* callback imminent */
                return;

        CDEBUG(D_NET,
               "Aborting RPC: service %d, peer %s, state %s, why %d\n",
               rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
               swi_state2str(rpc->crpc_wi.swi_state), why);

        rpc->crpc_aborted = 1;
        rpc->crpc_status  = why;
        swi_schedule_workitem(&rpc->crpc_wi);
}
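/* Typical client usage (a sketch; the selftest framework code is the
 * real caller):
 *
 *      rpc = srpc_create_client_rpc(peer, service, nbulkiov, bulklen,
 *                                   done_cb, fini_cb, priv);
 *      if (rpc != NULL) {
 *              spin_lock(&rpc->crpc_lock);
 *              srpc_post_rpc(rpc);
 *              spin_unlock(&rpc->crpc_lock);
 *      }
 *
 * srpc_post_rpc() arms the expiry timer and schedules crpc_wi;
 * completion is reported asynchronously through done_cb. */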
/* called with rpc->crpc_lock held */
void
srpc_post_rpc(srpc_client_rpc_t *rpc)
{
        LASSERT(!rpc->crpc_aborted);
        LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

        CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
               libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
               rpc->crpc_timeout);

        srpc_add_client_rpc_timer(rpc);
        swi_schedule_workitem(&rpc->crpc_wi);
}

int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
        srpc_event_t *ev = &rpc->srpc_ev;
        struct srpc_msg *msg = &rpc->srpc_replymsg;
        struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
        struct srpc_service_cd *scd = rpc->srpc_scd;
        struct srpc_service *sv = scd->scd_svc;
        __u64 rpyid;
        int rc;

        LASSERT(buffer != NULL);
        rpyid = buffer->buf_msg.msg_body.reqst.rpyid;

        spin_lock(&scd->scd_lock);

        if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
                /* Repost buffer before replying since the test client
                 * might send me another RPC once it gets the reply */
                if (srpc_service_post_buffer(scd, buffer) != 0)
                        CWARN("Failed to repost %s buffer\n", sv->sv_name);
                rpc->srpc_reqstbuf = NULL;
        }

        spin_unlock(&scd->scd_lock);

        ev->ev_fired = 0;
        ev->ev_data  = rpc;
        ev->ev_type  = SRPC_REPLY_SENT;

        msg->msg_magic   = SRPC_MSG_MAGIC;
        msg->msg_version = SRPC_MSG_VERSION;
        msg->msg_type    = srpc_service2reply(sv->sv_id);

        rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
                                   sizeof(*msg), LNET_MD_OP_PUT,
                                   rpc->srpc_peer, rpc->srpc_self,
                                   &rpc->srpc_replymdh, ev);
        if (rc != 0)
                ev->ev_fired = 1; /* no more event expected */
        return rc;
}
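/* The single LNet event handler for the module: demultiplexes on the
 * srpc_event embedded as the MD's user_ptr, updates the global counters,
 * and schedules the owning workitem (client or server side) with the
 * event status. */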
/* when in the kernel, always called with LNET_LOCK() held and in
 * thread context */
static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
        struct srpc_service_cd *scd;
        srpc_event_t *rpcev = ev->md.user_ptr;
        srpc_client_rpc_t *crpc;
        srpc_server_rpc_t *srpc;
        srpc_buffer_t *buffer;
        srpc_service_t *sv;
        srpc_msg_t *msg;
        srpc_msg_type_t type;

        LASSERT(!in_interrupt());

        if (ev->status != 0) {
                __u32 errors;

                spin_lock(&srpc_data.rpc_glock);
                if (ev->status != -ECANCELED) /* cancellation is not error */
                        srpc_data.rpc_counters.errors++;
                errors = srpc_data.rpc_counters.errors;
                spin_unlock(&srpc_data.rpc_glock);

                CNETERR("LNet event status %d type %d, RPC errors %u\n",
                        ev->status, ev->type, errors);
        }

        rpcev->ev_lnet = ev->type;

        switch (rpcev->ev_type) {
        default:
                CERROR("Unknown event: status %d, type %d, lnet %d\n",
                       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
                LBUG();
        case SRPC_REQUEST_SENT:
                if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
                        spin_lock(&srpc_data.rpc_glock);
                        srpc_data.rpc_counters.rpcs_sent++;
                        spin_unlock(&srpc_data.rpc_glock);
                }
        case SRPC_REPLY_RCVD:
        case SRPC_BULK_REQ_RCVD:
                crpc = rpcev->ev_data;

                if (rpcev != &crpc->crpc_reqstev &&
                    rpcev != &crpc->crpc_replyev &&
                    rpcev != &crpc->crpc_bulkev) {
                        CERROR("rpcev %p, crpc %p, reqstev %p, "
                               "replyev %p, bulkev %p\n",
                               rpcev, crpc, &crpc->crpc_reqstev,
                               &crpc->crpc_replyev, &crpc->crpc_bulkev);
                        CERROR("Bad event: status %d, type %d, lnet %d\n",
                               rpcev->ev_status, rpcev->ev_type,
                               rpcev->ev_lnet);
                        LBUG();
                }

                spin_lock(&crpc->crpc_lock);

                LASSERT(rpcev->ev_fired == 0);
                rpcev->ev_fired = 1;
                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
                                   -EINTR : ev->status;
                swi_schedule_workitem(&crpc->crpc_wi);

                spin_unlock(&crpc->crpc_lock);
                break;

        case SRPC_REQUEST_RCVD:
                scd = rpcev->ev_data;
                sv = scd->scd_svc;

                LASSERT(rpcev == &scd->scd_ev);

                spin_lock(&scd->scd_lock);

                LASSERT(ev->unlinked);
                LASSERT(ev->type == LNET_EVENT_PUT ||
                        ev->type == LNET_EVENT_UNLINK);
                LASSERT(ev->type != LNET_EVENT_UNLINK ||
                        sv->sv_shuttingdown);

                buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
                buffer->buf_peer = ev->initiator;
                buffer->buf_self = ev->target.nid;

                LASSERT(scd->scd_buf_nposted > 0);
                scd->scd_buf_nposted--;

                if (sv->sv_shuttingdown) {
                        /* Leave buffer on scd->scd_buf_posted since
                         * srpc_finish_service needs to traverse it. */
                        spin_unlock(&scd->scd_lock);
                        break;
                }

                if (scd->scd_buf_err_stamp != 0 &&
                    scd->scd_buf_err_stamp < cfs_time_current_sec()) {
                        /* re-enable adding buffer */
                        scd->scd_buf_err_stamp = 0;
                        scd->scd_buf_err = 0;
                }

                if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
                    scd->scd_buf_adjust == 0 &&
                    scd->scd_buf_nposted < scd->scd_buf_low) {
                        scd->scd_buf_adjust = MAX(scd->scd_buf_total / 2,
                                                  SFW_TEST_WI_MIN);
                        swi_schedule_workitem(&scd->scd_buf_wi);
                }

                list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
                msg = &buffer->buf_msg;
                type = srpc_service2request(sv->sv_id);

                if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
                    (msg->msg_type != type &&
                     msg->msg_type != __swab32(type)) ||
                    (msg->msg_magic != SRPC_MSG_MAGIC &&
                     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
                        CERROR("Dropping RPC (%s) from %s: "
                               "status %d mlength %d type %u magic %u.\n",
                               sv->sv_name, libcfs_id2str(ev->initiator),
                               ev->status, ev->mlength,
                               msg->msg_type, msg->msg_magic);

                        /* NB can't call srpc_service_recycle_buffer here
                         * since it may call LNetM[DE]Attach. The invalid
                         * magic tells srpc_handle_rpc to drop this RPC */
                        msg->msg_magic = 0;
                }

                if (!list_empty(&scd->scd_rpc_free)) {
                        srpc = list_entry(scd->scd_rpc_free.next,
                                          struct srpc_server_rpc,
                                          srpc_list);
                        list_del(&srpc->srpc_list);

                        srpc_init_server_rpc(srpc, scd, buffer);
                        list_add_tail(&srpc->srpc_list,
                                      &scd->scd_rpc_active);
                        swi_schedule_workitem(&srpc->srpc_wi);
                } else {
                        list_add_tail(&buffer->buf_list,
                                      &scd->scd_buf_blocked);
                }

                spin_unlock(&scd->scd_lock);

                spin_lock(&srpc_data.rpc_glock);
                srpc_data.rpc_counters.rpcs_rcvd++;
                spin_unlock(&srpc_data.rpc_glock);
                break;

        case SRPC_BULK_GET_RPLD:
                LASSERT(ev->type == LNET_EVENT_SEND ||
                        ev->type == LNET_EVENT_REPLY ||
                        ev->type == LNET_EVENT_UNLINK);

                if (!ev->unlinked)
                        break; /* wait for final event */

        case SRPC_BULK_PUT_SENT:
                if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
                        spin_lock(&srpc_data.rpc_glock);

                        if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
                                srpc_data.rpc_counters.bulk_get += ev->mlength;
                        else
                                srpc_data.rpc_counters.bulk_put += ev->mlength;

                        spin_unlock(&srpc_data.rpc_glock);
                }
        case SRPC_REPLY_SENT:
                srpc = rpcev->ev_data;
                scd = srpc->srpc_scd;

                LASSERT(rpcev == &srpc->srpc_ev);

                spin_lock(&scd->scd_lock);

                rpcev->ev_fired = 1;
                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
                                   -EINTR : ev->status;
                swi_schedule_workitem(&srpc->srpc_wi);

                spin_unlock(&scd->scd_lock);
                break;
        }
}
int
srpc_startup(void)
{
        int rc;

        memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
        spin_lock_init(&srpc_data.rpc_glock);

        /* 1 second pause to avoid timestamp reuse */
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(cfs_time_seconds(1));
        srpc_data.rpc_matchbits = ((__u64)cfs_time_current_sec()) << 48;

        srpc_data.rpc_state = SRPC_STATE_NONE;

        rc = LNetNIInit(LNET_PID_LUSTRE);
        if (rc < 0) {
                CERROR("LNetNIInit() has failed: %d\n", rc);
                return rc;
        }

        srpc_data.rpc_state = SRPC_STATE_NI_INIT;

        LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
        rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
        if (rc != 0) {
                CERROR("LNetEQAlloc() has failed: %d\n", rc);
                goto bail;
        }

        rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
        LASSERT(rc == 0);
        rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
        LASSERT(rc == 0);

        srpc_data.rpc_state = SRPC_STATE_EQ_INIT;

        rc = stt_startup();

bail:
        if (rc != 0)
                srpc_shutdown();
        else
                srpc_data.rpc_state = SRPC_STATE_RUNNING;

        return rc;
}

void
srpc_shutdown(void)
{
        int i;
        int rc;
        int state;

        state = srpc_data.rpc_state;
        srpc_data.rpc_state = SRPC_STATE_STOPPING;

        switch (state) {
        default:
                LBUG();
        case SRPC_STATE_RUNNING:
                spin_lock(&srpc_data.rpc_glock);

                for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
                        srpc_service_t *sv = srpc_data.rpc_services[i];

                        LASSERTF(sv == NULL,
                                 "service not empty: id %d, name %s\n",
                                 i, sv->sv_name);
                }

                spin_unlock(&srpc_data.rpc_glock);

                stt_shutdown();

        case SRPC_STATE_EQ_INIT:
                rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
                rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
                LASSERT(rc == 0);
                rc = LNetEQFree(srpc_data.rpc_lnet_eq);
                LASSERT(rc == 0); /* the EQ should have no user by now */

        case SRPC_STATE_NI_INIT:
                LNetNIFini();
        }
}