/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 *
 * 2012-05-13: Liang Zhen <liang@whamcloud.com>
 * - percpt data for service to improve smp performance
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "selftest.h"

enum srpc_state {
        SRPC_STATE_NONE,
        SRPC_STATE_NI_INIT,
        SRPC_STATE_EQ_INIT,
        SRPC_STATE_RUNNING,
        SRPC_STATE_STOPPING,
};
static struct smoketest_rpc {
        spinlock_t rpc_glock; /* global lock */
        struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
        lnet_handler_t rpc_lnet_handler; /* _the_ LNet event handler */
        enum srpc_state rpc_state;
        struct srpc_counters rpc_counters;
        __u64 rpc_matchbits; /* matchbits counter */
} srpc_data;
static inline int
srpc_serv_portal(int svc_id)
{
        return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
               SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
}
static int srpc_handle_rpc(struct swi_workitem *wi);
void srpc_get_counters(struct srpc_counters *cnt)
{
        spin_lock(&srpc_data.rpc_glock);
        *cnt = srpc_data.rpc_counters;
        spin_unlock(&srpc_data.rpc_glock);
}
void srpc_set_counters(const struct srpc_counters *cnt)
{
        spin_lock(&srpc_data.rpc_glock);
        srpc_data.rpc_counters = *cnt;
        spin_unlock(&srpc_data.rpc_glock);
}
static void
srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
                   int nob)
{
        LASSERT(off < PAGE_SIZE);
        LASSERT(nob > 0 && nob <= PAGE_SIZE);

        bk->bk_iovs[i].bv_offset = off;
        bk->bk_iovs[i].bv_page = pg;
        bk->bk_iovs[i].bv_len = nob;
}
void
srpc_free_bulk(struct srpc_bulk *bk)
{
        struct page *pg;
        int i;

        LASSERT(bk != NULL);

        for (i = 0; i < bk->bk_niov; i++) {
                pg = bk->bk_iovs[i].bv_page;
                if (pg == NULL)
                        break;

                __free_page(pg);
        }

        LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
}
struct srpc_bulk *
srpc_alloc_bulk(int cpt, unsigned bulk_off, unsigned bulk_npg,
                unsigned bulk_len, int sink)
{
        struct srpc_bulk *bk;
        int i;

        LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);

        LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
                         offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
        if (bk == NULL) {
                CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
                return NULL;
        }

        memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
        bk->bk_sink = sink;
        bk->bk_len = bulk_len;
        bk->bk_niov = bulk_npg;

        for (i = 0; i < bulk_npg; i++) {
                struct page *pg;
                int nob;

                pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_KERNEL);
                if (pg == NULL) {
                        CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
                        srpc_free_bulk(bk);
                        return NULL;
                }

                nob = min_t(unsigned, bulk_off + bulk_len, PAGE_SIZE) -
                      bulk_off;
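                /* worked example: with 4K pages, bulk_off = 1K and
                 * bulk_len = 10K, page 0 carries 3K starting at offset 1K;
                 * the adjustments below then restart every later page at
                 * offset 0
                 */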
                srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
                bulk_len -= nob;
                bulk_off = 0;
        }

        return bk;
}
static inline __u64
srpc_next_id(void)
{
        __u64 id;

        spin_lock(&srpc_data.rpc_glock);
        id = srpc_data.rpc_matchbits++;
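        /* NB: rpc_matchbits is seeded from the wall clock in srpc_startup()
         * and only ever incremented under rpc_glock, so every id returned
         * here is unique for the lifetime of this node
         */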
        spin_unlock(&srpc_data.rpc_glock);
        return id;
}
static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
                     struct srpc_service_cd *scd,
                     struct srpc_buffer *buffer)
{
        memset(rpc, 0, sizeof(*rpc));
        swi_init_workitem(&rpc->srpc_wi, srpc_handle_rpc,
                          srpc_serv_is_framework(scd->scd_svc) ?
                          lst_sched_serial : lst_sched_test[scd->scd_cpt]);

        rpc->srpc_ev.ev_fired = 1; /* no event expected now */
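        /* NB: ev_fired == 1 means no LNet event is outstanding for this RPC;
         * it is cleared just before an MD is posted and set again either by
         * srpc_lnet_ev_handler() or by an error path that posted nothing
         */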
        rpc->srpc_scd = scd;
        rpc->srpc_reqstbuf = buffer;
        rpc->srpc_peer = buffer->buf_peer;
        rpc->srpc_self = buffer->buf_self;
        LNetInvalidateMDHandle(&rpc->srpc_replymdh);
}
static void
srpc_service_fini(struct srpc_service *svc)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        struct srpc_buffer *buf;
        struct list_head *q;
        int i;

        if (svc->sv_cpt_data == NULL)
                return;

        cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
                while (1) {
                        if (!list_empty(&scd->scd_buf_posted))
                                q = &scd->scd_buf_posted;
                        else if (!list_empty(&scd->scd_buf_blocked))
                                q = &scd->scd_buf_blocked;
                        else
                                break;

                        while (!list_empty(q)) {
                                buf = list_entry(q->next,
                                                 struct srpc_buffer,
                                                 buf_list);
                                list_del(&buf->buf_list);
                                LIBCFS_FREE(buf, sizeof(*buf));
                        }
                }

                LASSERT(list_empty(&scd->scd_rpc_active));

                while (!list_empty(&scd->scd_rpc_free)) {
                        rpc = list_entry(scd->scd_rpc_free.next,
                                         struct srpc_server_rpc,
                                         srpc_list);
                        list_del(&rpc->srpc_list);
                        LIBCFS_FREE(rpc, sizeof(*rpc));
                }
        }

        cfs_percpt_free(svc->sv_cpt_data);
        svc->sv_cpt_data = NULL;
}
static int
srpc_service_nrpcs(struct srpc_service *svc)
{
        int nrpcs = svc->sv_wi_total / svc->sv_ncpts;

        return srpc_serv_is_framework(svc) ?
               max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
}
int srpc_add_buffer(struct swi_workitem *wi);
static int
srpc_service_init(struct srpc_service *svc)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        int nrpcs;
        int i;
        int j;

        svc->sv_shuttingdown = 0;

        svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
                                            sizeof(struct srpc_service_cd));
        if (svc->sv_cpt_data == NULL)
                return -ENOMEM;

        svc->sv_ncpts = srpc_serv_is_framework(svc) ?
                        1 : cfs_cpt_number(lnet_cpt_table());
        nrpcs = srpc_service_nrpcs(svc);

        cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
                scd->scd_cpt = i;
                scd->scd_svc = svc;
                spin_lock_init(&scd->scd_lock);
                INIT_LIST_HEAD(&scd->scd_rpc_free);
                INIT_LIST_HEAD(&scd->scd_rpc_active);
                INIT_LIST_HEAD(&scd->scd_buf_posted);
                INIT_LIST_HEAD(&scd->scd_buf_blocked);

                scd->scd_ev.ev_data = scd;
                scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;

                /* NB: don't use lst_sched_serial for adding buffer,
                 * see details in srpc_service_add_buffers() */
                swi_init_workitem(&scd->scd_buf_wi,
                                  srpc_add_buffer, lst_sched_test[i]);

                if (i != 0 && srpc_serv_is_framework(svc)) {
                        /* NB: a framework service only needs srpc_service_cd
                         * for one partition, but we allocate for all to keep
                         * the code simple; it wastes a little memory, but
                         * nobody should care */
                        continue;
                }

                for (j = 0; j < nrpcs; j++) {
                        LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
                                         i, sizeof(*rpc));
                        if (rpc == NULL) {
                                srpc_service_fini(svc);
                                return -ENOMEM;
                        }
                        list_add(&rpc->srpc_list, &scd->scd_rpc_free);
                }
        }

        return 0;
}
int
srpc_add_service(struct srpc_service *sv)
{
        int id = sv->sv_id;

        LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);

        if (srpc_service_init(sv) != 0)
                return -ENOMEM;

        spin_lock(&srpc_data.rpc_glock);

        LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

        if (srpc_data.rpc_services[id] != NULL) {
                spin_unlock(&srpc_data.rpc_glock);
                goto failed;
        }

        srpc_data.rpc_services[id] = sv;
        spin_unlock(&srpc_data.rpc_glock);

        CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
        return 0;

failed:
        srpc_service_fini(sv);
        return -EBUSY;
}
int
srpc_remove_service(struct srpc_service *sv)
{
        int id = sv->sv_id;

        spin_lock(&srpc_data.rpc_glock);

        if (srpc_data.rpc_services[id] != sv) {
                spin_unlock(&srpc_data.rpc_glock);
                return -ENOENT;
        }

        srpc_data.rpc_services[id] = NULL;
        spin_unlock(&srpc_data.rpc_glock);
        return 0;
}
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
                       int len, int options, struct lnet_process_id peer4,
                       struct lnet_handle_md *mdh, struct srpc_event *ev)
{
        int rc;
        struct lnet_md md;
        struct lnet_me *me;
        struct lnet_processid peer;

        peer.pid = peer4.pid;
        lnet_nid4_to_nid(peer4.nid, &peer.nid);

        me = LNetMEAttach(portal, &peer, matchbits, 0, LNET_UNLINK,
                          local ? LNET_INS_LOCAL : LNET_INS_AFTER);
        if (IS_ERR(me)) {
                rc = PTR_ERR(me);
                CERROR("LNetMEAttach failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                return -ENOMEM;
        }

        md.threshold = 1;
        md.user_ptr = ev;
        md.start = buf;
        md.length = len;
        md.options = options;
        md.handler = srpc_data.rpc_lnet_handler;

        rc = LNetMDAttach(me, &md, LNET_UNLINK, mdh);
        if (rc != 0) {
                CERROR("LNetMDAttach failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                return -ENOMEM;
        }

        CDEBUG(D_NET,
               "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
               libcfs_id2str(peer4), portal, matchbits);
        return 0;
}
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
                      int options, struct lnet_process_id peer4,
                      lnet_nid_t self4, struct lnet_handle_md *mdh,
                      struct srpc_event *ev)
{
        int rc;
        struct lnet_md md;
        struct lnet_nid self;
        struct lnet_processid peer;

        lnet_nid4_to_nid(self4, &self);
        lnet_pid4_to_pid(peer4, &peer);

        md.user_ptr = ev;
        md.start = buf;
        md.length = len;
        md.handler = srpc_data.rpc_lnet_handler;
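        /* a GET initiator sees two events on its MD (SEND and REPLY) while
         * a PUT initiator sees only one, hence the threshold below
         */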
        md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
        md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);

        rc = LNetMDBind(&md, LNET_UNLINK, mdh);
        if (rc != 0) {
                CERROR("LNetMDBind failed: %d\n", rc);
                LASSERT(rc == -ENOMEM);
                return -ENOMEM;
        }

        /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options;
         * they're only meaningful for MDs attached to an ME (i.e. passive
         * buffers)
         */
        if ((options & LNET_MD_OP_PUT) != 0) {
                rc = LNetPut(&self, *mdh, LNET_NOACK_REQ, &peer,
                             portal, matchbits, 0, 0);
        } else {
                LASSERT((options & LNET_MD_OP_GET) != 0);

                rc = LNetGet(&self, *mdh, &peer, portal, matchbits, 0, false);
        }

        if (rc != 0) {
                CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
                       ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
                       libcfs_id2str(peer4), portal, matchbits, rc);

                /* The forthcoming unlink event will complete this operation
                 * with failure, so fall through and return success here.
                 */
                rc = LNetMDUnlink(*mdh);
                LASSERT(rc == 0);
        }

        CDEBUG(D_NET,
               "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
               libcfs_id2str(peer4), portal, matchbits);
        return 0;
}
static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
                         struct lnet_handle_md *mdh, struct srpc_event *ev)
{
        struct lnet_process_id any = {0};

        any.nid = LNET_NID_ANY;
        any.pid = LNET_PID_ANY;
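        /* NB: request buffers match on the service id itself and accept any
         * peer, so any client can land its request here
         */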
        return srpc_post_passive_rdma(srpc_serv_portal(service),
                                      local, service, buf, len,
                                      LNET_MD_OP_PUT, any, mdh, ev);
}
static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
        struct srpc_service *sv = scd->scd_svc;
        struct srpc_msg *msg = &buf->buf_msg;
        int rc;

        LNetInvalidateMDHandle(&buf->buf_mdh);
        list_add(&buf->buf_list, &scd->scd_buf_posted);
        scd->scd_buf_nposted++;
        spin_unlock(&scd->scd_lock);

        rc = srpc_post_passive_rqtbuf(sv->sv_id,
                                      !srpc_serv_is_framework(sv),
                                      msg, sizeof(*msg), &buf->buf_mdh,
                                      &scd->scd_ev);

        /* At this point, an RPC (new or delayed) may have arrived in
         * msg and its event handler has been called. So we must add
         * buf to scd_buf_posted _before_ dropping scd_lock */
        spin_lock(&scd->scd_lock);

        if (rc == 0) {
                if (!sv->sv_shuttingdown)
                        return 0;

                spin_unlock(&scd->scd_lock);
                /* srpc_shutdown_service might have tried to unlink me
                 * when my buf_mdh was still invalid */
                LNetMDUnlink(buf->buf_mdh);
                spin_lock(&scd->scd_lock);
                return 0;
        }

        scd->scd_buf_nposted--;
        if (sv->sv_shuttingdown)
                return rc; /* scd_buf_posted mustn't change now */

        list_del(&buf->buf_list);
        spin_unlock(&scd->scd_lock);

        LIBCFS_FREE(buf, sizeof(*buf));

        spin_lock(&scd->scd_lock);
        return rc;
}
int
srpc_add_buffer(struct swi_workitem *wi)
{
        struct srpc_service_cd *scd = container_of(wi, struct srpc_service_cd,
                                                   scd_buf_wi);
        struct srpc_buffer *buf;
        int rc = 0;
        /* called by workitem scheduler threads; these threads have CPT
         * affinity, so buffers will be posted on the CPT-local list of
         * the portal */
        spin_lock(&scd->scd_lock);

        while (scd->scd_buf_adjust > 0 &&
               !scd->scd_svc->sv_shuttingdown) {
                scd->scd_buf_adjust--; /* consume it */
                scd->scd_buf_posting++;

                spin_unlock(&scd->scd_lock);

                LIBCFS_ALLOC(buf, sizeof(*buf));
                if (buf == NULL) {
                        CERROR("Failed to add new buf to service: %s\n",
                               scd->scd_svc->sv_name);
                        spin_lock(&scd->scd_lock);
                        rc = -ENOMEM;
                        break;
                }

                spin_lock(&scd->scd_lock);
                if (scd->scd_svc->sv_shuttingdown) {
                        spin_unlock(&scd->scd_lock);
                        LIBCFS_FREE(buf, sizeof(*buf));

                        spin_lock(&scd->scd_lock);
                        rc = -ESHUTDOWN;
                        break;
                }

                rc = srpc_service_post_buffer(scd, buf);
                if (rc != 0)
                        break; /* buf has been freed inside */

                LASSERT(scd->scd_buf_posting > 0);
                scd->scd_buf_posting--;
                scd->scd_buf_total++;
                scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
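                /* scd_buf_low is the low-water mark: when the number of
                 * posted buffers sinks below it, the LNet event handler
                 * schedules this work item again to refill (see
                 * srpc_lnet_ev_handler)
                 */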
        }

        if (rc != 0) {
                scd->scd_buf_err_stamp = ktime_get_real_seconds();
                scd->scd_buf_err = rc;

                LASSERT(scd->scd_buf_posting > 0);
                scd->scd_buf_posting--;
        }

        spin_unlock(&scd->scd_lock);
        return 0;
}
int
srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
{
        struct srpc_service_cd *scd;
        int rc = 0;
        int i;

        LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);

                scd->scd_buf_err = 0;
                scd->scd_buf_err_stamp = 0;
                scd->scd_buf_posting = 0;
                scd->scd_buf_adjust = nbuffer;
                /* start to post buffers */
                swi_schedule_workitem(&scd->scd_buf_wi);
                spin_unlock(&scd->scd_lock);

                /* framework service only posts buffers for one partition */
                if (srpc_serv_is_framework(sv))
                        break;
        }

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);
                /*
                 * NB: srpc_service_add_buffers() can be called in the thread
                 * context of lst_sched_serial, and we don't normally allow
                 * sleeping in the thread context of a WI scheduler because
                 * it blocks the current scheduler thread from doing anything
                 * else; even worse, it could deadlock if it waits on the
                 * result of another WI of the same scheduler. However, it's
                 * safe here because scd_buf_wi is scheduled by a thread in a
                 * different WI scheduler (lst_sched_test), so there is no
                 * risk of deadlock, though this could block all WIs pending
                 * on lst_sched_serial for a moment, which is not good but
                 * not fatal.
                 */
                lst_wait_until(scd->scd_buf_err != 0 ||
                               (scd->scd_buf_adjust == 0 &&
                                scd->scd_buf_posting == 0),
                               scd->scd_lock, "waiting for adding buffer\n");

                if (scd->scd_buf_err != 0 && rc == 0)
                        rc = scd->scd_buf_err;

                spin_unlock(&scd->scd_lock);
        }

        return rc;
}
void
srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
{
        struct srpc_service_cd *scd;
        int num;
        int i;

        LASSERT(!sv->sv_shuttingdown);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);

                num = scd->scd_buf_total + scd->scd_buf_posting;
                scd->scd_buf_adjust -= min(nbuffer, num);
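                /* scd_buf_adjust may now go negative; a negative value makes
                 * srpc_service_recycle_buffer() free buffers instead of
                 * reposting them until the deficit is consumed
                 */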
                spin_unlock(&scd->scd_lock);
        }
}
/* returns 1 if sv has finished, otherwise 0 */
int
srpc_finish_service(struct srpc_service *sv)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        int i;

        LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);
                if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
                        spin_unlock(&scd->scd_lock);
                        return 0;
                }

                if (scd->scd_buf_nposted > 0) {
                        CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
                               scd->scd_buf_nposted);
                        spin_unlock(&scd->scd_lock);
                        return 0;
                }

                if (list_empty(&scd->scd_rpc_active)) {
                        spin_unlock(&scd->scd_lock);
                        continue;
                }

                rpc = list_entry(scd->scd_rpc_active.next,
                                 struct srpc_server_rpc, srpc_list);
                CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
                        rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
                        swi_state2str(rpc->srpc_wi.swi_state),
                        rpc->srpc_wi.swi_workitem.wi_scheduled,
                        rpc->srpc_wi.swi_workitem.wi_running,
                        rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
                        rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
                spin_unlock(&scd->scd_lock);
                return 0;
        }

        /* no lock needed from now on */
        srpc_service_fini(sv);
        return 1;
}
/* called with scd->scd_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd,
                            struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
        if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
                if (srpc_service_post_buffer(scd, buf) != 0) {
                        CWARN("Failed to post %s buffer\n",
                              scd->scd_svc->sv_name);
                }
                return;
        }

        /* service is shutting down, or we want to recycle some buffers */
        scd->scd_buf_total--;

        if (scd->scd_buf_adjust < 0) {
                scd->scd_buf_adjust++;
                if (scd->scd_buf_adjust < 0 &&
                    scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
                        CDEBUG(D_INFO,
                               "Tried to recycle %d buffers but nothing left\n",
                               scd->scd_buf_adjust);
                        scd->scd_buf_adjust = 0;
                }
        }

        spin_unlock(&scd->scd_lock);
        LIBCFS_FREE(buf, sizeof(*buf));
        spin_lock(&scd->scd_lock);
}
void
srpc_abort_service(struct srpc_service *sv)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        int i;

        CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
               sv->sv_id, sv->sv_name);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);

                /* schedule in-flight RPCs to notice the abort, NB:
                 * racing with incoming RPCs; a complete fix should make
                 * test RPCs carry the session ID in their headers
                 */
                list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
                        rpc->srpc_aborted = 1;
                        swi_schedule_workitem(&rpc->srpc_wi);
                }

                spin_unlock(&scd->scd_lock);
        }
}
void
srpc_shutdown_service(struct srpc_service *sv)
{
        struct srpc_service_cd *scd;
        struct srpc_server_rpc *rpc;
        struct srpc_buffer *buf;
        int i;

        CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
               sv->sv_id, sv->sv_name);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
                spin_lock(&scd->scd_lock);

        sv->sv_shuttingdown = 1; /* i.e. no new active RPC */

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
                spin_unlock(&scd->scd_lock);

        cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
                spin_lock(&scd->scd_lock);

                /* schedule in-flight RPCs to notice the shutdown */
                list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
                        swi_schedule_workitem(&rpc->srpc_wi);

                spin_unlock(&scd->scd_lock);

                /* OK to traverse scd_buf_posted without lock, since no one
                 * touches scd_buf_posted now
                 */
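                /* NB: each unlink fires an LNET_EVENT_UNLINK that decrements
                 * scd_buf_nposted; srpc_finish_service() polls until the
                 * count reaches zero before freeing the service
                 */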
                list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
                        LNetMDUnlink(buf->buf_mdh);
        }
}
static int
srpc_send_request(struct srpc_client_rpc *rpc)
{
        struct srpc_event *ev = &rpc->crpc_reqstev;
        int rc;

        ev->ev_fired = 0;
        ev->ev_data = rpc;
        ev->ev_type = SRPC_REQUEST_SENT;

        rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
                                   rpc->crpc_service, &rpc->crpc_reqstmsg,
                                   sizeof(struct srpc_msg), LNET_MD_OP_PUT,
                                   rpc->crpc_dest, LNET_NID_ANY,
                                   &rpc->crpc_reqstmdh, ev);
        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1; /* no more event expected */
        }
        return rc;
}
static int
srpc_prepare_reply(struct srpc_client_rpc *rpc)
{
        struct srpc_event *ev = &rpc->crpc_replyev;
        u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
        int rc;

        ev->ev_fired = 0;
        ev->ev_data = rpc;
        ev->ev_type = SRPC_REPLY_RCVD;

        *id = srpc_next_id();
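        /* the fresh matchbits travel to the server inside the request body
         * (rpyid), so the reply buffer must be posted before the request
         * itself is sent
         */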
        rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
                                    &rpc->crpc_replymsg,
                                    sizeof(struct srpc_msg),
                                    LNET_MD_OP_PUT, rpc->crpc_dest,
                                    &rpc->crpc_replymdh, ev);
        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1; /* no more event expected */
        }
        return rc;
}
static int
srpc_prepare_bulk(struct srpc_client_rpc *rpc)
{
        struct srpc_bulk *bk = &rpc->crpc_bulk;
        struct srpc_event *ev = &rpc->crpc_bulkev;
        __u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
        int rc;
        int opt;

        LASSERT(bk->bk_niov <= LNET_MAX_IOV);

        /* nothing to do */
        if (bk->bk_niov == 0)
                return 0;

        opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
        opt |= LNET_MD_KIOV;

        ev->ev_fired = 0;
        ev->ev_data = rpc;
        ev->ev_type = SRPC_BULK_REQ_RCVD;

        *id = srpc_next_id();

        rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
                                    &bk->bk_iovs[0], bk->bk_niov, opt,
                                    rpc->crpc_dest, &bk->bk_mdh, ev);
        if (rc != 0) {
                LASSERT(rc == -ENOMEM);
                ev->ev_fired = 1; /* no more event expected */
        }
        return rc;
}
static int
srpc_do_bulk(struct srpc_server_rpc *rpc)
{
        struct srpc_event *ev = &rpc->srpc_ev;
        struct srpc_bulk *bk = rpc->srpc_bulk;
        __u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
        int rc;
        int opt;

        LASSERT(bk != NULL);
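        /* NB: bk_sink is from the server's point of view here: a sink server
         * fetches bulk from the client with an active GET, a source server
         * pushes it with an active PUT, matching the passive buffer the
         * client posted in srpc_prepare_bulk()
         */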
        opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
        opt |= LNET_MD_KIOV;

        ev->ev_fired = 0;
        ev->ev_data = rpc;
        ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;

        rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
                                   &bk->bk_iovs[0], bk->bk_niov, opt,
                                   rpc->srpc_peer, rpc->srpc_self,
                                   &bk->bk_mdh, ev);
        if (rc != 0)
                ev->ev_fired = 1; /* no more event expected */
        return rc;
}
/* only called from srpc_handle_rpc */
static void
srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
        struct srpc_service_cd *scd = rpc->srpc_scd;
        struct srpc_service *sv = scd->scd_svc;
        struct srpc_buffer *buffer;

        LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);

        rpc->srpc_status = status;

        CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
                     "Server RPC %p done: service %s, peer %s, status %s:%d\n",
                     rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
                     swi_state2str(rpc->srpc_wi.swi_state), status);
        if (status != 0) {
                spin_lock(&srpc_data.rpc_glock);
                srpc_data.rpc_counters.rpcs_dropped++;
                spin_unlock(&srpc_data.rpc_glock);
        }

        if (rpc->srpc_done != NULL)
                (*rpc->srpc_done)(rpc);
        LASSERT(rpc->srpc_bulk == NULL);

        spin_lock(&scd->scd_lock);
        if (rpc->srpc_reqstbuf != NULL) {
                /* NB: srpc_service_recycle_buffer might drop scd_lock, but
                 * sv can't go away because scd_rpc_active is not empty
                 */
                srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
                rpc->srpc_reqstbuf = NULL;
        }
        list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */

        /*
         * No one can schedule me now since:
         * - I'm not on scd_rpc_active.
         * - all LNet events have been fired.
         * Cancel pending schedules and prevent future schedule attempts:
         */
        LASSERT(rpc->srpc_ev.ev_fired);
        swi_exit_workitem(&rpc->srpc_wi);
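        /* fast path: if a request is already waiting for a free rpc slot,
         * reuse this rpc object right away instead of returning it to
         * scd_rpc_free
         */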
        if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
                buffer = list_entry(scd->scd_buf_blocked.next,
                                    struct srpc_buffer, buf_list);
                list_del(&buffer->buf_list);

                srpc_init_server_rpc(rpc, scd, buffer);
                list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
                swi_schedule_workitem(&rpc->srpc_wi);
        } else {
                list_add(&rpc->srpc_list, &scd->scd_rpc_free);
        }

        spin_unlock(&scd->scd_lock);
}
/* handles an incoming RPC */
static int srpc_handle_rpc(struct swi_workitem *wi)
{
        struct srpc_server_rpc *rpc = container_of(wi, struct srpc_server_rpc,
                                                   srpc_wi);
        struct srpc_service_cd *scd = rpc->srpc_scd;
        struct srpc_service *sv = scd->scd_svc;
        struct srpc_event *ev = &rpc->srpc_ev;
        int rc = 0;

        LASSERT(wi == &rpc->srpc_wi);

        spin_lock(&scd->scd_lock);

        if (sv->sv_shuttingdown || rpc->srpc_aborted) {
                spin_unlock(&scd->scd_lock);

                if (rpc->srpc_bulk != NULL)
                        LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
                LNetMDUnlink(rpc->srpc_replymdh);

                if (ev->ev_fired) { /* no more event, OK to finish */
                        srpc_server_rpc_done(rpc, -ESHUTDOWN);
                        return 1;
                }
                return 0;
        }

        spin_unlock(&scd->scd_lock);
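        /* state machine: NEWBORN -> BULK_STARTED -> REPLY_SUBMITTED -> DONE;
         * returning 0 parks the work item until the next LNet event
         * reschedules it, returning 1 tells the scheduler this RPC is done
         */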
        switch (wi->swi_state) {
        default:
                LBUG();
        case SWI_STATE_NEWBORN: {
                struct srpc_msg *msg;
                struct srpc_generic_reply *reply;

                msg = &rpc->srpc_reqstbuf->buf_msg;
                reply = &rpc->srpc_replymsg.msg_body.reply;

                if (msg->msg_magic == 0) {
                        /* moaned already in srpc_lnet_ev_handler */
                        srpc_server_rpc_done(rpc, EBADMSG);
                        return 1;
                }

                srpc_unpack_msg_hdr(msg);
                if (msg->msg_version != SRPC_MSG_VERSION) {
                        CWARN("Version mismatch: %u, %u expected, from %s\n",
                              msg->msg_version, SRPC_MSG_VERSION,
                              libcfs_id2str(rpc->srpc_peer));
                        reply->status = EPROTO;
                        /* drop through and send reply */
                } else {
                        reply->status = 0;
                        rc = (*sv->sv_handler)(rpc);
                        LASSERT(reply->status == 0 || !rpc->srpc_bulk);
                        if (rc != 0) {
                                srpc_server_rpc_done(rpc, rc);
                                return 1;
                        }
                }

                wi->swi_state = SWI_STATE_BULK_STARTED;

                if (rpc->srpc_bulk != NULL) {
                        rc = srpc_do_bulk(rpc);
                        if (rc == 0)
                                return 0; /* wait for bulk */

                        LASSERT(ev->ev_fired);
                        ev->ev_status = rc;
                }
        }
        fallthrough;
        case SWI_STATE_BULK_STARTED:
                LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired);

                if (rpc->srpc_bulk != NULL) {
                        rc = ev->ev_status;

                        if (sv->sv_bulk_ready != NULL)
                                rc = (*sv->sv_bulk_ready)(rpc, rc);

                        if (rc != 0) {
                                srpc_server_rpc_done(rpc, rc);
                                return 1;
                        }
                }

                wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
                rc = srpc_send_reply(rpc);
                if (rc == 0)
                        return 0; /* wait for reply */
                srpc_server_rpc_done(rpc, rc);
                return 1;
        case SWI_STATE_REPLY_SUBMITTED:
                if (!ev->ev_fired) {
                        CERROR("RPC %p: bulk %p, service %d\n",
                               rpc, rpc->srpc_bulk, sv->sv_id);
                        CERROR("Event: status %d, type %d, lnet %d\n",
                               ev->ev_status, ev->ev_type, ev->ev_lnet);
                        LASSERT(ev->ev_fired);
                }

                wi->swi_state = SWI_STATE_DONE;
                srpc_server_rpc_done(rpc, ev->ev_status);
                return 1;
        }

        return 0;
}
static void
srpc_client_rpc_expired(void *data)
{
        struct srpc_client_rpc *rpc = data;

        CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
              rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
              rpc->crpc_timeout);

        spin_lock(&rpc->crpc_lock);

        rpc->crpc_timeout = 0;
        srpc_abort_rpc(rpc, -ETIMEDOUT);

        spin_unlock(&rpc->crpc_lock);

        spin_lock(&srpc_data.rpc_glock);
        srpc_data.rpc_counters.rpcs_expired++;
        spin_unlock(&srpc_data.rpc_glock);
}
static void
srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
{
        struct stt_timer *timer = &rpc->crpc_timer;

        if (rpc->crpc_timeout == 0)
                return;

        INIT_LIST_HEAD(&timer->stt_list);
        timer->stt_data = rpc;
        timer->stt_func = srpc_client_rpc_expired;
        timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
        stt_add_timer(timer);
}
/*
 * Called with rpc->crpc_lock held.
 *
 * Upon exit the RPC expiry timer is not queued and the handler is not
 * running on any CPU.
 */
static void
srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
{
        /* timer not planted or already exploded */
        if (rpc->crpc_timeout == 0)
                return;

        /* timer successfully defused */
        if (stt_del_timer(&rpc->crpc_timer))
                return;

        /* timer already fired; wait for the handler to finish */
        while (rpc->crpc_timeout != 0) {
                spin_unlock(&rpc->crpc_lock);

                schedule();

                spin_lock(&rpc->crpc_lock);
        }
}
static void
srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
{
        struct swi_workitem *wi = &rpc->crpc_wi;

        LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);

        spin_lock(&rpc->crpc_lock);

        rpc->crpc_closed = 1;
        if (rpc->crpc_status == 0)
                rpc->crpc_status = status;

        srpc_del_client_rpc_timer(rpc);

        CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
                     "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
                     rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
                     swi_state2str(wi->swi_state), rpc->crpc_aborted, status);

        /*
         * No one can schedule me now since:
         * - RPC timer has been defused.
         * - all LNet events have been fired.
         * - crpc_closed has been set, preventing srpc_abort_rpc from
         *   scheduling me.
         * Cancel pending schedules and prevent future schedule attempts:
         */
        LASSERT(!srpc_event_pending(rpc));
        swi_exit_workitem(wi);

        spin_unlock(&rpc->crpc_lock);

        (*rpc->crpc_done)(rpc);
}
/* sends an outgoing RPC */
int
srpc_send_rpc(struct swi_workitem *wi)
{
        int rc = 0;
        struct srpc_client_rpc *rpc;
        struct srpc_msg *reply;
        int do_bulk;

        LASSERT(wi != NULL);

        rpc = container_of(wi, struct srpc_client_rpc, crpc_wi);

        LASSERT(rpc != NULL);
        LASSERT(wi == &rpc->crpc_wi);

        reply = &rpc->crpc_replymsg;
        do_bulk = rpc->crpc_bulk.bk_niov > 0;

        spin_lock(&rpc->crpc_lock);

        if (rpc->crpc_aborted) {
                spin_unlock(&rpc->crpc_lock);
                goto abort;
        }

        spin_unlock(&rpc->crpc_lock);
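        /* client-side state machine: NEWBORN -> REQUEST_SUBMITTED ->
         * REQUEST_SENT -> REPLY_RECEIVED -> DONE, driven by the three
         * events (rqt, rpy, bulk) whose buffers are set up in the
         * NEWBORN step
         */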
        switch (wi->swi_state) {
        default:
                LBUG();
        case SWI_STATE_NEWBORN:
                LASSERT(!srpc_event_pending(rpc));

                rc = srpc_prepare_reply(rpc);
                if (rc != 0) {
                        srpc_client_rpc_done(rpc, rc);
                        return 1;
                }

                rc = srpc_prepare_bulk(rpc);
                if (rc != 0)
                        break;

                wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
                rc = srpc_send_request(rpc);
                break;
        case SWI_STATE_REQUEST_SUBMITTED:
                /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
                 * order; however, they're processed in a strict order:
                 * rqt, rpy, and bulk.
                 */
                if (!rpc->crpc_reqstev.ev_fired)
                        break;

                rc = rpc->crpc_reqstev.ev_status;
                if (rc != 0)
                        break;

                wi->swi_state = SWI_STATE_REQUEST_SENT;
                fallthrough;
        case SWI_STATE_REQUEST_SENT: {
                enum srpc_msg_type type;

                type = srpc_service2reply(rpc->crpc_service);

                if (!rpc->crpc_replyev.ev_fired)
                        break;

                rc = rpc->crpc_replyev.ev_status;
                if (rc != 0)
                        break;

                srpc_unpack_msg_hdr(reply);
                if (reply->msg_type != type ||
                    (reply->msg_magic != SRPC_MSG_MAGIC &&
                     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
                        CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
                              libcfs_id2str(rpc->crpc_dest),
                              reply->msg_type, type,
                              reply->msg_magic, SRPC_MSG_MAGIC);
                        rc = -EBADMSG;
                        break;
                }

                if (do_bulk && reply->msg_body.reply.status != 0) {
                        CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
                              reply->msg_body.reply.status,
                              libcfs_id2str(rpc->crpc_dest));
                        LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
                }

                wi->swi_state = SWI_STATE_REPLY_RECEIVED;
        }
        fallthrough;
        case SWI_STATE_REPLY_RECEIVED:
                if (do_bulk && !rpc->crpc_bulkev.ev_fired)
                        break;

                rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;

                /* Bulk buffer was unlinked due to remote error. Clear error
                 * since reply buffer still contains valid data.
                 * NB rpc->crpc_done shouldn't look into bulk data in case of
                 * remote error.
                 */
                if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
                    rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
                        rc = 0;

                wi->swi_state = SWI_STATE_DONE;
                srpc_client_rpc_done(rpc, rc);
                return 1;
        }
        if (rc != 0) {
                spin_lock(&rpc->crpc_lock);
                srpc_abort_rpc(rpc, rc);
                spin_unlock(&rpc->crpc_lock);
        }

abort:
        if (rpc->crpc_aborted) {
                LNetMDUnlink(rpc->crpc_reqstmdh);
                LNetMDUnlink(rpc->crpc_replymdh);
                LNetMDUnlink(rpc->crpc_bulk.bk_mdh);

                if (!srpc_event_pending(rpc)) {
                        srpc_client_rpc_done(rpc, -EINTR);
                        return 1;
                }
        }
        return 0;
}
struct srpc_client_rpc *
srpc_create_client_rpc(struct lnet_process_id peer, int service,
                       int nbulkiov, int bulklen,
                       void (*rpc_done)(struct srpc_client_rpc *),
                       void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
        struct srpc_client_rpc *rpc;

        LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
                                   crpc_bulk.bk_iovs[nbulkiov]));
        if (rpc == NULL)
                return NULL;

        srpc_init_client_rpc(rpc, peer, service, nbulkiov,
                             bulklen, rpc_done, rpc_fini, priv);
        return rpc;
}
/* called with rpc->crpc_lock held */
void
srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
{
        LASSERT(why != 0);

        if (rpc->crpc_aborted || /* already aborted */
            rpc->crpc_closed)    /* callback imminent */
                return;

        CDEBUG(D_NET,
               "Aborting RPC: service %d, peer %s, state %s, why %d\n",
               rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
               swi_state2str(rpc->crpc_wi.swi_state), why);

        rpc->crpc_aborted = 1;
        rpc->crpc_status = why;
        swi_schedule_workitem(&rpc->crpc_wi);
}
/* called with rpc->crpc_lock held */
void
srpc_post_rpc(struct srpc_client_rpc *rpc)
{
        LASSERT(!rpc->crpc_aborted);
        LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

        CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
               libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
               rpc->crpc_timeout);

        srpc_add_client_rpc_timer(rpc);
        swi_schedule_workitem(&rpc->crpc_wi);
}
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
        struct srpc_event *ev = &rpc->srpc_ev;
        struct srpc_msg *msg = &rpc->srpc_replymsg;
        struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
        struct srpc_service_cd *scd = rpc->srpc_scd;
        struct srpc_service *sv = scd->scd_svc;
        __u64 rpyid;
        int rc;

        LASSERT(buffer != NULL);
        rpyid = buffer->buf_msg.msg_body.reqst.rpyid;

        spin_lock(&scd->scd_lock);

        if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
                /* Repost buffer before replying since test client
                 * might send me another RPC once it gets the reply
                 */
                if (srpc_service_post_buffer(scd, buffer) != 0)
                        CWARN("Failed to repost %s buffer\n", sv->sv_name);
                rpc->srpc_reqstbuf = NULL;
        }

        spin_unlock(&scd->scd_lock);

        ev->ev_fired = 0;
        ev->ev_data = rpc;
        ev->ev_type = SRPC_REPLY_SENT;

        msg->msg_magic = SRPC_MSG_MAGIC;
        msg->msg_version = SRPC_MSG_VERSION;
        msg->msg_type = srpc_service2reply(sv->sv_id);

        rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
                                   sizeof(*msg), LNET_MD_OP_PUT,
                                   rpc->srpc_peer, rpc->srpc_self,
                                   &rpc->srpc_replymdh, ev);
        if (rc != 0)
                ev->ev_fired = 1; /* no more event expected */
        return rc;
}
/* NB: when in kernel, always called in thread context with LNET_LOCK() held */
static void
srpc_lnet_ev_handler(struct lnet_event *ev)
{
        struct srpc_service_cd *scd;
        struct srpc_event *rpcev = ev->md_user_ptr;
        struct srpc_client_rpc *crpc;
        struct srpc_server_rpc *srpc;
        struct srpc_buffer *buffer;
        struct srpc_service *sv;
        struct srpc_msg *msg;
        enum srpc_msg_type type;

        LASSERT(!in_interrupt());

        if (ev->status != 0) {
                __u32 errors;

                spin_lock(&srpc_data.rpc_glock);
                if (ev->status != -ECANCELED) /* cancellation is not an error */
                        srpc_data.rpc_counters.errors++;
                errors = srpc_data.rpc_counters.errors;
                spin_unlock(&srpc_data.rpc_glock);

                CNETERR("LNet event status %d type %d, RPC errors %u\n",
                        ev->status, ev->type, errors);
        }

        rpcev->ev_lnet = ev->type;
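        /* rpcev->ev_type was chosen when the MD was posted and tells us
         * which sRPC-level operation this LNet event completes
         */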
        switch (rpcev->ev_type) {
        default:
                CERROR("Unknown event: status %d, type %d, lnet %d\n",
                       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
                LBUG();
        case SRPC_REQUEST_SENT:
                if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
                        spin_lock(&srpc_data.rpc_glock);
                        srpc_data.rpc_counters.rpcs_sent++;
                        spin_unlock(&srpc_data.rpc_glock);
                }
                fallthrough;
        case SRPC_REPLY_RCVD:
        case SRPC_BULK_REQ_RCVD:
                crpc = rpcev->ev_data;

                if (rpcev != &crpc->crpc_reqstev &&
                    rpcev != &crpc->crpc_replyev &&
                    rpcev != &crpc->crpc_bulkev) {
                        CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
                               rpcev, crpc, &crpc->crpc_reqstev,
                               &crpc->crpc_replyev, &crpc->crpc_bulkev);
                        CERROR("Bad event: status %d, type %d, lnet %d\n",
                               rpcev->ev_status, rpcev->ev_type,
                               rpcev->ev_lnet);
                        LBUG();
                }

                spin_lock(&crpc->crpc_lock);

                LASSERT(rpcev->ev_fired == 0);
                rpcev->ev_fired = 1;
                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
                                   -EINTR : ev->status;
                swi_schedule_workitem(&crpc->crpc_wi);

                spin_unlock(&crpc->crpc_lock);
                break;
        case SRPC_REQUEST_RCVD:
                scd = rpcev->ev_data;
                sv = scd->scd_svc;

                LASSERT(rpcev == &scd->scd_ev);

                spin_lock(&scd->scd_lock);

                LASSERT(ev->unlinked);
                LASSERT(ev->type == LNET_EVENT_PUT ||
                        ev->type == LNET_EVENT_UNLINK);
                LASSERT(ev->type != LNET_EVENT_UNLINK ||
                        sv->sv_shuttingdown);

                buffer = container_of(ev->md_start, struct srpc_buffer,
                                      buf_msg);
                buffer->buf_peer = lnet_pid_to_pid4(&ev->source);
                buffer->buf_self = lnet_nid_to_nid4(&ev->target.nid);

                LASSERT(scd->scd_buf_nposted > 0);
                scd->scd_buf_nposted--;

                if (sv->sv_shuttingdown) {
                        /* Leave buffer on scd->scd_buf_posted since
                         * srpc_finish_service needs to traverse it
                         */
                        spin_unlock(&scd->scd_lock);
                        break;
                }

                if (scd->scd_buf_err_stamp != 0 &&
                    scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
                        /* re-enable adding buffer */
                        scd->scd_buf_err_stamp = 0;
                        scd->scd_buf_err = 0;
                }

                if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
                    scd->scd_buf_adjust == 0 &&
                    scd->scd_buf_nposted < scd->scd_buf_low) {
                        scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
                                                  SFW_TEST_WI_MIN);
                        swi_schedule_workitem(&scd->scd_buf_wi);
                }

                list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
                msg = &buffer->buf_msg;
                type = srpc_service2request(sv->sv_id);

                if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
                    (msg->msg_type != type &&
                     msg->msg_type != __swab32(type)) ||
                    (msg->msg_magic != SRPC_MSG_MAGIC &&
                     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
                        CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
                               sv->sv_name, libcfs_idstr(&ev->initiator),
                               ev->status, ev->mlength,
                               msg->msg_type, msg->msg_magic);

                        /* NB can't call srpc_service_recycle_buffer here since
                         * it may call LNetM[DE]Attach. The invalid magic tells
                         * srpc_handle_rpc to drop this RPC
                         */
                        msg->msg_magic = 0;
                }

                if (!list_empty(&scd->scd_rpc_free)) {
                        srpc = list_entry(scd->scd_rpc_free.next,
                                          struct srpc_server_rpc,
                                          srpc_list);
                        list_del(&srpc->srpc_list);

                        srpc_init_server_rpc(srpc, scd, buffer);
                        list_add_tail(&srpc->srpc_list,
                                      &scd->scd_rpc_active);
                        swi_schedule_workitem(&srpc->srpc_wi);
                } else {
                        list_add_tail(&buffer->buf_list,
                                      &scd->scd_buf_blocked);
                }

                spin_unlock(&scd->scd_lock);

                spin_lock(&srpc_data.rpc_glock);
                srpc_data.rpc_counters.rpcs_rcvd++;
                spin_unlock(&srpc_data.rpc_glock);
                break;
        case SRPC_BULK_GET_RPLD:
                LASSERT(ev->type == LNET_EVENT_SEND ||
                        ev->type == LNET_EVENT_REPLY ||
                        ev->type == LNET_EVENT_UNLINK);

                if (!ev->unlinked)
                        break; /* wait for final event */
                fallthrough;
        case SRPC_BULK_PUT_SENT:
                if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
                        spin_lock(&srpc_data.rpc_glock);

                        if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
                                srpc_data.rpc_counters.bulk_get += ev->mlength;
                        else
                                srpc_data.rpc_counters.bulk_put += ev->mlength;

                        spin_unlock(&srpc_data.rpc_glock);
                }
                fallthrough;
        case SRPC_REPLY_SENT:
                srpc = rpcev->ev_data;
                scd = srpc->srpc_scd;

                LASSERT(rpcev == &srpc->srpc_ev);

                spin_lock(&scd->scd_lock);

                rpcev->ev_fired = 1;
                rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
                                   -EINTR : ev->status;
                swi_schedule_workitem(&srpc->srpc_wi);

                spin_unlock(&scd->scd_lock);
                break;
        }
}
int
srpc_startup(void)
{
        int rc;

        memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
        spin_lock_init(&srpc_data.rpc_glock);

        /* 1 second pause to avoid timestamp reuse */
        schedule_timeout_uninterruptible(cfs_time_seconds(1));
        srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48;
        srpc_data.rpc_state = SRPC_STATE_NONE;

        rc = LNetNIInit(LNET_PID_LUSTRE);
        if (rc < 0) {
                CERROR("LNetNIInit() has failed: %d\n", rc);
                return rc;
        }

        srpc_data.rpc_state = SRPC_STATE_NI_INIT;

        srpc_data.rpc_lnet_handler = srpc_lnet_ev_handler;

        rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
        LASSERT(rc == 0);
        rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
        LASSERT(rc == 0);

        srpc_data.rpc_state = SRPC_STATE_EQ_INIT;

        rc = stt_startup();
        if (rc != 0)
                srpc_shutdown();
        else
                srpc_data.rpc_state = SRPC_STATE_RUNNING;

        return rc;
}
void
srpc_shutdown(void)
{
        int i;
        int rc;
        int state;

        state = srpc_data.rpc_state;
        srpc_data.rpc_state = SRPC_STATE_STOPPING;

        switch (state) {
        default:
                LBUG();
        case SRPC_STATE_RUNNING:
                spin_lock(&srpc_data.rpc_glock);

                for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
                        struct srpc_service *sv = srpc_data.rpc_services[i];

                        LASSERTF(sv == NULL,
                                 "service not empty: id %d, name %s\n",
                                 i, sv->sv_name);
                }

                spin_unlock(&srpc_data.rpc_glock);

                stt_shutdown();
                fallthrough;

        case SRPC_STATE_EQ_INIT:
                rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
                rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
                LASSERT(rc == 0);
                lnet_assert_handler_unused(srpc_data.rpc_lnet_handler);
                fallthrough;

        case SRPC_STATE_NI_INIT:
                LNetNIFini();
        }
}