/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 *
 * 2012-05-13: Liang Zhen <liang@whamcloud.com>
 * - percpt data for service to improve SMP performance
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "selftest.h"

enum srpc_state {
	SRPC_STATE_NONE,
	SRPC_STATE_NI_INIT,
	SRPC_STATE_EQ_INIT,
	SRPC_STATE_RUNNING,
	SRPC_STATE_STOPPING,
};
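
/* All module-wide RPC state lives in the single static instance below;
 * rpc_glock only guards the service table, while the counters and the
 * matchbits generator are atomics, so the hot paths never take the global
 * lock. Per-service, per-CPT state lives in struct srpc_service_cd. */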

static struct smoketest_rpc {
	spinlock_t		rpc_glock;	/* global lock */
	struct srpc_service	*rpc_services[SRPC_SERVICE_MAX_ID + 1];
	lnet_handler_t		rpc_lnet_handler; /* _the_ LNet event handler */
	enum srpc_state		rpc_state;
	atomic_t		rpc_counters32[SRPC_COUNTER32_MAX];
	atomic64_t		rpc_counters64[SRPC_COUNTER64_MAX];
	atomic64_t		rpc_matchbits;	/* matchbits counter */
} srpc_data;

#define RPC_STAT32(a) \
	srpc_data.rpc_counters32[(a)]

#define GET_RPC_STAT32(a) \
	atomic_read(&srpc_data.rpc_counters32[(a)])

#define GET_RPC_STAT64(a) \
	atomic64_read(&srpc_data.rpc_counters64[(a)])
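
/* Event counts are kept in the 32-bit array (bumped with, e.g.,
 * atomic_inc(&RPC_STAT32(SRPC_RPC_SENT))), while bulk byte totals use the
 * 64-bit array so they cannot wrap on large transfers (see the
 * atomic64_add() in srpc_lnet_ev_handler()). */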

static inline int
srpc_serv_portal(int svc_id)
{
	return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
	       SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
}

/* forward ref's */
static int srpc_handle_rpc(struct swi_workitem *wi);

void srpc_get_counters(struct srpc_counters *cnt)
{
	cnt->errors = GET_RPC_STAT32(SRPC_ERROR);
	cnt->rpcs_sent = GET_RPC_STAT32(SRPC_RPC_SENT);
	cnt->rpcs_rcvd = GET_RPC_STAT32(SRPC_RPC_RCVD);
	cnt->rpcs_dropped = GET_RPC_STAT32(SRPC_RPC_DROP);
	cnt->rpcs_expired = GET_RPC_STAT32(SRPC_RPC_EXPIRED);

	cnt->bulk_get = GET_RPC_STAT64(SRPC_BULK_GET);
	cnt->bulk_put = GET_RPC_STAT64(SRPC_BULK_PUT);
}

static void
srpc_init_bulk_page(struct srpc_bulk *bk, int i, int off, int nob)
{
	LASSERT(off < PAGE_SIZE);
	LASSERT(nob > 0 && nob <= PAGE_SIZE);

	bk->bk_iovs[i].bv_offset = off;
	bk->bk_iovs[i].bv_len = nob;
}

void
srpc_free_bulk(struct srpc_bulk *bk)
{
	int i;
	struct page *pg;

	LASSERT(bk != NULL);

	for (i = 0; i < bk->bk_niov; i++) {
		pg = bk->bk_iovs[i].bv_page;
		if (pg == NULL)
			break;

		__free_page(pg);
	}

	LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
}

struct srpc_bulk *
srpc_alloc_bulk(int cpt, unsigned int bulk_npg)
{
	struct srpc_bulk *bk;
	int i;

	LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);

	LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
			 offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
	if (bk == NULL) {
		CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
		return NULL;
	}

	memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
	bk->bk_niov = bulk_npg;

	for (i = 0; i < bulk_npg; i++) {
		struct page *pg;

		pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_KERNEL);
		if (pg == NULL) {
			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
			srpc_free_bulk(bk);
			return NULL;
		}

		bk->bk_iovs[i].bv_page = pg;
	}

	return bk;
}

void
srpc_init_bulk(struct srpc_bulk *bk, unsigned int bulk_off,
	       unsigned int bulk_npg, unsigned int bulk_len, int sink)
{
	int i;

	LASSERT(bk != NULL);
	LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);

	bk->bk_sink = sink;
	bk->bk_len = bulk_len;
	bk->bk_niov = bulk_npg;

	for (i = 0; i < bulk_npg && bulk_len > 0; i++) {
		int nob;

		LASSERT(bk->bk_iovs[i].bv_page != NULL);

		nob = min_t(unsigned, bulk_off + bulk_len, PAGE_SIZE) -
		      bulk_off;

		srpc_init_bulk_page(bk, i, bulk_off, nob);
		bulk_len -= nob;
		bulk_off = 0;
	}
}
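
/* Example: with bulk_off == 1000 and bulk_len == PAGE_SIZE, the first
 * iteration maps PAGE_SIZE - 1000 bytes at offset 1000 into page 0, then
 * bulk_off is reset to 0 so the remaining 1000 bytes land at the start of
 * page 1: nob = min(bulk_off + bulk_len, PAGE_SIZE) - bulk_off. */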

static inline __u64
srpc_next_id(void)
{
	return atomic64_inc_return(&srpc_data.rpc_matchbits);
}
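
/* NB: the matchbits counter is seeded in srpc_startup() with
 * ktime_get_real_seconds() << 48, so match bits stay unique across module
 * reloads as long as restarts are at least one second apart (hence the
 * deliberate one second pause in srpc_startup()). */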

static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
		     struct srpc_service_cd *scd,
		     struct srpc_buffer *buffer)
{
	swi_init_workitem(&rpc->srpc_wi, srpc_handle_rpc,
			  srpc_serv_is_framework(scd->scd_svc) ?
			  lst_sched_serial : lst_sched_test[scd->scd_cpt]);

	rpc->srpc_ev.ev_fired = 1; /* no event expected now */

	rpc->srpc_scd = scd;
	rpc->srpc_reqstbuf = buffer;
	rpc->srpc_peer = buffer->buf_peer;
	rpc->srpc_self = buffer->buf_self;
	LNetInvalidateMDHandle(&rpc->srpc_replymdh);

	rpc->srpc_aborted = 0;
	rpc->srpc_status = 0;
}

static void
srpc_service_fini(struct srpc_service *svc)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	struct srpc_buffer *buf;
	struct list_head *q;
	int i;

	if (svc->sv_cpt_data == NULL)
		return;

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		while (1) {
			if (!list_empty(&scd->scd_buf_posted))
				q = &scd->scd_buf_posted;
			else if (!list_empty(&scd->scd_buf_blocked))
				q = &scd->scd_buf_blocked;
			else
				break;

			while (!list_empty(q)) {
				buf = list_entry(q->next,
						 struct srpc_buffer,
						 buf_list);
				list_del(&buf->buf_list);
				LIBCFS_FREE(buf, sizeof(*buf));
			}
		}

		LASSERT(list_empty(&scd->scd_rpc_active));

		while (!list_empty(&scd->scd_rpc_free)) {
			rpc = list_entry(scd->scd_rpc_free.next,
					 struct srpc_server_rpc,
					 srpc_list);
			list_del(&rpc->srpc_list);
			if (svc->sv_srpc_fini)
				svc->sv_srpc_fini(rpc);
			LIBCFS_FREE(rpc, sizeof(*rpc));
		}
	}

	cfs_percpt_free(svc->sv_cpt_data);
	svc->sv_cpt_data = NULL;
}

static int
srpc_service_nrpcs(struct srpc_service *svc)
{
	int nrpcs = svc->sv_wi_total / svc->sv_ncpts;

	return srpc_serv_is_framework(svc) ?
	       max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
}

int srpc_add_buffer(struct swi_workitem *wi);

static int
srpc_service_init(struct srpc_service *svc)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int nrpcs;
	int i;
	int j;

	svc->sv_shuttingdown = 0;

	svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
					    sizeof(struct srpc_service_cd));
	if (svc->sv_cpt_data == NULL)
		return -ENOMEM;

	svc->sv_ncpts = srpc_serv_is_framework(svc) ?
			1 : cfs_cpt_number(lnet_cpt_table());
	nrpcs = srpc_service_nrpcs(svc);

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		scd->scd_cpt = i;
		scd->scd_svc = svc;
		spin_lock_init(&scd->scd_lock);
		INIT_LIST_HEAD(&scd->scd_rpc_free);
		INIT_LIST_HEAD(&scd->scd_rpc_active);
		INIT_LIST_HEAD(&scd->scd_buf_posted);
		INIT_LIST_HEAD(&scd->scd_buf_blocked);

		scd->scd_ev.ev_data = scd;
		scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;

		/* NB: don't use lst_sched_serial for adding buffer,
		 * see details in srpc_service_add_buffers() */
		swi_init_workitem(&scd->scd_buf_wi,
				  srpc_add_buffer, lst_sched_test[i]);

		if (i != 0 && srpc_serv_is_framework(svc)) {
			/* NB: a framework service only needs srpc_service_cd
			 * for one partition, but we allocate for all to make
			 * it easier to implement; it wastes a little memory
			 * but nobody should care about this */
			continue;
		}

		for (j = 0; j < nrpcs; j++) {
			LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
					 i, sizeof(*rpc));
			if (rpc == NULL ||
			    (svc->sv_srpc_init && svc->sv_srpc_init(rpc, i))) {
				srpc_service_fini(svc);
				return -ENOMEM;
			}
			list_add(&rpc->srpc_list, &scd->scd_rpc_free);
		}
	}

	return 0;
}

int
srpc_add_service(struct srpc_service *sv)
{
	int id = sv->sv_id;

	LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);

	if (srpc_service_init(sv) != 0)
		return -ENOMEM;

	spin_lock(&srpc_data.rpc_glock);

	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	if (srpc_data.rpc_services[id] != NULL) {
		spin_unlock(&srpc_data.rpc_glock);
		goto failed;
	}

	srpc_data.rpc_services[id] = sv;
	spin_unlock(&srpc_data.rpc_glock);

	CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
	return 0;

failed:
	srpc_service_fini(sv);
	return -EBUSY;
}

int
srpc_remove_service(struct srpc_service *sv)
{
	int id = sv->sv_id;

	spin_lock(&srpc_data.rpc_glock);

	if (srpc_data.rpc_services[id] != sv) {
		spin_unlock(&srpc_data.rpc_glock);
		return -ENOENT;
	}

	srpc_data.rpc_services[id] = NULL;
	spin_unlock(&srpc_data.rpc_glock);
	return 0;
}
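
/* Illustrative server-side service lifecycle (a sketch for exposition only;
 * the real callers live in the selftest framework, and the exact wait loop
 * is the caller's business):
 *
 *	// sv_id, sv_name, sv_handler, ... filled in by the owner
 *	srpc_add_service(sv);             // register, allocate percpt data
 *	srpc_service_add_buffers(sv, n);  // post n request buffers
 *	...
 *	srpc_shutdown_service(sv);        // stop new RPCs, unlink buffers
 *	while (!srpc_finish_service(sv))  // poll until active RPCs drain
 *		...
 *	srpc_remove_service(sv);          // unregister
 */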

static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
		       int len, int options, struct lnet_process_id peer4,
		       struct lnet_handle_md *mdh, struct srpc_event *ev)
{
	int rc;
	struct lnet_md md;
	struct lnet_me *me;
	struct lnet_processid peer;

	peer.pid = peer4.pid;
	lnet_nid4_to_nid(peer4.nid, &peer.nid);

	me = LNetMEAttach(portal, &peer, matchbits, 0, LNET_UNLINK,
			  local ? LNET_INS_LOCAL : LNET_INS_AFTER);
	if (IS_ERR(me)) {
		rc = PTR_ERR(me);
		CERROR("LNetMEAttach failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	md.threshold = 1;
	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.options = options;
	md.handler = srpc_data.rpc_lnet_handler;

	rc = LNetMDAttach(me, &md, LNET_UNLINK, mdh);
	if (rc != 0) {
		CERROR("LNetMDAttach failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	CDEBUG(D_NET,
	       "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
	       libcfs_id2str(peer4), portal, matchbits);
	return 0;
}

static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
		      int options, struct lnet_process_id peer4,
		      lnet_nid_t self4, struct lnet_handle_md *mdh,
		      struct srpc_event *ev)
{
	int rc;
	struct lnet_md md;
	struct lnet_nid self;
	struct lnet_processid peer;

	lnet_nid4_to_nid(self4, &self);
	lnet_pid4_to_pid(peer4, &peer);

	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.handler = srpc_data.rpc_lnet_handler;
	md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
	md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);

	rc = LNetMDBind(&md, LNET_UNLINK, mdh);
	if (rc != 0) {
		CERROR("LNetMDBind failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	/* This is kind of an abuse of the LNET_MD_OP_{PUT,GET} options:
	 * they're only meaningful for MDs attached to an ME (i.e. passive
	 * buffers), so we strip them from md.options and use them here only
	 * to pick the operation to initiate.
	 */
	if ((options & LNET_MD_OP_PUT) != 0) {
		rc = LNetPut(&self, *mdh, LNET_NOACK_REQ, &peer,
			     portal, matchbits, 0, 0);
	} else {
		LASSERT((options & LNET_MD_OP_GET) != 0);

		rc = LNetGet(&self, *mdh, &peer, portal, matchbits, 0, false);
	}

	if (rc != 0) {
		CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
		       ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
		       libcfs_id2str(peer4), portal, matchbits, rc);

		/* The forthcoming unlink event will complete this operation
		 * with failure, so fall through and return success here.
		 */
		rc = LNetMDUnlink(*mdh);
		LASSERT(rc == 0);
	} else {
		CDEBUG(D_NET,
		       "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
		       libcfs_id2str(peer4), portal, matchbits);
	}
	return 0;
}
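
/* NB: an LNetGet() fires two events on its MD (SEND and then REPLY when the
 * data arrives), which is why md.threshold is 2 for the GET case above,
 * while a PUT with LNET_NOACK_REQ fires a single SEND event (threshold 1). */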

static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
			 struct lnet_handle_md *mdh, struct srpc_event *ev)
{
	struct lnet_process_id any = {0};

	any.nid = LNET_NID_ANY;
	any.pid = LNET_PID_ANY;

	return srpc_post_passive_rdma(srpc_serv_portal(service),
				      local, service, buf, len,
				      LNET_MD_OP_PUT, any, mdh, ev);
}

static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_msg *msg = &buf->buf_msg;
	int rc;

	LNetInvalidateMDHandle(&buf->buf_mdh);
	list_add(&buf->buf_list, &scd->scd_buf_posted);
	scd->scd_buf_nposted++;
	spin_unlock(&scd->scd_lock);

	rc = srpc_post_passive_rqtbuf(sv->sv_id,
				      !srpc_serv_is_framework(sv),
				      msg, sizeof(*msg), &buf->buf_mdh,
				      &scd->scd_ev);

	/* At this point, an RPC (new or delayed) may have arrived in
	 * msg and its event handler may have been called. So we must add
	 * buf to scd_buf_posted _before_ dropping scd_lock */

	spin_lock(&scd->scd_lock);

	if (rc == 0) {
		if (!sv->sv_shuttingdown)
			return 0;

		spin_unlock(&scd->scd_lock);
		/* srpc_shutdown_service might have tried to unlink me
		 * when my buf_mdh was still invalid */
		LNetMDUnlink(buf->buf_mdh);
		spin_lock(&scd->scd_lock);
		return 0;
	}

	scd->scd_buf_nposted--;
	if (sv->sv_shuttingdown)
		return rc; /* don't allow to change scd_buf_posted */

	list_del(&buf->buf_list);
	spin_unlock(&scd->scd_lock);

	LIBCFS_FREE(buf, sizeof(*buf));

	spin_lock(&scd->scd_lock);
	return rc;
}

int
srpc_add_buffer(struct swi_workitem *wi)
{
	struct srpc_service_cd *scd = container_of(wi, struct srpc_service_cd,
						   scd_buf_wi);
	struct srpc_buffer *buf;
	int rc = 0;

	/* It's called by workitem scheduler threads; these threads have
	 * been set CPT affinity, so buffers will be posted on the
	 * CPT-local list of the portal */
	spin_lock(&scd->scd_lock);

	while (scd->scd_buf_adjust > 0 &&
	       !scd->scd_svc->sv_shuttingdown) {
		scd->scd_buf_adjust--; /* consume it */
		scd->scd_buf_posting++;

		spin_unlock(&scd->scd_lock);

		LIBCFS_ALLOC(buf, sizeof(*buf));
		if (buf == NULL) {
			CERROR("Failed to add new buf to service: %s\n",
			       scd->scd_svc->sv_name);
			spin_lock(&scd->scd_lock);
			rc = -ENOMEM;
			break;
		}

		spin_lock(&scd->scd_lock);
		if (scd->scd_svc->sv_shuttingdown) {
			spin_unlock(&scd->scd_lock);
			LIBCFS_FREE(buf, sizeof(*buf));

			spin_lock(&scd->scd_lock);
			rc = -ESHUTDOWN;
			break;
		}

		rc = srpc_service_post_buffer(scd, buf);
		if (rc != 0)
			break; /* buf has been freed inside */

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
		scd->scd_buf_total++;
		scd->scd_buf_low = max(2, scd->scd_buf_total / 4);
	}

	if (rc != 0) {
		scd->scd_buf_err_stamp = ktime_get_real_seconds();
		scd->scd_buf_err = rc;

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
	}

	spin_unlock(&scd->scd_lock);
	return 0;
}

int
srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd *scd;
	int rc = 0;
	int i;

	LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		scd->scd_buf_err = 0;
		scd->scd_buf_err_stamp = 0;
		scd->scd_buf_posting = 0;
		scd->scd_buf_adjust = nbuffer;
		/* start to post buffers */
		swi_schedule_workitem(&scd->scd_buf_wi);
		spin_unlock(&scd->scd_lock);

		/* a framework service only posts buffers for one partition */
		if (srpc_serv_is_framework(sv))
			break;
	}

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		/*
		 * NB: srpc_service_add_buffers() can be called inside the
		 * thread context of lst_sched_serial, and we don't normally
		 * allow sleeping inside the thread context of a WI scheduler
		 * because it blocks the current scheduler thread from doing
		 * anything else; even worse, it could deadlock if it's
		 * waiting on the result of another WI of the same scheduler.
		 * However, it's safe here because scd_buf_wi is scheduled by
		 * a thread in a different WI scheduler (lst_sched_test), so
		 * we don't have any risk of deadlock, though this could
		 * block all WIs pending on lst_sched_serial for a moment,
		 * which is not good but not fatal.
		 */
		lst_wait_until(scd->scd_buf_err != 0 ||
			       (scd->scd_buf_adjust == 0 &&
				scd->scd_buf_posting == 0),
			       scd->scd_lock, "waiting for adding buffer\n");

		if (scd->scd_buf_err != 0 && rc == 0)
			rc = scd->scd_buf_err;

		spin_unlock(&scd->scd_lock);
	}

	return rc;
}

void
srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd *scd;
	int num;
	int i;

	LASSERT(!sv->sv_shuttingdown);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		num = scd->scd_buf_total + scd->scd_buf_posting;
		scd->scd_buf_adjust -= min(nbuffer, num);

		spin_unlock(&scd->scd_lock);
	}
}

/* returns 1 if sv has finished, otherwise 0 */
int
srpc_finish_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int i;

	LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (scd->scd_buf_nposted > 0) {
			CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
			       scd->scd_buf_nposted);
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (list_empty(&scd->scd_rpc_active)) {
			spin_unlock(&scd->scd_lock);
			continue;
		}

		rpc = list_entry(scd->scd_rpc_active.next,
				 struct srpc_server_rpc, srpc_list);
		CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
			rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
			swi_state2str(rpc->srpc_wi.swi_state),
			rpc->srpc_wi.swi_workitem.wi_scheduled,
			rpc->srpc_wi.swi_workitem.wi_running,
			rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
			rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
		spin_unlock(&scd->scd_lock);
		return 0;
	}

	/* no lock needed from now on */
	srpc_service_fini(sv);
	return 1;
}

/* called with scd->scd_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd,
			    struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
	if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
		if (srpc_service_post_buffer(scd, buf) != 0) {
			CWARN("Failed to post %s buffer\n",
			      scd->scd_svc->sv_name);
		}
		return;
	}

	/* service is shutting down, or we want to recycle some buffers */
	scd->scd_buf_total--;

	if (scd->scd_buf_adjust < 0) {
		scd->scd_buf_adjust++;
		if (scd->scd_buf_adjust < 0 &&
		    scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
			CDEBUG(D_INFO,
			       "Try to recycle %d buffers but nothing left\n",
			       scd->scd_buf_adjust);
			scd->scd_buf_adjust = 0;
		}
	}

	spin_unlock(&scd->scd_lock);
	LIBCFS_FREE(buf, sizeof(*buf));
	spin_lock(&scd->scd_lock);
}

void
srpc_abort_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int i;

	CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/* schedule in-flight RPCs to notice the abort. NB: this
		 * races with incoming RPCs; a complete fix should make test
		 * RPCs carry the session ID in their headers
		 */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
			rpc->srpc_aborted = 1;
			swi_schedule_workitem(&rpc->srpc_wi);
		}

		spin_unlock(&scd->scd_lock);
	}
}

void
srpc_shutdown_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	struct srpc_buffer *buf;
	int i;

	CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_lock(&scd->scd_lock);

	sv->sv_shuttingdown = 1; /* i.e. no new active RPC */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_unlock(&scd->scd_lock);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/* schedule in-flight RPCs to notice the shutdown */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
			swi_schedule_workitem(&rpc->srpc_wi);

		spin_unlock(&scd->scd_lock);

		/* OK to traverse scd_buf_posted without lock, since no one
		 * touches scd_buf_posted now
		 */
		list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
			LNetMDUnlink(buf->buf_mdh);
	}
}
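
/* Taking every CPT's scd_lock before setting sv_shuttingdown (and only then
 * dropping them all) acts as a barrier: any thread that held a CPT lock has
 * finished its critical section, so once the flag is visible no new RPC can
 * be activated on any partition. */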

static int
srpc_send_request(struct srpc_client_rpc *rpc)
{
	struct srpc_event *ev = &rpc->crpc_reqstev;
	int rc;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REQUEST_SENT;

	rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
				   rpc->crpc_service, &rpc->crpc_reqstmsg,
				   sizeof(struct srpc_msg), LNET_MD_OP_PUT,
				   rpc->crpc_dest, LNET_NID_ANY,
				   &rpc->crpc_reqstmdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1;  /* no more event expected */
	}
	return rc;
}

static int
srpc_prepare_reply(struct srpc_client_rpc *rpc)
{
	struct srpc_event *ev = &rpc->crpc_replyev;
	u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
	int rc;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REPLY_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &rpc->crpc_replymsg,
				    sizeof(struct srpc_msg),
				    LNET_MD_OP_PUT, rpc->crpc_dest,
				    &rpc->crpc_replymdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1;  /* no more event expected */
	}
	return rc;
}

static int
srpc_prepare_bulk(struct srpc_client_rpc *rpc)
{
	struct srpc_bulk *bk = &rpc->crpc_bulk;
	struct srpc_event *ev = &rpc->crpc_bulkev;
	__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
	int rc;
	int opt;

	LASSERT(bk->bk_niov <= LNET_MAX_IOV);

	/* nothing to do */
	if (bk->bk_niov == 0)
		return 0;

	opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_BULK_REQ_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &bk->bk_iovs[0], bk->bk_niov, opt,
				    rpc->crpc_dest, &bk->bk_mdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1;  /* no more event expected */
	}
	return rc;
}

static int
srpc_do_bulk(struct srpc_server_rpc *rpc)
{
	struct srpc_event *ev = &rpc->srpc_ev;
	struct srpc_bulk *bk = rpc->srpc_bulk;
	__u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
	int rc;
	int opt;

	LASSERT(bk != NULL);

	opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
				   &bk->bk_iovs[0], bk->bk_niov, opt,
				   rpc->srpc_peer, rpc->srpc_self,
				   &bk->bk_mdh, ev);
	if (rc != 0)
		ev->ev_fired = 1;  /* no more event expected */
	return rc;
}
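
/* NB: bulk direction is mirrored between the two sides. The server always
 * initiates the transfer: if its bulk descriptor is a sink it GETs the data
 * from the client's buffer, otherwise it PUTs into it. The client's passive
 * MD is set up with the complementary operation in srpc_prepare_bulk()
 * (sink ? PUT : GET). */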

/* only called from srpc_handle_rpc */
static void
srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_buffer *buffer;

	LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);

	rpc->srpc_status = status;

	CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
		     "Server RPC %p done: service %s, peer %s, status %s:%d\n",
		     rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
		     swi_state2str(rpc->srpc_wi.swi_state), status);

	if (status != 0)
		atomic_inc(&RPC_STAT32(SRPC_RPC_DROP));

	if (rpc->srpc_done != NULL)
		(*rpc->srpc_done) (rpc);

	spin_lock(&scd->scd_lock);

	if (rpc->srpc_reqstbuf != NULL) {
		/* NB: might drop scd_lock in srpc_service_recycle_buffer,
		 * but sv won't go away since scd_rpc_active must not be
		 * empty
		 */
		srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
		rpc->srpc_reqstbuf = NULL;
	}

	list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */

	/*
	 * No one can schedule me now since:
	 * - I'm not on scd_rpc_active.
	 * - all LNet events have been fired.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(rpc->srpc_ev.ev_fired);
	swi_exit_workitem(&rpc->srpc_wi);

	if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
		buffer = list_entry(scd->scd_buf_blocked.next,
				    struct srpc_buffer, buf_list);
		list_del(&buffer->buf_list);

		srpc_init_server_rpc(rpc, scd, buffer);
		list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
		swi_schedule_workitem(&rpc->srpc_wi);
	} else {
		list_add(&rpc->srpc_list, &scd->scd_rpc_free);
	}

	spin_unlock(&scd->scd_lock);
}

/* handles an incoming RPC */
static int srpc_handle_rpc(struct swi_workitem *wi)
{
	struct srpc_server_rpc *rpc = container_of(wi, struct srpc_server_rpc,
						   srpc_wi);
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_event *ev = &rpc->srpc_ev;
	int rc = 0;

	LASSERT(wi == &rpc->srpc_wi);

	spin_lock(&scd->scd_lock);

	if (sv->sv_shuttingdown || rpc->srpc_aborted) {
		spin_unlock(&scd->scd_lock);

		if (rpc->srpc_bulk != NULL)
			LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
		LNetMDUnlink(rpc->srpc_replymdh);

		if (ev->ev_fired) { /* no more event, OK to finish */
			srpc_server_rpc_done(rpc, -ESHUTDOWN);
			return 1;
		}
		return 0;
	}

	spin_unlock(&scd->scd_lock);

	switch (wi->swi_state) {
	default:
		LBUG();
	case SWI_STATE_NEWBORN: {
		struct srpc_msg *msg;
		struct srpc_generic_reply *reply;

		msg = &rpc->srpc_reqstbuf->buf_msg;
		reply = &rpc->srpc_replymsg.msg_body.reply;

		if (msg->msg_magic == 0) {
			/* moaned already in srpc_lnet_ev_handler */
			srpc_server_rpc_done(rpc, EBADMSG);
			return 1;
		}

		srpc_unpack_msg_hdr(msg);
		if (msg->msg_version != SRPC_MSG_VERSION) {
			CWARN("Version mismatch: %u, %u expected, from %s\n",
			      msg->msg_version, SRPC_MSG_VERSION,
			      libcfs_id2str(rpc->srpc_peer));
			reply->status = EPROTO;
			/* drop through and send reply */
		} else {
			reply->status = 0;
			rc = (*sv->sv_handler)(rpc);
			LASSERT(reply->status == 0 || !rpc->srpc_bulk);
			if (rc != 0) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_BULK_STARTED;

		if (rpc->srpc_bulk != NULL) {
			rc = srpc_do_bulk(rpc);
			if (rc == 0)
				return 0; /* wait for bulk */

			LASSERT(ev->ev_fired);
			ev->ev_status = rc;
		}
	}
	fallthrough;
	case SWI_STATE_BULK_STARTED:
		LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired);

		if (rpc->srpc_bulk != NULL) {
			rc = ev->ev_status;

			if (sv->sv_bulk_ready != NULL)
				rc = (*sv->sv_bulk_ready) (rpc, rc);

			if (rc != 0) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
		rc = srpc_send_reply(rpc);
		if (rc == 0)
			return 0; /* wait for reply */
		srpc_server_rpc_done(rpc, rc);
		return 1;

	case SWI_STATE_REPLY_SUBMITTED:
		if (!ev->ev_fired) {
			CERROR("RPC %p: bulk %p, service %d\n",
			       rpc, rpc->srpc_bulk, sv->sv_id);
			CERROR("Event: status %d, type %d, lnet %d\n",
			       ev->ev_status, ev->ev_type, ev->ev_lnet);
			LASSERT(ev->ev_fired);
		}

		wi->swi_state = SWI_STATE_DONE;
		srpc_server_rpc_done(rpc, ev->ev_status);
		return 1;
	}

	return 0;
}
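
/* Server-side state machine above: NEWBORN (unpack request, dispatch to
 * sv_handler) -> BULK_STARTED (optional bulk transfer, then sv_bulk_ready)
 * -> REPLY_SUBMITTED (reply PUT in flight) -> DONE. Each "return 0" parks
 * the workitem until srpc_lnet_ev_handler() reschedules it on the next LNet
 * event; "return 1" tells the scheduler the workitem is finished. */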

static void
srpc_client_rpc_expired (void *data)
{
	struct srpc_client_rpc *rpc = data;

	CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
	      rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	      rpc->crpc_timeout);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_timeout = 0;
	srpc_abort_rpc(rpc, -ETIMEDOUT);

	spin_unlock(&rpc->crpc_lock);

	atomic_inc(&RPC_STAT32(SRPC_RPC_EXPIRED));
}

static void
srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
{
	struct stt_timer *timer = &rpc->crpc_timer;

	if (rpc->crpc_timeout == 0)
		return;

	INIT_LIST_HEAD(&timer->stt_list);
	timer->stt_data = rpc;
	timer->stt_func = srpc_client_rpc_expired;
	timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
	stt_add_timer(timer);
}

/*
 * Called with rpc->crpc_lock held.
 *
 * Upon exit the RPC expiry timer is not queued and the handler is not
 * running on any CPU.
 */
static void
srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
{
	/* timer not planted or already exploded */
	if (rpc->crpc_timeout == 0)
		return;

	/* timer successfully defused */
	if (stt_del_timer(&rpc->crpc_timer))
		return;

	/* timer detonated, wait for it to explode */
	while (rpc->crpc_timeout != 0) {
		spin_unlock(&rpc->crpc_lock);

		schedule();

		spin_lock(&rpc->crpc_lock);
	}
}
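
/* If stt_del_timer() fails, the expiry handler is already running; the
 * handler zeroes crpc_timeout under crpc_lock (see
 * srpc_client_rpc_expired()), so dropping the lock and rescheduling until
 * crpc_timeout == 0 guarantees the handler has finished before we return. */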

static void
srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
{
	struct swi_workitem *wi = &rpc->crpc_wi;

	LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_closed = 1;
	if (rpc->crpc_status == 0)
		rpc->crpc_status = status;

	srpc_del_client_rpc_timer(rpc);

	CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
		     "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
		     rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
		     swi_state2str(wi->swi_state), rpc->crpc_aborted, status);

	/*
	 * No one can schedule me now since:
	 * - RPC timer has been defused.
	 * - all LNet events have been fired.
	 * - crpc_closed has been set, preventing srpc_abort_rpc from
	 *   scheduling me.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(!srpc_event_pending(rpc));
	swi_exit_workitem(wi);

	spin_unlock(&rpc->crpc_lock);

	(*rpc->crpc_done)(rpc);
}

/* sends an outgoing RPC */
int
srpc_send_rpc(struct swi_workitem *wi)
{
	int rc = 0;
	struct srpc_client_rpc *rpc;
	struct srpc_msg *reply;
	int do_bulk;

	LASSERT(wi != NULL);

	rpc = container_of(wi, struct srpc_client_rpc, crpc_wi);

	LASSERT(rpc != NULL);
	LASSERT(wi == &rpc->crpc_wi);

	reply = &rpc->crpc_replymsg;
	do_bulk = rpc->crpc_bulk.bk_niov > 0;

	spin_lock(&rpc->crpc_lock);

	if (rpc->crpc_aborted) {
		spin_unlock(&rpc->crpc_lock);
		goto abort;
	}

	spin_unlock(&rpc->crpc_lock);

	switch (wi->swi_state) {
	default:
		LBUG();
	case SWI_STATE_NEWBORN:
		LASSERT(!srpc_event_pending(rpc));

		rc = srpc_prepare_reply(rpc);
		if (rc != 0) {
			srpc_client_rpc_done(rpc, rc);
			return 1;
		}

		rc = srpc_prepare_bulk(rpc);
		if (rc != 0)
			break;

		wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
		rc = srpc_send_request(rpc);
		break;

	case SWI_STATE_REQUEST_SUBMITTED:
		/* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
		 * order; however, they're processed in a strict order:
		 * rqt, rpy, and bulk.
		 */
		if (!rpc->crpc_reqstev.ev_fired)
			break;

		rc = rpc->crpc_reqstev.ev_status;
		if (rc != 0)
			break;

		wi->swi_state = SWI_STATE_REQUEST_SENT;
		fallthrough;
	case SWI_STATE_REQUEST_SENT: {
		enum srpc_msg_type type;

		type = srpc_service2reply(rpc->crpc_service);

		if (!rpc->crpc_replyev.ev_fired)
			break;

		rc = rpc->crpc_replyev.ev_status;
		if (rc != 0)
			break;

		srpc_unpack_msg_hdr(reply);
		if (reply->msg_type != type ||
		    (reply->msg_magic != SRPC_MSG_MAGIC &&
		     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
			      libcfs_id2str(rpc->crpc_dest),
			      reply->msg_type, type,
			      reply->msg_magic, SRPC_MSG_MAGIC);
			rc = -EBADMSG;
			break;
		}

		if (do_bulk && reply->msg_body.reply.status != 0) {
			CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
			      reply->msg_body.reply.status,
			      libcfs_id2str(rpc->crpc_dest));
			LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
		}

		wi->swi_state = SWI_STATE_REPLY_RECEIVED;
	}
	fallthrough;
	case SWI_STATE_REPLY_RECEIVED:
		if (do_bulk && !rpc->crpc_bulkev.ev_fired)
			break;

		rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;

		/* Bulk buffer was unlinked due to remote error. Clear error
		 * since reply buffer still contains valid data.
		 * NB rpc->crpc_done shouldn't look into bulk data in case of
		 * remote error.
		 */
		if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
		    rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
			rc = 0;

		wi->swi_state = SWI_STATE_DONE;
		srpc_client_rpc_done(rpc, rc);
		return 1;
	}

	if (rc != 0) {
		spin_lock(&rpc->crpc_lock);
		srpc_abort_rpc(rpc, rc);
		spin_unlock(&rpc->crpc_lock);
	}

abort:
	if (rpc->crpc_aborted) {
		LNetMDUnlink(rpc->crpc_reqstmdh);
		LNetMDUnlink(rpc->crpc_replymdh);
		LNetMDUnlink(rpc->crpc_bulk.bk_mdh);

		if (!srpc_event_pending(rpc)) {
			srpc_client_rpc_done(rpc, -EINTR);
			return 1;
		}
	}
	return 0;
}
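
/* Client-side state machine: NEWBORN (post reply/bulk buffers, send the
 * request) -> REQUEST_SUBMITTED -> REQUEST_SENT -> REPLY_RECEIVED -> DONE.
 * Events may arrive in any order (see CAVEAT EMPTOR above) but are consumed
 * strictly in rqt, rpy, bulk order; any error breaks out of the switch and
 * aborts the RPC, after which the unlinks above generate the final events
 * that let the RPC complete with -EINTR. */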

struct srpc_client_rpc *
srpc_create_client_rpc(struct lnet_process_id peer, int service,
		       int nbulkiov, int bulklen,
		       void (*rpc_done)(struct srpc_client_rpc *),
		       void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
	struct srpc_client_rpc *rpc;

	LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
				   crpc_bulk.bk_iovs[nbulkiov]));
	if (rpc == NULL)
		return NULL;

	srpc_init_client_rpc(rpc, peer, service, nbulkiov,
			     bulklen, rpc_done, rpc_fini, priv);
	return rpc;
}
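
/* Illustrative client-side usage (a hypothetical caller, for exposition
 * only; real callers are the selftest framework and console code):
 *
 *	static void my_done(struct srpc_client_rpc *rpc)
 *	{
 *		CDEBUG(D_NET, "RPC to %s finished: %d\n",
 *		       libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
 *	}
 *
 *	rpc = srpc_create_client_rpc(peer, service, 0, 0,
 *				     my_done, NULL, NULL);
 *	if (rpc != NULL) {
 *		spin_lock(&rpc->crpc_lock);
 *		srpc_post_rpc(rpc);	// crpc_lock held, see below
 *		spin_unlock(&rpc->crpc_lock);
 *	}
 */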

/* called with rpc->crpc_lock held */
void
srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
{
	LASSERT(why != 0);

	if (rpc->crpc_aborted ||	/* already aborted */
	    rpc->crpc_closed)		/* callback imminent */
		return;

	CDEBUG(D_NET,
	       "Aborting RPC: service %d, peer %s, state %s, why %d\n",
	       rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	       swi_state2str(rpc->crpc_wi.swi_state), why);

	rpc->crpc_aborted = 1;
	rpc->crpc_status = why;
	swi_schedule_workitem(&rpc->crpc_wi);
}

/* called with rpc->crpc_lock held */
void
srpc_post_rpc(struct srpc_client_rpc *rpc)
{
	LASSERT(!rpc->crpc_aborted);
	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
	       libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
	       rpc->crpc_timeout);

	srpc_add_client_rpc_timer(rpc);
	swi_schedule_workitem(&rpc->crpc_wi);
}

int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
	struct srpc_event *ev = &rpc->srpc_ev;
	struct srpc_msg *msg = &rpc->srpc_replymsg;
	struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	__u64 rpyid;
	int rc;

	LASSERT(buffer != NULL);
	rpyid = buffer->buf_msg.msg_body.reqst.rpyid;

	spin_lock(&scd->scd_lock);

	if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
		/* Repost buffer before replying since the test client
		 * might send me another RPC once it gets the reply
		 */
		if (srpc_service_post_buffer(scd, buffer) != 0)
			CWARN("Failed to repost %s buffer\n", sv->sv_name);
		rpc->srpc_reqstbuf = NULL;
	}

	spin_unlock(&scd->scd_lock);

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REPLY_SENT;

	msg->msg_magic = SRPC_MSG_MAGIC;
	msg->msg_version = SRPC_MSG_VERSION;
	msg->msg_type = srpc_service2reply(sv->sv_id);

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
				   sizeof(*msg), LNET_MD_OP_PUT,
				   rpc->srpc_peer, rpc->srpc_self,
				   &rpc->srpc_replymdh, ev);
	if (rc != 0)
		ev->ev_fired = 1;  /* no more event expected */
	return rc;
}

/* when in kernel always called with LNET_LOCK() held, and in thread context */
static void
srpc_lnet_ev_handler(struct lnet_event *ev)
{
	struct srpc_service_cd *scd;
	struct srpc_event *rpcev = ev->md_user_ptr;
	struct srpc_client_rpc *crpc;
	struct srpc_server_rpc *srpc;
	struct srpc_buffer *buffer;
	struct srpc_service *sv;
	struct srpc_msg *msg;
	enum srpc_msg_type type;

	LASSERT(!in_interrupt());

	if (ev->status != 0) {
		__u32 errors;

		if (ev->status != -ECANCELED) /* cancellation is not error */
			errors = atomic_inc_return(&RPC_STAT32(SRPC_ERROR));
		else
			errors = atomic_read(&RPC_STAT32(SRPC_ERROR));

		CNETERR("LNet event status %d type %d, RPC errors %u\n",
			ev->status, ev->type, errors);
	}

	rpcev->ev_lnet = ev->type;

	switch (rpcev->ev_type) {
	default:
		CERROR("Unknown event: status %d, type %d, lnet %d\n",
		       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
		LBUG();
	case SRPC_REQUEST_SENT:
		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK)
			atomic_inc(&RPC_STAT32(SRPC_RPC_SENT));

		fallthrough;
	case SRPC_REPLY_RCVD:
	case SRPC_BULK_REQ_RCVD:
		crpc = rpcev->ev_data;

		if (rpcev != &crpc->crpc_reqstev &&
		    rpcev != &crpc->crpc_replyev &&
		    rpcev != &crpc->crpc_bulkev) {
			CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
			       rpcev, crpc, &crpc->crpc_reqstev,
			       &crpc->crpc_replyev, &crpc->crpc_bulkev);
			CERROR("Bad event: status %d, type %d, lnet %d\n",
			       rpcev->ev_status, rpcev->ev_type,
			       rpcev->ev_lnet);
			LBUG();
		}

		spin_lock(&crpc->crpc_lock);

		LASSERT(rpcev->ev_fired == 0);
		rpcev->ev_fired = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&crpc->crpc_wi);

		spin_unlock(&crpc->crpc_lock);
		break;

	case SRPC_REQUEST_RCVD:
		scd = rpcev->ev_data;
		sv = scd->scd_svc;

		LASSERT(rpcev == &scd->scd_ev);

		spin_lock(&scd->scd_lock);

		LASSERT(ev->unlinked);
		LASSERT(ev->type == LNET_EVENT_PUT ||
			ev->type == LNET_EVENT_UNLINK);
		LASSERT(ev->type != LNET_EVENT_UNLINK ||
			sv->sv_shuttingdown);

		buffer = container_of(ev->md_start, struct srpc_buffer,
				      buf_msg);
		buffer->buf_peer = lnet_pid_to_pid4(&ev->source);
		buffer->buf_self = lnet_nid_to_nid4(&ev->target.nid);

		LASSERT(scd->scd_buf_nposted > 0);
		scd->scd_buf_nposted--;

		if (sv->sv_shuttingdown) {
			/* Leave buffer on scd->scd_buf_nposted since
			 * srpc_finish_service needs to traverse it.
			 */
			spin_unlock(&scd->scd_lock);
			break;
		}

		if (scd->scd_buf_err_stamp != 0 &&
		    scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
			/* re-enable adding buffer */
			scd->scd_buf_err_stamp = 0;
			scd->scd_buf_err = 0;
		}

		if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
		    scd->scd_buf_adjust == 0 &&
		    scd->scd_buf_nposted < scd->scd_buf_low) {
			scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
						  SFW_TEST_WI_MIN);
			swi_schedule_workitem(&scd->scd_buf_wi);
		}

		list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
		msg = &buffer->buf_msg;
		type = srpc_service2request(sv->sv_id);

		if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
		    (msg->msg_type != type &&
		     msg->msg_type != __swab32(type)) ||
		    (msg->msg_magic != SRPC_MSG_MAGIC &&
		     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
			       sv->sv_name, libcfs_idstr(&ev->initiator),
			       ev->status, ev->mlength,
			       msg->msg_type, msg->msg_magic);

			/* NB: can't call srpc_service_recycle_buffer here
			 * since it may call LNetM[DE]Attach. The invalid
			 * magic tells srpc_handle_rpc to drop this RPC
			 */
			msg->msg_magic = 0;
		}

		if (!list_empty(&scd->scd_rpc_free)) {
			srpc = list_entry(scd->scd_rpc_free.next,
					  struct srpc_server_rpc,
					  srpc_list);
			list_del(&srpc->srpc_list);

			srpc_init_server_rpc(srpc, scd, buffer);
			list_add_tail(&srpc->srpc_list,
				      &scd->scd_rpc_active);
			swi_schedule_workitem(&srpc->srpc_wi);
		} else {
			list_add_tail(&buffer->buf_list,
				      &scd->scd_buf_blocked);
		}

		spin_unlock(&scd->scd_lock);

		atomic_inc(&RPC_STAT32(SRPC_RPC_RCVD));
		break;

	case SRPC_BULK_GET_RPLD:
		LASSERT(ev->type == LNET_EVENT_SEND ||
			ev->type == LNET_EVENT_REPLY ||
			ev->type == LNET_EVENT_UNLINK);

		if (!ev->unlinked)
			break; /* wait for final event */

		fallthrough;
	case SRPC_BULK_PUT_SENT:
		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
			atomic64_t *data;

			if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
				data = &srpc_data.rpc_counters64[SRPC_BULK_GET];
			else
				data = &srpc_data.rpc_counters64[SRPC_BULK_PUT];

			atomic64_add(ev->mlength, data);
		}

		fallthrough;
	case SRPC_REPLY_SENT:
		srpc = rpcev->ev_data;
		scd = srpc->srpc_scd;

		LASSERT(rpcev == &srpc->srpc_ev);

		spin_lock(&scd->scd_lock);

		rpcev->ev_fired = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&srpc->srpc_wi);

		spin_unlock(&scd->scd_lock);
		break;
	}
}

int
srpc_startup (void)
{
	int rc;

	memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
	spin_lock_init(&srpc_data.rpc_glock);

	/* 1 second pause to avoid timestamp reuse */
	schedule_timeout_uninterruptible(cfs_time_seconds(1));
	atomic64_set(&srpc_data.rpc_matchbits,
		     ((__u64)ktime_get_real_seconds() << 48));

	srpc_data.rpc_state = SRPC_STATE_NONE;

	rc = LNetNIInit(LNET_PID_LUSTRE);
	if (rc < 0) {
		CERROR("LNetNIInit() has failed: %d\n", rc);
		return rc;
	}

	srpc_data.rpc_state = SRPC_STATE_NI_INIT;

	srpc_data.rpc_lnet_handler = srpc_lnet_ev_handler;

	rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
	LASSERT(rc == 0);
	rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
	LASSERT(rc == 0);

	srpc_data.rpc_state = SRPC_STATE_EQ_INIT;

	rc = stt_startup();
	if (rc != 0)
		srpc_shutdown();
	else
		srpc_data.rpc_state = SRPC_STATE_RUNNING;

	return rc;
}

void
srpc_shutdown (void)
{
	int i;
	int rc;
	int state;

	state = srpc_data.rpc_state;
	srpc_data.rpc_state = SRPC_STATE_STOPPING;

	switch (state) {
	default:
		LBUG();
	case SRPC_STATE_RUNNING:
		spin_lock(&srpc_data.rpc_glock);

		for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
			struct srpc_service *sv = srpc_data.rpc_services[i];

			LASSERTF(sv == NULL,
				 "service not empty: id %d, name %s\n",
				 i, sv->sv_name);
		}

		spin_unlock(&srpc_data.rpc_glock);

		stt_shutdown();
		fallthrough;

	case SRPC_STATE_EQ_INIT:
		rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
		rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
		LASSERT(rc == 0);
		lnet_assert_handler_unused(srpc_data.rpc_lnet_handler);
		fallthrough;

	case SRPC_STATE_NI_INIT: