/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Author: Isaac Huang <isaac@clusterfs.com>
 *
 * 2012-05-13: Liang Zhen <liang@whamcloud.com>
 * - percpt data for service to improve smp performance
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "selftest.h"

enum srpc_state {
	SRPC_STATE_NONE,
	SRPC_STATE_NI_INIT,
	SRPC_STATE_EQ_INIT,
	SRPC_STATE_RUNNING,
	SRPC_STATE_STOPPING,
};
static struct smoketest_rpc {
	spinlock_t		rpc_glock;	/* global lock */
	struct srpc_service	*rpc_services[SRPC_SERVICE_MAX_ID + 1];
	struct lnet_handle_eq	rpc_lnet_eq;	/* _the_ LNet event queue */
	enum srpc_state		rpc_state;
	struct srpc_counters	rpc_counters;
	__u64			rpc_matchbits;	/* matchbits counter */
} srpc_data;
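/* Framework RPCs (session management and such) and test RPCs are served
 * on separate portals; srpc_serv_portal() below derives the portal from
 * the service id alone. */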
static inline int
srpc_serv_portal(int svc_id)
{
	return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
	       SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
}
/* forward ref's */
static int srpc_handle_rpc(struct swi_workitem *wi);
void srpc_get_counters(struct srpc_counters *cnt)
{
	spin_lock(&srpc_data.rpc_glock);
	*cnt = srpc_data.rpc_counters;
	spin_unlock(&srpc_data.rpc_glock);
}
void srpc_set_counters(const struct srpc_counters *cnt)
{
	spin_lock(&srpc_data.rpc_glock);
	srpc_data.rpc_counters = *cnt;
	spin_unlock(&srpc_data.rpc_glock);
}
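/* Fill the i-th kiov entry of a bulk descriptor with one page fragment;
 * the (off, nob) pair must describe a range within a single page. */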
static int
srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int off,
		   int nob)
{
	LASSERT(off < PAGE_SIZE);
	LASSERT(nob > 0 && nob <= PAGE_SIZE);

	bk->bk_iovs[i].kiov_offset = off;
	bk->bk_iovs[i].kiov_page = pg;
	bk->bk_iovs[i].kiov_len = nob;
	return nob;
}
void
srpc_free_bulk(struct srpc_bulk *bk)
{
	struct page *pg;
	int i;

	LASSERT(bk != NULL);

	for (i = 0; i < bk->bk_niov; i++) {
		pg = bk->bk_iovs[i].kiov_page;
		if (pg == NULL)
			break;

		__free_page(pg);
	}

	LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
}
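/* Allocate a bulk descriptor plus bulk_npg kiov entries on CPT cpt.
 * Pages are allocated CPT-locally; bulk_off is the offset into the
 * first page, and only the first fragment may start mid-page (bulk_off
 * is reset to 0 after the first iteration below). */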
struct srpc_bulk *
srpc_alloc_bulk(int cpt, unsigned bulk_off, unsigned bulk_npg,
		unsigned bulk_len, int sink)
{
	struct srpc_bulk *bk;
	int i;

	LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);

	LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
			 offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
	if (bk == NULL) {
		CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
		return NULL;
	}

	memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
	bk->bk_sink = sink;
	bk->bk_len = bulk_len;
	bk->bk_niov = bulk_npg;

	for (i = 0; i < bulk_npg; i++) {
		struct page *pg;
		int nob;

		pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, GFP_KERNEL);
		if (pg == NULL) {
			CERROR("Can't allocate page %d of %d\n", i, bulk_npg);
			srpc_free_bulk(bk);
			return NULL;
		}

		nob = min_t(unsigned, bulk_off + bulk_len, PAGE_SIZE) -
		      bulk_off;

		srpc_add_bulk_page(bk, pg, i, bulk_off, nob);
		bulk_len -= nob;
		bulk_off = 0;
	}

	return bk;
}
static inline __u64
srpc_next_id(void)
{
	__u64 id;

	spin_lock(&srpc_data.rpc_glock);
	id = srpc_data.rpc_matchbits++;
	spin_unlock(&srpc_data.rpc_glock);
	return id;
}
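/* (Re)arm a server RPC to handle the request sitting in buffer. Note
 * the scheduler choice: framework RPCs run on lst_sched_serial, test
 * RPCs on the per-CPT lst_sched_test scheduler. */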
static void
srpc_init_server_rpc(struct srpc_server_rpc *rpc,
		     struct srpc_service_cd *scd,
		     struct srpc_buffer *buffer)
{
	memset(rpc, 0, sizeof(*rpc));
	swi_init_workitem(&rpc->srpc_wi, srpc_handle_rpc,
			  srpc_serv_is_framework(scd->scd_svc) ?
			  lst_sched_serial : lst_sched_test[scd->scd_cpt]);

	rpc->srpc_ev.ev_fired = 1; /* no event expected now */

	rpc->srpc_scd = scd;
	rpc->srpc_reqstbuf = buffer;
	rpc->srpc_peer = buffer->buf_peer;
	rpc->srpc_self = buffer->buf_self;
	LNetInvalidateMDHandle(&rpc->srpc_replymdh);
}
static void
srpc_service_fini(struct srpc_service *svc)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	struct srpc_buffer *buf;
	struct list_head *q;
	int i;

	if (svc->sv_cpt_data == NULL)
		return;

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		while (1) {
			if (!list_empty(&scd->scd_buf_posted))
				q = &scd->scd_buf_posted;
			else if (!list_empty(&scd->scd_buf_blocked))
				q = &scd->scd_buf_blocked;
			else
				break;

			while (!list_empty(q)) {
				buf = list_entry(q->next,
						 struct srpc_buffer,
						 buf_list);
				list_del(&buf->buf_list);
				LIBCFS_FREE(buf, sizeof(*buf));
			}
		}

		LASSERT(list_empty(&scd->scd_rpc_active));

		while (!list_empty(&scd->scd_rpc_free)) {
			rpc = list_entry(scd->scd_rpc_free.next,
					 struct srpc_server_rpc,
					 srpc_list);
			list_del(&rpc->srpc_list);
			LIBCFS_FREE(rpc, sizeof(*rpc));
		}
	}

	cfs_percpt_free(svc->sv_cpt_data);
	svc->sv_cpt_data = NULL;
}
static int
srpc_service_nrpcs(struct srpc_service *svc)
{
	int nrpcs = svc->sv_wi_total / svc->sv_ncpts;

	return srpc_serv_is_framework(svc) ?
	       max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
}
int srpc_add_buffer(struct swi_workitem *wi);
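/* Allocate and initialize the per-CPT data of a service: locks, buffer
 * and RPC lists, the buffer-posting work item, and a free pool of
 * server RPCs sized by srpc_service_nrpcs(). A framework service only
 * fills the pool on CPT 0 (see the comment in the loop below). */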
static int
srpc_service_init(struct srpc_service *svc)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int nrpcs;
	int i;
	int j;

	svc->sv_shuttingdown = 0;

	svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
					    sizeof(struct srpc_service_cd));
	if (svc->sv_cpt_data == NULL)
		return -ENOMEM;

	svc->sv_ncpts = srpc_serv_is_framework(svc) ?
			1 : cfs_cpt_number(lnet_cpt_table());
	nrpcs = srpc_service_nrpcs(svc);

	cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
		scd->scd_cpt = i;
		scd->scd_svc = svc;
		spin_lock_init(&scd->scd_lock);
		INIT_LIST_HEAD(&scd->scd_rpc_free);
		INIT_LIST_HEAD(&scd->scd_rpc_active);
		INIT_LIST_HEAD(&scd->scd_buf_posted);
		INIT_LIST_HEAD(&scd->scd_buf_blocked);

		scd->scd_ev.ev_data = scd;
		scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;

		/* NB: don't use lst_sched_serial for adding buffer,
		 * see details in srpc_service_add_buffers() */
		swi_init_workitem(&scd->scd_buf_wi,
				  srpc_add_buffer, lst_sched_test[i]);

		if (i != 0 && srpc_serv_is_framework(svc)) {
			/* NB: a framework service only needs srpc_service_cd
			 * for one partition, but we allocate for all to keep
			 * the implementation simple; it wastes a little
			 * memory but nobody should care about this */
			continue;
		}

		for (j = 0; j < nrpcs; j++) {
			LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
					 i, sizeof(*rpc));
			if (rpc == NULL) {
				srpc_service_fini(svc);
				return -ENOMEM;
			}
			list_add(&rpc->srpc_list, &scd->scd_rpc_free);
		}
	}

	return 0;
}
int
srpc_add_service(struct srpc_service *sv)
{
	int id = sv->sv_id;

	LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);

	if (srpc_service_init(sv) != 0)
		return -ENOMEM;

	spin_lock(&srpc_data.rpc_glock);

	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	if (srpc_data.rpc_services[id] != NULL) {
		spin_unlock(&srpc_data.rpc_glock);
		goto failed;
	}

	srpc_data.rpc_services[id] = sv;
	spin_unlock(&srpc_data.rpc_glock);

	CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
	return 0;

failed:
	srpc_service_fini(sv);
	return -EBUSY;
}
int
srpc_remove_service(struct srpc_service *sv)
{
	int id = sv->sv_id;

	spin_lock(&srpc_data.rpc_glock);

	if (srpc_data.rpc_services[id] != sv) {
		spin_unlock(&srpc_data.rpc_glock);
		return -ENOENT;
	}

	srpc_data.rpc_services[id] = NULL;
	spin_unlock(&srpc_data.rpc_glock);
	return 0;
}
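/* Post a passive RDMA buffer: attach an ME for the given matchbits and
 * an auto-unlinking MD on the given portal so the peer's PUT or GET can
 * land in buf; completion is reported to srpc_lnet_ev_handler() via
 * ev. */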
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
		       int len, int options, struct lnet_process_id peer,
		       struct lnet_handle_md *mdh, struct srpc_event *ev)
{
	int rc;
	struct lnet_md md;
	struct lnet_handle_me meh;

	rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
			  local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
	if (rc != 0) {
		CERROR("LNetMEAttach failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	md.threshold = 1;
	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.options = options;
	md.eq_handle = srpc_data.rpc_lnet_eq;

	rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
	if (rc != 0) {
		CERROR("LNetMDAttach failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);

		rc = LNetMEUnlink(meh);
		LASSERT(rc == 0);
		return -ENOMEM;
	}

	CDEBUG(D_NET,
	       "Posted passive RDMA: peer %s, portal %d, matchbits %#llx\n",
	       libcfs_id2str(peer), portal, matchbits);
	return 0;
}
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
		      int options, struct lnet_process_id peer,
		      lnet_nid_t self, struct lnet_handle_md *mdh,
		      struct srpc_event *ev)
{
	int rc;
	struct lnet_md md;

	md.user_ptr = ev;
	md.start = buf;
	md.length = len;
	md.eq_handle = srpc_data.rpc_lnet_eq;
	md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
	md.options = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);

	rc = LNetMDBind(md, LNET_UNLINK, mdh);
	if (rc != 0) {
		CERROR("LNetMDBind failed: %d\n", rc);
		LASSERT(rc == -ENOMEM);
		return -ENOMEM;
	}

	/* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options.
	 * they're only meaningful for MDs attached to an ME (i.e. passive
	 * buffers), so strip them off before binding the MD.
	 */
	if ((options & LNET_MD_OP_PUT) != 0) {
		rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
			     portal, matchbits, 0, 0);
	} else {
		LASSERT((options & LNET_MD_OP_GET) != 0);

		rc = LNetGet(self, *mdh, peer, portal, matchbits, 0, false);
	}

	if (rc != 0) {
		CERROR("LNet%s(%s, %d, %lld) failed: %d\n",
		       ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
		       libcfs_id2str(peer), portal, matchbits, rc);

		/* The forthcoming unlink event will complete this operation
		 * with failure, so fall through and return success here.
		 */
		rc = LNetMDUnlink(*mdh);
		LASSERT(rc == 0);
	}

	CDEBUG(D_NET,
	       "Posted active RDMA: peer %s, portal %u, matchbits %#llx\n",
	       libcfs_id2str(peer), portal, matchbits);
	return 0;
}
static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
			 struct lnet_handle_md *mdh, struct srpc_event *ev)
{
	struct lnet_process_id any = {0};

	any.nid = LNET_NID_ANY;
	any.pid = LNET_PID_ANY;

	return srpc_post_passive_rdma(srpc_serv_portal(service),
				      local, service, buf, len,
				      LNET_MD_OP_PUT, any, mdh, ev);
}
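/* Post one request buffer for a service. Note the lock juggling: buf
 * goes on scd_buf_posted and scd_buf_nposted is bumped while scd_lock
 * is held, the lock is dropped around the LNet call, then retaken to
 * sort out failure and shutdown races (see the comments below). */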
static int
srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_msg *msg = &buf->buf_msg;
	int rc;

	LNetInvalidateMDHandle(&buf->buf_mdh);
	list_add(&buf->buf_list, &scd->scd_buf_posted);
	scd->scd_buf_nposted++;
	spin_unlock(&scd->scd_lock);

	rc = srpc_post_passive_rqtbuf(sv->sv_id,
				      !srpc_serv_is_framework(sv),
				      msg, sizeof(*msg), &buf->buf_mdh,
				      &scd->scd_ev);

	/* At this point, an RPC (new or delayed) may have arrived in
	 * msg and its event handler has been called. So we must add
	 * buf to scd_buf_posted _before_ dropping scd_lock */

	spin_lock(&scd->scd_lock);

	if (rc == 0) {
		if (!sv->sv_shuttingdown)
			return 0;

		spin_unlock(&scd->scd_lock);
		/* srpc_shutdown_service might have tried to unlink me
		 * when my buf_mdh was still invalid */
		LNetMDUnlink(buf->buf_mdh);
		spin_lock(&scd->scd_lock);
		return 0;
	}

	scd->scd_buf_nposted--;
	if (sv->sv_shuttingdown)
		return rc; /* don't allow to change scd_buf_posted */

	list_del(&buf->buf_list);
	spin_unlock(&scd->scd_lock);

	LIBCFS_FREE(buf, sizeof(*buf));

	spin_lock(&scd->scd_lock);
	return rc;
}
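/* Work item that grows a service's buffer pool: each pass converts one
 * unit of scd_buf_adjust into an allocated and posted buffer. Failures
 * are recorded in scd_buf_err/scd_buf_err_stamp so that waiters in
 * srpc_service_add_buffers() and the event handler can react. */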
int
srpc_add_buffer(struct swi_workitem *wi)
{
	struct srpc_service_cd *scd = container_of(wi, struct srpc_service_cd,
						   scd_buf_wi);
	struct srpc_buffer *buf;
	int rc = 0;

	/* it's called by workitem scheduler threads, these threads
	 * should have been set CPT affinity, so buffers will be posted
	 * on CPT local list of Portal */
	spin_lock(&scd->scd_lock);

	while (scd->scd_buf_adjust > 0 &&
	       !scd->scd_svc->sv_shuttingdown) {
		scd->scd_buf_adjust--; /* consume it */
		scd->scd_buf_posting++;

		spin_unlock(&scd->scd_lock);

		LIBCFS_ALLOC(buf, sizeof(*buf));
		if (buf == NULL) {
			CERROR("Failed to add new buf to service: %s\n",
			       scd->scd_svc->sv_name);
			spin_lock(&scd->scd_lock);
			rc = -ENOMEM;
			break;
		}

		spin_lock(&scd->scd_lock);
		if (scd->scd_svc->sv_shuttingdown) {
			spin_unlock(&scd->scd_lock);
			LIBCFS_FREE(buf, sizeof(*buf));

			spin_lock(&scd->scd_lock);
			rc = -ESHUTDOWN;
			break;
		}

		rc = srpc_service_post_buffer(scd, buf);
		if (rc != 0)
			break; /* buf has been freed inside */

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
		scd->scd_buf_total++;
		scd->scd_buf_low = MAX(2, scd->scd_buf_total / 4);
	}

	if (rc != 0) {
		scd->scd_buf_err_stamp = ktime_get_real_seconds();
		scd->scd_buf_err = rc;

		LASSERT(scd->scd_buf_posting > 0);
		scd->scd_buf_posting--;
	}

	spin_unlock(&scd->scd_lock);
	return 0;
}
int
srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd *scd;
	int rc = 0;
	int i;

	LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		scd->scd_buf_err = 0;
		scd->scd_buf_err_stamp = 0;
		scd->scd_buf_posting = 0;
		scd->scd_buf_adjust = nbuffer;
		/* start to post buffers */
		swi_schedule_workitem(&scd->scd_buf_wi);
		spin_unlock(&scd->scd_lock);

		/* a framework service only posts buffers for one partition */
		if (srpc_serv_is_framework(sv))
			break;
	}

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		/*
		 * NB: srpc_service_add_buffers() can be called inside
		 * thread context of lst_sched_serial, and we don't normally
		 * allow sleeping inside thread context of a WI scheduler
		 * because it will block the current scheduler thread from
		 * doing anything else; even worse, it could deadlock if it's
		 * waiting on a result from another WI of the same scheduler.
		 * However, it's safe here because scd_buf_wi is scheduled
		 * by a thread in a different WI scheduler (lst_sched_test),
		 * so we don't have any risk of deadlock, though this could
		 * block all WIs pending on lst_sched_serial for a moment,
		 * which is not good but not fatal.
		 */
		lst_wait_until(scd->scd_buf_err != 0 ||
			       (scd->scd_buf_adjust == 0 &&
				scd->scd_buf_posting == 0),
			       scd->scd_lock, "waiting for adding buffer\n");

		if (scd->scd_buf_err != 0 && rc == 0)
			rc = scd->scd_buf_err;

		spin_unlock(&scd->scd_lock);
	}

	return rc;
}
void
srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
{
	struct srpc_service_cd *scd;
	int num;
	int i;

	LASSERT(!sv->sv_shuttingdown);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		num = scd->scd_buf_total + scd->scd_buf_posting;
		scd->scd_buf_adjust -= min(nbuffer, num);

		spin_unlock(&scd->scd_lock);
	}
}
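/* Returns 1 and finalizes sv once all posted buffers have unlinked and
 * no RPC is active; returns 0 if the shutdown hasn't drained yet, in
 * which case the caller is expected to poll again. */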
/* returns 1 if sv has finished, otherwise 0 */
int
srpc_finish_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int i;

	LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);
		if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (scd->scd_buf_nposted > 0) {
			CDEBUG(D_NET,
			       "waiting for %d posted buffers to unlink\n",
			       scd->scd_buf_nposted);
			spin_unlock(&scd->scd_lock);
			return 0;
		}

		if (list_empty(&scd->scd_rpc_active)) {
			spin_unlock(&scd->scd_lock);
			continue;
		}

		rpc = list_entry(scd->scd_rpc_active.next,
				 struct srpc_server_rpc, srpc_list);
		CNETERR("Active RPC %p on shutdown: sv %s, peer %s, wi %s scheduled %d running %d, ev fired %d type %d status %d lnet %d\n",
			rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
			swi_state2str(rpc->srpc_wi.swi_state),
			rpc->srpc_wi.swi_workitem.wi_scheduled,
			rpc->srpc_wi.swi_workitem.wi_running,
			rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
			rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
		spin_unlock(&scd->scd_lock);
		return 0;
	}

	/* no lock needed from now on */
	srpc_service_fini(sv);
	return 1;
}
/* called with scd->scd_lock held */
static void
srpc_service_recycle_buffer(struct srpc_service_cd *scd,
			    struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
	if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
		if (srpc_service_post_buffer(scd, buf) != 0) {
			CWARN("Failed to post %s buffer\n",
			      scd->scd_svc->sv_name);
		}
		return;
	}

	/* service is shutting down, or we want to recycle some buffers */
	scd->scd_buf_total--;

	if (scd->scd_buf_adjust < 0) {
		scd->scd_buf_adjust++;
		if (scd->scd_buf_adjust < 0 &&
		    scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
			CDEBUG(D_INFO,
			       "Try to recycle %d buffers but nothing left\n",
			       scd->scd_buf_adjust);
			scd->scd_buf_adjust = 0;
		}
	}

	spin_unlock(&scd->scd_lock);
	LIBCFS_FREE(buf, sizeof(*buf));
	spin_lock(&scd->scd_lock);
}
void
srpc_abort_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	int i;

	CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/* schedule in-flight RPCs to notice the abort, NB:
		 * racing with incoming RPCs; a complete fix should make test
		 * RPCs carry the session ID in their headers
		 */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
			rpc->srpc_aborted = 1;
			swi_schedule_workitem(&rpc->srpc_wi);
		}

		spin_unlock(&scd->scd_lock);
	}
}
void
srpc_shutdown_service(struct srpc_service *sv)
{
	struct srpc_service_cd *scd;
	struct srpc_server_rpc *rpc;
	struct srpc_buffer *buf;
	int i;

	CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
	       sv->sv_id, sv->sv_name);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_lock(&scd->scd_lock);

	sv->sv_shuttingdown = 1; /* i.e. no new active RPC */

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
		spin_unlock(&scd->scd_lock);

	cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
		spin_lock(&scd->scd_lock);

		/* schedule in-flight RPCs to notice the shutdown */
		list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
			swi_schedule_workitem(&rpc->srpc_wi);

		spin_unlock(&scd->scd_lock);

		/* OK to traverse scd_buf_posted without lock, since no one
		 * touches scd_buf_posted now
		 */
		list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
			LNetMDUnlink(buf->buf_mdh);
	}
}
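/* Client-side posting helpers: srpc_send_request() PUTs the request
 * message, while srpc_prepare_reply()/srpc_prepare_bulk() post passive
 * buffers for the reply and the bulk transfer. On a failed post,
 * ev_fired is set by hand because no LNet event will ever arrive. */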
static int
srpc_send_request(struct srpc_client_rpc *rpc)
{
	struct srpc_event *ev = &rpc->crpc_reqstev;
	int rc;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REQUEST_SENT;

	rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
				   rpc->crpc_service, &rpc->crpc_reqstmsg,
				   sizeof(struct srpc_msg), LNET_MD_OP_PUT,
				   rpc->crpc_dest, LNET_NID_ANY,
				   &rpc->crpc_reqstmdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}
static int
srpc_prepare_reply(struct srpc_client_rpc *rpc)
{
	struct srpc_event *ev = &rpc->crpc_replyev;
	u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
	int rc;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REPLY_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &rpc->crpc_replymsg,
				    sizeof(struct srpc_msg),
				    LNET_MD_OP_PUT, rpc->crpc_dest,
				    &rpc->crpc_replymdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}
static int
srpc_prepare_bulk(struct srpc_client_rpc *rpc)
{
	struct srpc_bulk *bk = &rpc->crpc_bulk;
	struct srpc_event *ev = &rpc->crpc_bulkev;
	__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
	int rc;
	int opt;

	LASSERT(bk->bk_niov <= LNET_MAX_IOV);

	/* nothing to do */
	if (bk->bk_niov == 0)
		return 0;

	opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_BULK_REQ_RCVD;

	*id = srpc_next_id();

	rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
				    &bk->bk_iovs[0], bk->bk_niov, opt,
				    rpc->crpc_dest, &bk->bk_mdh, ev);
	if (rc != 0) {
		LASSERT(rc == -ENOMEM);
		ev->ev_fired = 1; /* no more event expected */
	}
	return rc;
}
static int
srpc_do_bulk(struct srpc_server_rpc *rpc)
{
	struct srpc_event *ev = &rpc->srpc_ev;
	struct srpc_bulk *bk = rpc->srpc_bulk;
	__u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
	int rc;
	int opt;

	LASSERT(bk != NULL);

	opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
	opt |= LNET_MD_KIOV;

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
				   &bk->bk_iovs[0], bk->bk_niov, opt,
				   rpc->srpc_peer, rpc->srpc_self,
				   &bk->bk_mdh, ev);
	if (rc != 0)
		ev->ev_fired = 1; /* no more event expected */
	return rc;
}
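/* Tear down a server RPC: run its done callback, recycle the request
 * buffer, drop it from scd_rpc_active, then either re-arm it at once
 * for a blocked buffer or park it on scd_rpc_free. */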
/* only called from srpc_handle_rpc */
static void
srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_buffer *buffer;

	LASSERT(status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);

	rpc->srpc_status = status;

	CDEBUG_LIMIT(status == 0 ? D_NET : D_NETERROR,
		     "Server RPC %p done: service %s, peer %s, status %s:%d\n",
		     rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
		     swi_state2str(rpc->srpc_wi.swi_state), status);

	if (status != 0) {
		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.rpcs_dropped++;
		spin_unlock(&srpc_data.rpc_glock);
	}

	if (rpc->srpc_done != NULL)
		(*rpc->srpc_done)(rpc);
	LASSERT(rpc->srpc_bulk == NULL);

	spin_lock(&scd->scd_lock);

	if (rpc->srpc_reqstbuf != NULL) {
		/* NB we might drop scd_lock in srpc_service_recycle_buffer,
		 * but sv can't go away since scd_rpc_active must not be empty
		 */
		srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
		rpc->srpc_reqstbuf = NULL;
	}

	list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */

	/*
	 * No one can schedule me now since:
	 * - I'm not on scd_rpc_active.
	 * - all LNet events have been fired.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(rpc->srpc_ev.ev_fired);
	swi_exit_workitem(&rpc->srpc_wi);

	if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
		buffer = list_entry(scd->scd_buf_blocked.next,
				    struct srpc_buffer, buf_list);
		list_del(&buffer->buf_list);

		srpc_init_server_rpc(rpc, scd, buffer);
		list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
		swi_schedule_workitem(&rpc->srpc_wi);
	} else {
		list_add(&rpc->srpc_list, &scd->scd_rpc_free);
	}

	spin_unlock(&scd->scd_lock);
}
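/* Server-side state machine, advanced once per work-item run:
 * NEWBORN (unpack request, call sv_handler) -> BULK_STARTED (optional
 * bulk transfer) -> REPLY_SUBMITTED -> DONE. A return value of 0 means
 * parked, waiting for the next LNet event; 1 means the RPC is done. */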
/* handles an incoming RPC */
static int srpc_handle_rpc(struct swi_workitem *wi)
{
	struct srpc_server_rpc *rpc = container_of(wi, struct srpc_server_rpc,
						   srpc_wi);
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	struct srpc_event *ev = &rpc->srpc_ev;
	int rc = 0;

	LASSERT(wi == &rpc->srpc_wi);

	spin_lock(&scd->scd_lock);

	if (sv->sv_shuttingdown || rpc->srpc_aborted) {
		spin_unlock(&scd->scd_lock);

		if (rpc->srpc_bulk != NULL)
			LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
		LNetMDUnlink(rpc->srpc_replymdh);

		if (ev->ev_fired) { /* no more event, OK to finish */
			srpc_server_rpc_done(rpc, -ESHUTDOWN);
			return 1;
		}
		return 0;
	}

	spin_unlock(&scd->scd_lock);

	switch (wi->swi_state) {
	default:
		LBUG();
	case SWI_STATE_NEWBORN: {
		struct srpc_msg *msg;
		struct srpc_generic_reply *reply;

		msg = &rpc->srpc_reqstbuf->buf_msg;
		reply = &rpc->srpc_replymsg.msg_body.reply;

		if (msg->msg_magic == 0) {
			/* moaned already in srpc_lnet_ev_handler */
			srpc_server_rpc_done(rpc, -EBADMSG);
			return 1;
		}

		srpc_unpack_msg_hdr(msg);
		if (msg->msg_version != SRPC_MSG_VERSION) {
			CWARN("Version mismatch: %u, %u expected, from %s\n",
			      msg->msg_version, SRPC_MSG_VERSION,
			      libcfs_id2str(rpc->srpc_peer));
			reply->status = EPROTO;
			/* drop through and send reply */
		} else {
			reply->status = 0;
			rc = (*sv->sv_handler)(rpc);
			LASSERT(reply->status == 0 || !rpc->srpc_bulk);
			if (rc != 0) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_BULK_STARTED;

		if (rpc->srpc_bulk != NULL) {
			rc = srpc_do_bulk(rpc);
			if (rc == 0)
				return 0; /* wait for bulk */

			LASSERT(ev->ev_fired);
			ev->ev_status = rc;
		}
	}
	/* fall through */
	case SWI_STATE_BULK_STARTED:
		LASSERT(rpc->srpc_bulk == NULL || ev->ev_fired);

		if (rpc->srpc_bulk != NULL) {
			rc = ev->ev_status;

			if (sv->sv_bulk_ready != NULL)
				rc = (*sv->sv_bulk_ready)(rpc, rc);

			if (rc != 0) {
				srpc_server_rpc_done(rpc, rc);
				return 1;
			}
		}

		wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
		rc = srpc_send_reply(rpc);
		if (rc == 0)
			return 0; /* wait for reply */
		srpc_server_rpc_done(rpc, rc);
		return 1;

	case SWI_STATE_REPLY_SUBMITTED:
		if (!ev->ev_fired) {
			CERROR("RPC %p: bulk %p, service %d\n",
			       rpc, rpc->srpc_bulk, sv->sv_id);
			CERROR("Event: status %d, type %d, lnet %d\n",
			       ev->ev_status, ev->ev_type, ev->ev_lnet);
			LASSERT(ev->ev_fired);
		}

		wi->swi_state = SWI_STATE_DONE;
		srpc_server_rpc_done(rpc, ev->ev_status);
		return 1;
	}

	return 0;
}
static void
srpc_client_rpc_expired(void *data)
{
	struct srpc_client_rpc *rpc = data;

	CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
	      rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	      rpc->crpc_timeout);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_timeout = 0;
	srpc_abort_rpc(rpc, -ETIMEDOUT);

	spin_unlock(&rpc->crpc_lock);

	spin_lock(&srpc_data.rpc_glock);
	srpc_data.rpc_counters.rpcs_expired++;
	spin_unlock(&srpc_data.rpc_glock);
}
static void
srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
{
	struct stt_timer *timer = &rpc->crpc_timer;

	if (rpc->crpc_timeout == 0)
		return;

	INIT_LIST_HEAD(&timer->stt_list);
	timer->stt_data = rpc;
	timer->stt_func = srpc_client_rpc_expired;
	timer->stt_expires = ktime_get_real_seconds() + rpc->crpc_timeout;
	stt_add_timer(timer);
}
/*
 * Called with rpc->crpc_lock held.
 *
 * Upon exit the RPC expiry timer is not queued and the handler is not
 * running on any CPU.
 */
static void
srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
{
	/* timer not planted or already exploded */
	if (rpc->crpc_timeout == 0)
		return;

	/* timer successfully defused */
	if (stt_del_timer(&rpc->crpc_timer))
		return;

	/* timer detonated, wait for it to explode */
	while (rpc->crpc_timeout != 0) {
		spin_unlock(&rpc->crpc_lock);

		schedule();

		spin_lock(&rpc->crpc_lock);
	}
}
static void
srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
{
	struct swi_workitem *wi = &rpc->crpc_wi;

	LASSERT(status != 0 || wi->swi_state == SWI_STATE_DONE);

	spin_lock(&rpc->crpc_lock);

	rpc->crpc_closed = 1;
	if (rpc->crpc_status == 0)
		rpc->crpc_status = status;

	srpc_del_client_rpc_timer(rpc);

	CDEBUG_LIMIT((status == 0) ? D_NET : D_NETERROR,
		     "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
		     rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
		     swi_state2str(wi->swi_state), rpc->crpc_aborted, status);

	/*
	 * No one can schedule me now since:
	 * - RPC timer has been defused.
	 * - all LNet events have been fired.
	 * - crpc_closed has been set, preventing srpc_abort_rpc from
	 *   scheduling me.
	 * Cancel pending schedules and prevent future schedule attempts:
	 */
	LASSERT(!srpc_event_pending(rpc));
	swi_exit_workitem(wi);

	spin_unlock(&rpc->crpc_lock);

	(*rpc->crpc_done)(rpc);
}
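/* Client-side counterpart of srpc_handle_rpc(): NEWBORN (post reply and
 * bulk buffers, send request) -> REQUEST_SUBMITTED -> REQUEST_SENT ->
 * REPLY_RECEIVED -> DONE. Events may arrive in any order but are
 * consumed strictly in this order; aborts funnel through the bottom. */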
/* sends an outgoing RPC */
int
srpc_send_rpc(struct swi_workitem *wi)
{
	int rc = 0;
	struct srpc_client_rpc *rpc;
	struct srpc_msg *reply;
	int do_bulk;

	LASSERT(wi != NULL);

	rpc = container_of(wi, struct srpc_client_rpc, crpc_wi);

	LASSERT(rpc != NULL);
	LASSERT(wi == &rpc->crpc_wi);

	reply = &rpc->crpc_replymsg;
	do_bulk = rpc->crpc_bulk.bk_niov > 0;

	spin_lock(&rpc->crpc_lock);

	if (rpc->crpc_aborted) {
		spin_unlock(&rpc->crpc_lock);
		goto abort;
	}

	spin_unlock(&rpc->crpc_lock);

	switch (wi->swi_state) {
	default:
		LBUG();
	case SWI_STATE_NEWBORN:
		LASSERT(!srpc_event_pending(rpc));

		rc = srpc_prepare_reply(rpc);
		if (rc != 0) {
			srpc_client_rpc_done(rpc, rc);
			return 1;
		}

		rc = srpc_prepare_bulk(rpc);
		if (rc != 0)
			break;

		wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
		rc = srpc_send_request(rpc);
		break;

	case SWI_STATE_REQUEST_SUBMITTED:
		/* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
		 * order; however, they're processed in a strict order:
		 * rqt, rpy, and bulk.
		 */
		if (!rpc->crpc_reqstev.ev_fired)
			break;

		rc = rpc->crpc_reqstev.ev_status;
		if (rc != 0)
			break;

		wi->swi_state = SWI_STATE_REQUEST_SENT;
		/* fall through */
	case SWI_STATE_REQUEST_SENT: {
		enum srpc_msg_type type;

		type = srpc_service2reply(rpc->crpc_service);

		if (!rpc->crpc_replyev.ev_fired)
			break;

		rc = rpc->crpc_replyev.ev_status;
		if (rc != 0)
			break;

		srpc_unpack_msg_hdr(reply);
		if (reply->msg_type != type ||
		    (reply->msg_magic != SRPC_MSG_MAGIC &&
		     reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CWARN("Bad message from %s: type %u (%d expected), magic %u (%d expected).\n",
			      libcfs_id2str(rpc->crpc_dest),
			      reply->msg_type, type,
			      reply->msg_magic, SRPC_MSG_MAGIC);
			rc = -EBADMSG;
			break;
		}

		if (do_bulk && reply->msg_body.reply.status != 0) {
			CWARN("Remote error %d at %s, unlink bulk buffer in case peer didn't initiate bulk transfer\n",
			      reply->msg_body.reply.status,
			      libcfs_id2str(rpc->crpc_dest));
			LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
		}

		wi->swi_state = SWI_STATE_REPLY_RECEIVED;
	}
	/* fall through */
	case SWI_STATE_REPLY_RECEIVED:
		if (do_bulk && !rpc->crpc_bulkev.ev_fired)
			break;

		rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;

		/* Bulk buffer was unlinked due to remote error. Clear error
		 * since reply buffer still contains valid data.
		 * NB rpc->crpc_done shouldn't look into bulk data in case of
		 * remote error.
		 */
		if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
		    rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
			rc = 0;

		wi->swi_state = SWI_STATE_DONE;
		srpc_client_rpc_done(rpc, rc);
		return 1;
	}

	if (rc != 0) {
		spin_lock(&rpc->crpc_lock);
		srpc_abort_rpc(rpc, rc);
		spin_unlock(&rpc->crpc_lock);
	}

abort:
	if (rpc->crpc_aborted) {
		LNetMDUnlink(rpc->crpc_reqstmdh);
		LNetMDUnlink(rpc->crpc_replymdh);
		LNetMDUnlink(rpc->crpc_bulk.bk_mdh);

		if (!srpc_event_pending(rpc)) {
			srpc_client_rpc_done(rpc, -EINTR);
			return 1;
		}
	}
	return 0;
}
struct srpc_client_rpc *
srpc_create_client_rpc(struct lnet_process_id peer, int service,
		       int nbulkiov, int bulklen,
		       void (*rpc_done)(struct srpc_client_rpc *),
		       void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
	struct srpc_client_rpc *rpc;

	LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
				   crpc_bulk.bk_iovs[nbulkiov]));
	if (rpc == NULL)
		return NULL;

	srpc_init_client_rpc(rpc, peer, service, nbulkiov,
			     bulklen, rpc_done, rpc_fini, priv);
	return rpc;
}
/* called with rpc->crpc_lock held */
void
srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
{
	LASSERT(why != 0);

	if (rpc->crpc_aborted || /* already aborted */
	    rpc->crpc_closed)	 /* callback imminent */
		return;

	CDEBUG(D_NET,
	       "Aborting RPC: service %d, peer %s, state %s, why %d\n",
	       rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
	       swi_state2str(rpc->crpc_wi.swi_state), why);

	rpc->crpc_aborted = 1;
	rpc->crpc_status = why;
	swi_schedule_workitem(&rpc->crpc_wi);
}
/* called with rpc->crpc_lock held */
void
srpc_post_rpc(struct srpc_client_rpc *rpc)
{
	LASSERT(!rpc->crpc_aborted);
	LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);

	CDEBUG(D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
	       libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
	       rpc->crpc_timeout);

	srpc_add_client_rpc_timer(rpc);
	swi_schedule_workitem(&rpc->crpc_wi);
}
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
	struct srpc_event *ev = &rpc->srpc_ev;
	struct srpc_msg *msg = &rpc->srpc_replymsg;
	struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
	struct srpc_service_cd *scd = rpc->srpc_scd;
	struct srpc_service *sv = scd->scd_svc;
	__u64 rpyid;
	int rc;

	LASSERT(buffer != NULL);
	rpyid = buffer->buf_msg.msg_body.reqst.rpyid;

	spin_lock(&scd->scd_lock);

	if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
		/* Repost buffer before replying since the test client
		 * might send me another RPC once it gets the reply
		 */
		if (srpc_service_post_buffer(scd, buffer) != 0)
			CWARN("Failed to repost %s buffer\n", sv->sv_name);
		rpc->srpc_reqstbuf = NULL;
	}

	spin_unlock(&scd->scd_lock);

	ev->ev_fired = 0;
	ev->ev_data = rpc;
	ev->ev_type = SRPC_REPLY_SENT;

	msg->msg_magic = SRPC_MSG_MAGIC;
	msg->msg_version = SRPC_MSG_VERSION;
	msg->msg_type = srpc_service2reply(sv->sv_id);

	rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
				   sizeof(*msg), LNET_MD_OP_PUT,
				   rpc->srpc_peer, rpc->srpc_self,
				   &rpc->srpc_replymdh, ev);
	if (rc != 0)
		ev->ev_fired = 1; /* no more event expected */
	return rc;
}
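/* The single LNet event handler for the module: ev->md.user_ptr always
 * points at the srpc_event embedded in the interested client RPC,
 * server RPC or service descriptor, which routes the wakeup. */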
/* when in kernel always called with LNET_LOCK() held, and in thread context */
static void
srpc_lnet_ev_handler(struct lnet_event *ev)
{
	struct srpc_service_cd *scd;
	struct srpc_event *rpcev = ev->md.user_ptr;
	struct srpc_client_rpc *crpc;
	struct srpc_server_rpc *srpc;
	struct srpc_buffer *buffer;
	struct srpc_service *sv;
	struct srpc_msg *msg;
	enum srpc_msg_type type;

	LASSERT(!in_interrupt());

	if (ev->status != 0) {
		__u32 errors;

		spin_lock(&srpc_data.rpc_glock);
		if (ev->status != -ECANCELED) /* cancellation is not error */
			srpc_data.rpc_counters.errors++;
		errors = srpc_data.rpc_counters.errors;
		spin_unlock(&srpc_data.rpc_glock);

		CNETERR("LNet event status %d type %d, RPC errors %u\n",
			ev->status, ev->type, errors);
	}

	rpcev->ev_lnet = ev->type;

	switch (rpcev->ev_type) {
	default:
		CERROR("Unknown event: status %d, type %d, lnet %d\n",
		       rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
		LBUG();
	case SRPC_REQUEST_SENT:
		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
			spin_lock(&srpc_data.rpc_glock);
			srpc_data.rpc_counters.rpcs_sent++;
			spin_unlock(&srpc_data.rpc_glock);
		}
		/* fall through */
	case SRPC_REPLY_RCVD:
	case SRPC_BULK_REQ_RCVD:
		crpc = rpcev->ev_data;

		if (rpcev != &crpc->crpc_reqstev &&
		    rpcev != &crpc->crpc_replyev &&
		    rpcev != &crpc->crpc_bulkev) {
			CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
			       rpcev, crpc, &crpc->crpc_reqstev,
			       &crpc->crpc_replyev, &crpc->crpc_bulkev);
			CERROR("Bad event: status %d, type %d, lnet %d\n",
			       rpcev->ev_status, rpcev->ev_type,
			       rpcev->ev_lnet);
			LBUG();
		}

		spin_lock(&crpc->crpc_lock);

		LASSERT(rpcev->ev_fired == 0);
		rpcev->ev_fired = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&crpc->crpc_wi);

		spin_unlock(&crpc->crpc_lock);
		break;

	case SRPC_REQUEST_RCVD:
		scd = rpcev->ev_data;
		sv = scd->scd_svc;

		LASSERT(rpcev == &scd->scd_ev);

		spin_lock(&scd->scd_lock);

		LASSERT(ev->unlinked);
		LASSERT(ev->type == LNET_EVENT_PUT ||
			ev->type == LNET_EVENT_UNLINK);
		LASSERT(ev->type != LNET_EVENT_UNLINK ||
			sv->sv_shuttingdown);

		buffer = container_of(ev->md.start, struct srpc_buffer,
				      buf_msg);
		buffer->buf_peer = ev->source;
		buffer->buf_self = ev->target.nid;

		LASSERT(scd->scd_buf_nposted > 0);
		scd->scd_buf_nposted--;

		if (sv->sv_shuttingdown) {
			/* Leave buffer on scd->scd_buf_nposted since
			 * srpc_finish_service needs to traverse it.
			 */
			spin_unlock(&scd->scd_lock);
			break;
		}

		if (scd->scd_buf_err_stamp != 0 &&
		    scd->scd_buf_err_stamp < ktime_get_real_seconds()) {
			/* re-enable adding buffer */
			scd->scd_buf_err_stamp = 0;
			scd->scd_buf_err = 0;
		}

		if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
		    scd->scd_buf_adjust == 0 &&
		    scd->scd_buf_nposted < scd->scd_buf_low) {
			scd->scd_buf_adjust = MAX(scd->scd_buf_total / 2,
						  SFW_TEST_WI_MIN);
			swi_schedule_workitem(&scd->scd_buf_wi);
		}

		list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
		msg = &buffer->buf_msg;
		type = srpc_service2request(sv->sv_id);

		if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
		    (msg->msg_type != type &&
		     msg->msg_type != __swab32(type)) ||
		    (msg->msg_magic != SRPC_MSG_MAGIC &&
		     msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
			CERROR("Dropping RPC (%s) from %s: status %d mlength %d type %u magic %u.\n",
			       sv->sv_name, libcfs_id2str(ev->initiator),
			       ev->status, ev->mlength,
			       msg->msg_type, msg->msg_magic);

			/* NB can't call srpc_service_recycle_buffer here since
			 * it may call LNetM[DE]Attach. The invalid magic tells
			 * srpc_handle_rpc to drop this RPC
			 */
			msg->msg_magic = 0;
		}

		if (!list_empty(&scd->scd_rpc_free)) {
			srpc = list_entry(scd->scd_rpc_free.next,
					  struct srpc_server_rpc,
					  srpc_list);
			list_del(&srpc->srpc_list);

			srpc_init_server_rpc(srpc, scd, buffer);
			list_add_tail(&srpc->srpc_list,
				      &scd->scd_rpc_active);
			swi_schedule_workitem(&srpc->srpc_wi);
		} else {
			list_add_tail(&buffer->buf_list,
				      &scd->scd_buf_blocked);
		}

		spin_unlock(&scd->scd_lock);

		spin_lock(&srpc_data.rpc_glock);
		srpc_data.rpc_counters.rpcs_rcvd++;
		spin_unlock(&srpc_data.rpc_glock);
		break;

	case SRPC_BULK_GET_RPLD:
		LASSERT(ev->type == LNET_EVENT_SEND ||
			ev->type == LNET_EVENT_REPLY ||
			ev->type == LNET_EVENT_UNLINK);

		if (!ev->unlinked)
			break; /* wait for final event */
		/* fall through */
	case SRPC_BULK_PUT_SENT:
		if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
			spin_lock(&srpc_data.rpc_glock);

			if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
				srpc_data.rpc_counters.bulk_get += ev->mlength;
			else
				srpc_data.rpc_counters.bulk_put += ev->mlength;

			spin_unlock(&srpc_data.rpc_glock);
		}
		/* fall through */
	case SRPC_REPLY_SENT:
		srpc = rpcev->ev_data;
		scd = srpc->srpc_scd;

		LASSERT(rpcev == &srpc->srpc_ev);

		spin_lock(&scd->scd_lock);

		rpcev->ev_fired = 1;
		rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
				   -EINTR : ev->status;
		swi_schedule_workitem(&srpc->srpc_wi);

		spin_unlock(&scd->scd_lock);
		break;
	}
}
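/* Module start/stop. The matchbits seed is the wall-clock time shifted
 * into the high bits, taken after a one second pause; per the comment
 * below this avoids reusing matchbits across a quick restart. */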
int
srpc_startup(void)
{
	int rc;

	memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
	spin_lock_init(&srpc_data.rpc_glock);

	/* 1 second pause to avoid timestamp reuse */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(cfs_time_seconds(1));
	srpc_data.rpc_matchbits = ((__u64)ktime_get_real_seconds()) << 48;

	srpc_data.rpc_state = SRPC_STATE_NONE;

	rc = LNetNIInit(LNET_PID_LUSTRE);
	if (rc < 0) {
		CERROR("LNetNIInit() has failed: %d\n", rc);
		return rc;
	}

	srpc_data.rpc_state = SRPC_STATE_NI_INIT;

	LNetInvalidateEQHandle(&srpc_data.rpc_lnet_eq);
	rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
	if (rc != 0) {
		CERROR("LNetEQAlloc() has failed: %d\n", rc);
		goto bail;
	}

	rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
	LASSERT(rc == 0);
	rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
	LASSERT(rc == 0);

	srpc_data.rpc_state = SRPC_STATE_EQ_INIT;

	rc = stt_startup();

bail:
	if (rc != 0)
		srpc_shutdown();
	else
		srpc_data.rpc_state = SRPC_STATE_RUNNING;

	return rc;
}
void
srpc_shutdown(void)
{
	int i;
	int rc;
	int state;

	state = srpc_data.rpc_state;
	srpc_data.rpc_state = SRPC_STATE_STOPPING;

	switch (state) {
	default:
		LBUG();
	case SRPC_STATE_RUNNING:
		spin_lock(&srpc_data.rpc_glock);

		for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
			struct srpc_service *sv = srpc_data.rpc_services[i];

			LASSERTF(sv == NULL,
				 "service not empty: id %d, name %s\n",
				 i, sv->sv_name);
		}

		spin_unlock(&srpc_data.rpc_glock);

		stt_shutdown();
		/* fall through */
	case SRPC_STATE_EQ_INIT:
		rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
		rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
		LASSERT(rc == 0);
		rc = LNetEQFree(srpc_data.rpc_lnet_eq);
		LASSERT(rc == 0); /* the EQ should have no user by now */
		/* fall through */
	case SRPC_STATE_NI_INIT: