1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  */
30 /*
31  * This file is part of Lustre, http://www.lustre.org/
32  * Lustre is a trademark of Sun Microsystems, Inc.
33  *
34  * lnet/selftest/rpc.c
35  *
36  * Author: Isaac Huang <isaac@clusterfs.com>
37  *
38  * 2012-05-13: Liang Zhen <liang@whamcloud.com>
39  * - percpt data for service to improve smp performance
40  * - code cleanup
41  */
42
43 #define DEBUG_SUBSYSTEM S_LNET
44
45 #include "selftest.h"
46
47 typedef enum {
48         SRPC_STATE_NONE,
49         SRPC_STATE_NI_INIT,
50         SRPC_STATE_EQ_INIT,
51         SRPC_STATE_RUNNING,
52         SRPC_STATE_STOPPING,
53 } srpc_state_t;
54
55 struct smoketest_rpc {
56         cfs_spinlock_t    rpc_glock;     /* global lock */
57         srpc_service_t   *rpc_services[SRPC_SERVICE_MAX_ID + 1];
58         lnet_handle_eq_t  rpc_lnet_eq;   /* _the_ LNet event queue */
59         srpc_state_t      rpc_state;
60         srpc_counters_t   rpc_counters;
61         __u64             rpc_matchbits; /* matchbits counter */
62 } srpc_data;
63
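   /* framework service requests and test service requests are posted on
    * different LNet portals */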
64 static inline int
65 srpc_serv_portal(int svc_id)
66 {
67         return svc_id < SRPC_FRAMEWORK_SERVICE_MAX_ID ?
68                SRPC_FRAMEWORK_REQUEST_PORTAL : SRPC_REQUEST_PORTAL;
69 }
70
71 /* forward ref's */
72 int srpc_handle_rpc (swi_workitem_t *wi);
73
74 void srpc_get_counters (srpc_counters_t *cnt)
75 {
76         cfs_spin_lock(&srpc_data.rpc_glock);
77         *cnt = srpc_data.rpc_counters;
78         cfs_spin_unlock(&srpc_data.rpc_glock);
79 }
80
81 void srpc_set_counters (const srpc_counters_t *cnt)
82 {
83         cfs_spin_lock(&srpc_data.rpc_glock);
84         srpc_data.rpc_counters = *cnt;
85         cfs_spin_unlock(&srpc_data.rpc_glock);
86 }
87
88 void
89 srpc_add_bulk_page (srpc_bulk_t *bk, cfs_page_t *pg, int i)
90 {
91         LASSERT (i >= 0 && i < bk->bk_niov);
92
93 #ifdef __KERNEL__
94         bk->bk_iovs[i].kiov_offset = 0;
95         bk->bk_iovs[i].kiov_page   = pg;
96         bk->bk_iovs[i].kiov_len    = CFS_PAGE_SIZE;
97 #else
98         LASSERT (bk->bk_pages != NULL);
99
100         bk->bk_pages[i] = pg;
101         bk->bk_iovs[i].iov_len  = CFS_PAGE_SIZE;
102         bk->bk_iovs[i].iov_base = cfs_page_address(pg);
103 #endif
104         return;
105 }
106
107 void
108 srpc_free_bulk (srpc_bulk_t *bk)
109 {
110         int         i;
111         cfs_page_t *pg;
112
113         LASSERT (bk != NULL);
114 #ifndef __KERNEL__
115         LASSERT (bk->bk_pages != NULL);
116 #endif
117
118         for (i = 0; i < bk->bk_niov; i++) {
119 #ifdef __KERNEL__
120                 pg = bk->bk_iovs[i].kiov_page;
121 #else
122                 pg = bk->bk_pages[i];
123 #endif
124                 if (pg == NULL) break;
125
126                 cfs_free_page(pg);
127         }
128
129 #ifndef __KERNEL__
130         LIBCFS_FREE(bk->bk_pages, sizeof(cfs_page_t *) * bk->bk_niov);
131 #endif
132         LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
133         return;
134 }
135
136 srpc_bulk_t *
137 srpc_alloc_bulk(int cpt, int npages, int sink)
138 {
139         srpc_bulk_t  *bk;
140         cfs_page_t  **pages;
141         int           i;
142
143         LASSERT (npages > 0 && npages <= LNET_MAX_IOV);
144
145         LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
146                          offsetof(srpc_bulk_t, bk_iovs[npages]));
147         if (bk == NULL) {
148                 CERROR ("Can't allocate descriptor for %d pages\n", npages);
149                 return NULL;
150         }
151
152         memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[npages]));
153         bk->bk_sink = sink;
154         bk->bk_niov = npages;
155         bk->bk_len  = npages * CFS_PAGE_SIZE;
156 #ifndef __KERNEL__
157         LIBCFS_CPT_ALLOC(pages, lnet_cpt_table(), cpt,
158                          sizeof(cfs_page_t *) * npages);
159         if (pages == NULL) {
160                 LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[npages]));
161                 CERROR ("Can't allocate page array for %d pages\n", npages);
162                 return NULL;
163         }
164
165         memset(pages, 0, sizeof(cfs_page_t *) * npages);
166         bk->bk_pages = pages;
167 #else
168         UNUSED (pages);
169 #endif
170
171         for (i = 0; i < npages; i++) {
172                 cfs_page_t *pg;
173
174                 pg = cfs_page_cpt_alloc(lnet_cpt_table(), cpt, CFS_ALLOC_STD);
175                 if (pg == NULL) {
176                         CERROR ("Can't allocate page %d of %d\n", i, npages);
177                         srpc_free_bulk(bk);
178                         return NULL;
179                 }
180
181                 srpc_add_bulk_page(bk, pg, i);
182         }
183
184         return bk;
185 }
186
187 static inline __u64
188 srpc_next_id (void)
189 {
190         __u64 id;
191
192         cfs_spin_lock(&srpc_data.rpc_glock);
193         id = srpc_data.rpc_matchbits++;
194         cfs_spin_unlock(&srpc_data.rpc_glock);
195         return id;
196 }
197
198 void
199 srpc_init_server_rpc(struct srpc_server_rpc *rpc,
200                      struct srpc_service_cd *scd,
201                      struct srpc_buffer *buffer)
202 {
203         memset(rpc, 0, sizeof(*rpc));
204         swi_init_workitem(&rpc->srpc_wi, rpc, srpc_handle_rpc,
205                           srpc_serv_is_framework(scd->scd_svc) ?
206                           lst_sched_serial : lst_sched_test[scd->scd_cpt]);
207
208         rpc->srpc_ev.ev_fired = 1; /* no event expected now */
209
210         rpc->srpc_scd      = scd;
211         rpc->srpc_reqstbuf = buffer;
212         rpc->srpc_peer     = buffer->buf_peer;
213         rpc->srpc_self     = buffer->buf_self;
214         LNetInvalidateHandle(&rpc->srpc_replymdh);
215 }
216
217 static void
218 srpc_service_fini(struct srpc_service *svc)
219 {
220         struct srpc_service_cd  *scd;
221         struct srpc_server_rpc  *rpc;
222         struct srpc_buffer      *buf;
223         cfs_list_t              *q;
224         int                     i;
225
226         if (svc->sv_cpt_data == NULL)
227                 return;
228
229         cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
230                 while (1) {
231                         if (!cfs_list_empty(&scd->scd_buf_posted))
232                                 q = &scd->scd_buf_posted;
233                         else if (!cfs_list_empty(&scd->scd_buf_blocked))
234                                 q = &scd->scd_buf_blocked;
235                         else
236                                 break;
237
238                         while (!cfs_list_empty(q)) {
239                                 buf = cfs_list_entry(q->next,
240                                                      struct srpc_buffer,
241                                                      buf_list);
242                                 cfs_list_del(&buf->buf_list);
243                                 LIBCFS_FREE(buf, sizeof(*buf));
244                         }
245                 }
246
247                 LASSERT(cfs_list_empty(&scd->scd_rpc_active));
248
249                 while (!cfs_list_empty(&scd->scd_rpc_free)) {
250                         rpc = cfs_list_entry(scd->scd_rpc_free.next,
251                                              struct srpc_server_rpc,
252                                              srpc_list);
253                         cfs_list_del(&rpc->srpc_list);
254                         LIBCFS_FREE(rpc, sizeof(*rpc));
255                 }
256         }
257
258         cfs_percpt_free(svc->sv_cpt_data);
259         svc->sv_cpt_data = NULL;
260 }
261
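    /* number of server RPC descriptors to pre-allocate for each CPT partition */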
262 static int
263 srpc_service_nrpcs(struct srpc_service *svc)
264 {
265         int nrpcs = svc->sv_wi_total / svc->sv_ncpts;
266
267         return srpc_serv_is_framework(svc) ?
268                max(nrpcs, SFW_FRWK_WI_MIN) : max(nrpcs, SFW_TEST_WI_MIN);
269 }
270
271 int srpc_add_buffer(struct swi_workitem *wi);
272
273 static int
274 srpc_service_init(struct srpc_service *svc)
275 {
276         struct srpc_service_cd  *scd;
277         struct srpc_server_rpc  *rpc;
278         int                     nrpcs;
279         int                     i;
280         int                     j;
281
282         svc->sv_shuttingdown = 0;
283
284         svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
285                                             sizeof(struct srpc_service_cd));
286         if (svc->sv_cpt_data == NULL)
287                 return -ENOMEM;
288
289         svc->sv_ncpts = srpc_serv_is_framework(svc) ?
290                         1 : cfs_cpt_number(lnet_cpt_table());
291         nrpcs = srpc_service_nrpcs(svc);
292
293         cfs_percpt_for_each(scd, i, svc->sv_cpt_data) {
294                 scd->scd_cpt = i;
295                 scd->scd_svc = svc;
296                 cfs_spin_lock_init(&scd->scd_lock);
297                 CFS_INIT_LIST_HEAD(&scd->scd_rpc_free);
298                 CFS_INIT_LIST_HEAD(&scd->scd_rpc_active);
299                 CFS_INIT_LIST_HEAD(&scd->scd_buf_posted);
300                 CFS_INIT_LIST_HEAD(&scd->scd_buf_blocked);
301
302                 scd->scd_ev.ev_data = scd;
303                 scd->scd_ev.ev_type = SRPC_REQUEST_RCVD;
304
305                 /* NB: don't use lst_sched_serial for adding buffer,
306                  * see details in srpc_service_add_buffers() */
307                 swi_init_workitem(&scd->scd_buf_wi, scd,
308                                   srpc_add_buffer, lst_sched_test[i]);
309
310                 if (i != 0 && srpc_serv_is_framework(svc)) {
311                         /* NB: a framework service only needs srpc_service_cd
312                          * for one partition, but we allocate it for all of them
313                          * to keep the implementation simple; the small amount of
314                          * wasted memory is harmless */
315                         continue;
316                 }
317
318                 for (j = 0; j < nrpcs; j++) {
319                         LIBCFS_CPT_ALLOC(rpc, lnet_cpt_table(),
320                                          i, sizeof(*rpc));
321                         if (rpc == NULL) {
322                                 srpc_service_fini(svc);
323                                 return -ENOMEM;
324                         }
325                         cfs_list_add(&rpc->srpc_list, &scd->scd_rpc_free);
326                 }
327         }
328
329         return 0;
330 }
331
332 int
333 srpc_add_service(struct srpc_service *sv)
334 {
335         int id = sv->sv_id;
336
337         LASSERT(0 <= id && id <= SRPC_SERVICE_MAX_ID);
338
339         if (srpc_service_init(sv) != 0)
340                 return -ENOMEM;
341
342         cfs_spin_lock(&srpc_data.rpc_glock);
343
344         LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
345
346         if (srpc_data.rpc_services[id] != NULL) {
347                 cfs_spin_unlock(&srpc_data.rpc_glock);
348                 goto failed;
349         }
350
351         srpc_data.rpc_services[id] = sv;
352         cfs_spin_unlock(&srpc_data.rpc_glock);
353
354         CDEBUG(D_NET, "Adding service: id %d, name %s\n", id, sv->sv_name);
355         return 0;
356
357  failed:
358         srpc_service_fini(sv);
359         return -EBUSY;
360 }
361
362 int
363 srpc_remove_service (srpc_service_t *sv)
364 {
365         int id = sv->sv_id;
366
367         cfs_spin_lock(&srpc_data.rpc_glock);
368
369         if (srpc_data.rpc_services[id] != sv) {
370                 cfs_spin_unlock(&srpc_data.rpc_glock);
371                 return -ENOENT;
372         }
373
374         srpc_data.rpc_services[id] = NULL;
375         cfs_spin_unlock(&srpc_data.rpc_glock);
376         return 0;
377 }
378
379 int
380 srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
381                        int len, int options, lnet_process_id_t peer,
382                        lnet_handle_md_t *mdh, srpc_event_t *ev)
383 {
384         int              rc;
385         lnet_md_t        md;
386         lnet_handle_me_t meh;
387
388         rc = LNetMEAttach(portal, peer, matchbits, 0, LNET_UNLINK,
389                           local ? LNET_INS_LOCAL : LNET_INS_AFTER, &meh);
390         if (rc != 0) {
391                 CERROR ("LNetMEAttach failed: %d\n", rc);
392                 LASSERT (rc == -ENOMEM);
393                 return -ENOMEM;
394         }
395
396         md.threshold = 1;
397         md.user_ptr  = ev;
398         md.start     = buf;
399         md.length    = len;
400         md.options   = options;
401         md.eq_handle = srpc_data.rpc_lnet_eq;
402
403         rc = LNetMDAttach(meh, md, LNET_UNLINK, mdh);
404         if (rc != 0) {
405                 CERROR ("LNetMDAttach failed: %d\n", rc);
406                 LASSERT (rc == -ENOMEM);
407
408                 rc = LNetMEUnlink(meh);
409                 LASSERT (rc == 0);
410                 return -ENOMEM;
411         }
412
413         CDEBUG (D_NET,
414                 "Posted passive RDMA: peer %s, portal %d, matchbits "LPX64"\n",
415                 libcfs_id2str(peer), portal, matchbits);
416         return 0;
417 }
418
419 int
420 srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
421                       int options, lnet_process_id_t peer, lnet_nid_t self,
422                       lnet_handle_md_t *mdh, srpc_event_t *ev)
423 {
424         int       rc;
425         lnet_md_t md;
426
427         md.user_ptr  = ev;
428         md.start     = buf;
429         md.length    = len;
430         md.eq_handle = srpc_data.rpc_lnet_eq;
431         md.threshold = ((options & LNET_MD_OP_GET) != 0) ? 2 : 1;
432         md.options   = options & ~(LNET_MD_OP_PUT | LNET_MD_OP_GET);
433
434         rc = LNetMDBind(md, LNET_UNLINK, mdh);
435         if (rc != 0) {
436                 CERROR ("LNetMDBind failed: %d\n", rc);
437                 LASSERT (rc == -ENOMEM);
438                 return -ENOMEM;
439         }
440
441         /* this is kind of an abuse of the LNET_MD_OP_{PUT,GET} options:
442          * they're only meaningful for MDs attached to an ME (i.e. passive
443          * buffers) */
444         if ((options & LNET_MD_OP_PUT) != 0) {
445                 rc = LNetPut(self, *mdh, LNET_NOACK_REQ, peer,
446                              portal, matchbits, 0, 0);
447         } else {
448                 LASSERT ((options & LNET_MD_OP_GET) != 0);
449
450                 rc = LNetGet(self, *mdh, peer, portal, matchbits, 0);
451         }
452
453         if (rc != 0) {
454                 CERROR ("LNet%s(%s, %d, "LPD64") failed: %d\n",
455                         ((options & LNET_MD_OP_PUT) != 0) ? "Put" : "Get",
456                         libcfs_id2str(peer), portal, matchbits, rc);
457
458                 /* The forthcoming unlink event will complete this operation
459                  * with failure, so fall through and return success here.
460                  */
461                 rc = LNetMDUnlink(*mdh);
462                 LASSERT (rc == 0);
463         } else {
464                 CDEBUG (D_NET,
465                         "Posted active RDMA: peer %s, portal %u, matchbits "LPX64"\n",
466                         libcfs_id2str(peer), portal, matchbits);
467         }
468         return 0;
469 }
470
471 int
472 srpc_post_active_rqtbuf(lnet_process_id_t peer, int service, void *buf,
473                         int len, lnet_handle_md_t *mdh, srpc_event_t *ev)
474 {
475         return srpc_post_active_rdma(srpc_serv_portal(service), service,
476                                      buf, len, LNET_MD_OP_PUT, peer,
477                                      LNET_NID_ANY, mdh, ev);
478 }
479
480 int
481 srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
482                          lnet_handle_md_t *mdh, srpc_event_t *ev)
483 {
484         lnet_process_id_t any = {0};
485
486         any.nid = LNET_NID_ANY;
487         any.pid = LNET_PID_ANY;
488
489         return srpc_post_passive_rdma(srpc_serv_portal(service),
490                                       local, service, buf, len,
491                                       LNET_MD_OP_PUT, any, mdh, ev);
492 }
493
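    /* post a request buffer on the service's portal; called and returns with
     * scd_lock held, dropping the lock around the LNet call */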
494 int
495 srpc_service_post_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
496 {
497         struct srpc_service     *sv = scd->scd_svc;
498         struct srpc_msg         *msg = &buf->buf_msg;
499         int                     rc;
500
501         LNetInvalidateHandle(&buf->buf_mdh);
502         cfs_list_add(&buf->buf_list, &scd->scd_buf_posted);
503         scd->scd_buf_nposted++;
504         cfs_spin_unlock(&scd->scd_lock);
505
506         rc = srpc_post_passive_rqtbuf(sv->sv_id,
507                                       !srpc_serv_is_framework(sv),
508                                       msg, sizeof(*msg), &buf->buf_mdh,
509                                       &scd->scd_ev);
510
511         /* At this point, an RPC (new or delayed) may have arrived in
512          * msg and its event handler has been called. So we must add
513          * buf to scd_buf_posted _before_ dropping scd_lock */
514
515         cfs_spin_lock(&scd->scd_lock);
516
517         if (rc == 0) {
518                 if (!sv->sv_shuttingdown)
519                         return 0;
520
521                 cfs_spin_unlock(&scd->scd_lock);
522                 /* srpc_shutdown_service might have tried to unlink me
523                  * when my buf_mdh was still invalid */
524                 LNetMDUnlink(buf->buf_mdh);
525                 cfs_spin_lock(&scd->scd_lock);
526                 return 0;
527         }
528
529         scd->scd_buf_nposted--;
530         if (sv->sv_shuttingdown)
531                 return rc; /* don't allow to change scd_buf_posted */
532
533         cfs_list_del(&buf->buf_list);
534         cfs_spin_unlock(&scd->scd_lock);
535
536         LIBCFS_FREE(buf, sizeof(*buf));
537
538         cfs_spin_lock(&scd->scd_lock);
539         return rc;
540 }
541
542 int
543 srpc_add_buffer(struct swi_workitem *wi)
544 {
545         struct srpc_service_cd  *scd = wi->swi_workitem.wi_data;
546         struct srpc_buffer      *buf;
547         int                     rc = 0;
548
549         /* this is called by workitem scheduler threads; these threads
550          * have CPT affinity set, so buffers will be posted on the
551          * CPT-local list of the portal */
552         cfs_spin_lock(&scd->scd_lock);
553
554         while (scd->scd_buf_adjust > 0 &&
555                !scd->scd_svc->sv_shuttingdown) {
556                 scd->scd_buf_adjust--; /* consume it */
557                 scd->scd_buf_posting++;
558
559                 cfs_spin_unlock(&scd->scd_lock);
560
561                 LIBCFS_ALLOC(buf, sizeof(*buf));
562                 if (buf == NULL) {
563                         CERROR("Failed to add new buf to service: %s\n",
564                                scd->scd_svc->sv_name);
565                         cfs_spin_lock(&scd->scd_lock);
566                         rc = -ENOMEM;
567                         break;
568                 }
569
570                 cfs_spin_lock(&scd->scd_lock);
571                 if (scd->scd_svc->sv_shuttingdown) {
572                         cfs_spin_unlock(&scd->scd_lock);
573                         LIBCFS_FREE(buf, sizeof(*buf));
574
575                         cfs_spin_lock(&scd->scd_lock);
576                         rc = -ESHUTDOWN;
577                         break;
578                 }
579
580                 rc = srpc_service_post_buffer(scd, buf);
581                 if (rc != 0)
582                         break; /* buf has been freed inside */
583
584                 LASSERT(scd->scd_buf_posting > 0);
585                 scd->scd_buf_posting--;
586                 scd->scd_buf_total++;
587                 scd->scd_buf_low = MAX(2, scd->scd_buf_total / 4);
588         }
589
590         if (rc != 0) {
591                 scd->scd_buf_err_stamp = cfs_time_current_sec();
592                 scd->scd_buf_err = rc;
593
594                 LASSERT(scd->scd_buf_posting > 0);
595                 scd->scd_buf_posting--;
596         }
597
598         cfs_spin_unlock(&scd->scd_lock);
599         return 0;
600 }
601
602 int
603 srpc_service_add_buffers(struct srpc_service *sv, int nbuffer)
604 {
605         struct srpc_service_cd  *scd;
606         int                     rc = 0;
607         int                     i;
608
609         LASSERTF(nbuffer > 0, "nbuffer must be positive: %d\n", nbuffer);
610
611         cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
612                 cfs_spin_lock(&scd->scd_lock);
613
614                 scd->scd_buf_err = 0;
615                 scd->scd_buf_err_stamp = 0;
616                 scd->scd_buf_posting = 0;
617                 scd->scd_buf_adjust = nbuffer;
618                 /* start to post buffers */
619                 swi_schedule_workitem(&scd->scd_buf_wi);
620                 cfs_spin_unlock(&scd->scd_lock);
621
622                 /* framework services only post buffers on one partition */
623                 if (srpc_serv_is_framework(sv))
624                         break;
625         }
626
627         cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
628                 cfs_spin_lock(&scd->scd_lock);
629                 /*
630                  * NB: srpc_service_add_buffers() can be called inside
631                  * thread context of lst_sched_serial, and we don't normally
632                  * allow sleeping inside the thread context of a WI scheduler,
633                  * because it blocks the current scheduler thread from doing
634                  * anything else; even worse, it could deadlock if it's
635                  * waiting on the result of another WI of the same scheduler.
636                  * However, it's safe here because scd_buf_wi is scheduled
637                  * by a thread in a different WI scheduler (lst_sched_test),
638                  * so there is no risk of deadlock, though this could
639                  * block all WIs pending on lst_sched_serial for a moment,
640                  * which is not good but not fatal.
641                  */
642                 lst_wait_until(scd->scd_buf_err != 0 ||
643                                (scd->scd_buf_adjust == 0 &&
644                                 scd->scd_buf_posting == 0),
645                                scd->scd_lock, "waiting for adding buffer\n");
646
647                 if (scd->scd_buf_err != 0 && rc == 0)
648                         rc = scd->scd_buf_err;
649
650                 cfs_spin_unlock(&scd->scd_lock);
651         }
652
653         return rc;
654 }
655
656 void
657 srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer)
658 {
659         struct srpc_service_cd  *scd;
660         int                     num;
661         int                     i;
662
663         LASSERT(!sv->sv_shuttingdown);
664
665         cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
666                 cfs_spin_lock(&scd->scd_lock);
667
668                 num = scd->scd_buf_total + scd->scd_buf_posting;
669                 scd->scd_buf_adjust -= min(nbuffer, num);
670
671                 cfs_spin_unlock(&scd->scd_lock);
672         }
673 }
674
675 /* returns 1 if sv has finished, otherwise 0 */
676 int
677 srpc_finish_service(struct srpc_service *sv)
678 {
679         struct srpc_service_cd  *scd;
680         struct srpc_server_rpc  *rpc;
681         int                     i;
682
683         LASSERT(sv->sv_shuttingdown); /* srpc_shutdown_service called */
684
685         cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
686                 cfs_spin_lock(&scd->scd_lock);
687                 if (!swi_deschedule_workitem(&scd->scd_buf_wi))
688                         return 0;
689
690                 if (scd->scd_buf_nposted > 0) {
691                         CDEBUG(D_NET, "waiting for %d posted buffers to unlink\n",
692                                scd->scd_buf_nposted);
693                         cfs_spin_unlock(&scd->scd_lock);
694                         return 0;
695                 }
696
697                 if (cfs_list_empty(&scd->scd_rpc_active)) {
698                         cfs_spin_unlock(&scd->scd_lock);
699                         continue;
700                 }
701
702                 rpc = cfs_list_entry(scd->scd_rpc_active.next,
703                                      struct srpc_server_rpc, srpc_list);
704                 CNETERR("Active RPC %p on shutdown: sv %s, peer %s, "
705                         "wi %s scheduled %d running %d, "
706                         "ev fired %d type %d status %d lnet %d\n",
707                         rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
708                         swi_state2str(rpc->srpc_wi.swi_state),
709                         rpc->srpc_wi.swi_workitem.wi_scheduled,
710                         rpc->srpc_wi.swi_workitem.wi_running,
711                         rpc->srpc_ev.ev_fired, rpc->srpc_ev.ev_type,
712                         rpc->srpc_ev.ev_status, rpc->srpc_ev.ev_lnet);
713                 cfs_spin_unlock(&scd->scd_lock);
714                 return 0;
715         }
716
717         /* no lock needed from now on */
718         srpc_service_fini(sv);
719         return 1;
720 }
721
722 /* called with scd->scd_lock held */
723 void
724 srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
725 {
726         if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
727                 if (srpc_service_post_buffer(scd, buf) != 0) {
728                         CWARN("Failed to post %s buffer\n",
729                               scd->scd_svc->sv_name);
730                 }
731                 return;
732         }
733
734         /* service is shutting down, or we want to recycle some buffers */
735         scd->scd_buf_total--;
736
737         if (scd->scd_buf_adjust < 0) {
738                 scd->scd_buf_adjust++;
739                 if (scd->scd_buf_adjust < 0 &&
740                     scd->scd_buf_total == 0 && scd->scd_buf_posting == 0) {
741                         CDEBUG(D_INFO,
742                                "Try to recycle %d buffers but nothing left\n",
743                                scd->scd_buf_adjust);
744                         scd->scd_buf_adjust = 0;
745                 }
746         }
747
748         cfs_spin_unlock(&scd->scd_lock);
749         LIBCFS_FREE(buf, sizeof(*buf));
750         cfs_spin_lock(&scd->scd_lock);
751 }
752
753 void
754 srpc_abort_service(struct srpc_service *sv)
755 {
756         struct srpc_service_cd  *scd;
757         struct srpc_server_rpc  *rpc;
758         int                     i;
759
760         CDEBUG(D_NET, "Aborting service: id %d, name %s\n",
761                sv->sv_id, sv->sv_name);
762
763         cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
764                 cfs_spin_lock(&scd->scd_lock);
765
766                 /* schedule in-flight RPCs to notice the abort, NB:
767                  * racing with incoming RPCs; a complete fix should make test
768                  * RPCs carry the session ID in their headers */
769                 cfs_list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list) {
770                         rpc->srpc_aborted = 1;
771                         swi_schedule_workitem(&rpc->srpc_wi);
772                 }
773
774                 cfs_spin_unlock(&scd->scd_lock);
775         }
776 }
777
778 void
779 srpc_shutdown_service(srpc_service_t *sv)
780 {
781         struct srpc_service_cd  *scd;
782         struct srpc_server_rpc  *rpc;
783         srpc_buffer_t           *buf;
784         int                     i;
785
786         CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
787                sv->sv_id, sv->sv_name);
788
789         cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
790                 cfs_spin_lock(&scd->scd_lock);
791
792         sv->sv_shuttingdown = 1; /* i.e. no new active RPC */
793
794         cfs_percpt_for_each(scd, i, sv->sv_cpt_data)
795                 cfs_spin_unlock(&scd->scd_lock);
796
797         cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
798                 cfs_spin_lock(&scd->scd_lock);
799
800                 /* schedule in-flight RPCs to notice the shutdown */
801                 cfs_list_for_each_entry(rpc, &scd->scd_rpc_active, srpc_list)
802                         swi_schedule_workitem(&rpc->srpc_wi);
803
804                 cfs_spin_unlock(&scd->scd_lock);
805
806                 /* OK to traverse scd_buf_posted without lock, since no one
807                  * touches scd_buf_posted now */
808                 cfs_list_for_each_entry(buf, &scd->scd_buf_posted, buf_list)
809                         LNetMDUnlink(buf->buf_mdh);
810         }
811 }
812
813 int
814 srpc_send_request (srpc_client_rpc_t *rpc)
815 {
816         srpc_event_t *ev = &rpc->crpc_reqstev;
817         int           rc;
818
819         ev->ev_fired = 0;
820         ev->ev_data  = rpc;
821         ev->ev_type  = SRPC_REQUEST_SENT;
822
823         rc = srpc_post_active_rqtbuf(rpc->crpc_dest, rpc->crpc_service,
824                                      &rpc->crpc_reqstmsg, sizeof(srpc_msg_t),
825                                      &rpc->crpc_reqstmdh, ev);
826         if (rc != 0) {
827                 LASSERT (rc == -ENOMEM);
828                 ev->ev_fired = 1;  /* no more event expected */
829         }
830         return rc;
831 }
832
833 int
834 srpc_prepare_reply (srpc_client_rpc_t *rpc)
835 {
836         srpc_event_t *ev = &rpc->crpc_replyev;
837         __u64        *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
838         int           rc;
839
840         ev->ev_fired = 0;
841         ev->ev_data  = rpc;
842         ev->ev_type  = SRPC_REPLY_RCVD;
843
844         *id = srpc_next_id();
845
846         rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
847                                     &rpc->crpc_replymsg, sizeof(srpc_msg_t),
848                                     LNET_MD_OP_PUT, rpc->crpc_dest,
849                                     &rpc->crpc_replymdh, ev);
850         if (rc != 0) {
851                 LASSERT (rc == -ENOMEM);
852                 ev->ev_fired = 1;  /* no more event expected */
853         }
854         return rc;
855 }
856
857 int
858 srpc_prepare_bulk (srpc_client_rpc_t *rpc)
859 {
860         srpc_bulk_t  *bk = &rpc->crpc_bulk;
861         srpc_event_t *ev = &rpc->crpc_bulkev;
862         __u64        *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
863         int           rc;
864         int           opt;
865
866         LASSERT (bk->bk_niov <= LNET_MAX_IOV);
867
868         if (bk->bk_niov == 0) return 0; /* nothing to do */
869
870         opt = bk->bk_sink ? LNET_MD_OP_PUT : LNET_MD_OP_GET;
871 #ifdef __KERNEL__
872         opt |= LNET_MD_KIOV;
873 #else
874         opt |= LNET_MD_IOVEC;
875 #endif
876
877         ev->ev_fired = 0;
878         ev->ev_data  = rpc;
879         ev->ev_type  = SRPC_BULK_REQ_RCVD;
880
881         *id = srpc_next_id();
882
883         rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
884                                     &bk->bk_iovs[0], bk->bk_niov, opt,
885                                     rpc->crpc_dest, &bk->bk_mdh, ev);
886         if (rc != 0) {
887                 LASSERT (rc == -ENOMEM);
888                 ev->ev_fired = 1;  /* no more event expected */
889         }
890         return rc;
891 }
892
893 int
894 srpc_do_bulk (srpc_server_rpc_t *rpc)
895 {
896         srpc_event_t  *ev = &rpc->srpc_ev;
897         srpc_bulk_t   *bk = rpc->srpc_bulk;
898         __u64          id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
899         int            rc;
900         int            opt;
901
902         LASSERT (bk != NULL);
903
904         opt = bk->bk_sink ? LNET_MD_OP_GET : LNET_MD_OP_PUT;
905 #ifdef __KERNEL__
906         opt |= LNET_MD_KIOV;
907 #else
908         opt |= LNET_MD_IOVEC;
909 #endif
910
911         ev->ev_fired = 0;
912         ev->ev_data  = rpc;
913         ev->ev_type  = bk->bk_sink ? SRPC_BULK_GET_RPLD : SRPC_BULK_PUT_SENT;
914
915         rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, id,
916                                    &bk->bk_iovs[0], bk->bk_niov, opt,
917                                    rpc->srpc_peer, rpc->srpc_self,
918                                    &bk->bk_mdh, ev);
919         if (rc != 0)
920                 ev->ev_fired = 1;  /* no more event expected */
921         return rc;
922 }
923
924 /* only called from srpc_handle_rpc */
925 void
926 srpc_server_rpc_done(srpc_server_rpc_t *rpc, int status)
927 {
928         struct srpc_service_cd  *scd = rpc->srpc_scd;
929         struct srpc_service     *sv  = scd->scd_svc;
930         srpc_buffer_t           *buffer;
931
932         LASSERT (status != 0 || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
933
934         rpc->srpc_status = status;
935
936         CDEBUG_LIMIT (status == 0 ? D_NET : D_NETERROR,
937                 "Server RPC %p done: service %s, peer %s, status %s:%d\n",
938                 rpc, sv->sv_name, libcfs_id2str(rpc->srpc_peer),
939                 swi_state2str(rpc->srpc_wi.swi_state), status);
940
941         if (status != 0) {
942                 cfs_spin_lock(&srpc_data.rpc_glock);
943                 srpc_data.rpc_counters.rpcs_dropped++;
944                 cfs_spin_unlock(&srpc_data.rpc_glock);
945         }
946
947         if (rpc->srpc_done != NULL)
948                 (*rpc->srpc_done) (rpc);
949         LASSERT (rpc->srpc_bulk == NULL);
950
951         cfs_spin_lock(&scd->scd_lock);
952
953         if (rpc->srpc_reqstbuf != NULL) {
954                 /* NB: srpc_service_recycle_buffer may drop scd_lock, but
955                  * sv won't go away because scd_rpc_active is not empty */
956                 srpc_service_recycle_buffer(scd, rpc->srpc_reqstbuf);
957                 rpc->srpc_reqstbuf = NULL;
958         }
959
960         cfs_list_del(&rpc->srpc_list); /* from scd->scd_rpc_active */
961
962         /*
963          * No one can schedule me now since:
964          * - I'm not on scd_rpc_active.
965          * - all LNet events have been fired.
966          * Cancel pending schedules and prevent future schedule attempts:
967          */
968         LASSERT(rpc->srpc_ev.ev_fired);
969         swi_exit_workitem(&rpc->srpc_wi);
970
971         if (!sv->sv_shuttingdown && !cfs_list_empty(&scd->scd_buf_blocked)) {
972                 buffer = cfs_list_entry(scd->scd_buf_blocked.next,
973                                         srpc_buffer_t, buf_list);
974                 cfs_list_del(&buffer->buf_list);
975
976                 srpc_init_server_rpc(rpc, scd, buffer);
977                 cfs_list_add_tail(&rpc->srpc_list, &scd->scd_rpc_active);
978                 swi_schedule_workitem(&rpc->srpc_wi);
979         } else {
980                 cfs_list_add(&rpc->srpc_list, &scd->scd_rpc_free);
981         }
982
983         cfs_spin_unlock(&scd->scd_lock);
984         return;
985 }
986
987 /* handles an incoming RPC */
988 int
989 srpc_handle_rpc(swi_workitem_t *wi)
990 {
991         struct srpc_server_rpc  *rpc = wi->swi_workitem.wi_data;
992         struct srpc_service_cd  *scd = rpc->srpc_scd;
993         struct srpc_service     *sv = scd->scd_svc;
994         srpc_event_t            *ev = &rpc->srpc_ev;
995         int                     rc = 0;
996
997         LASSERT(wi == &rpc->srpc_wi);
998
999         cfs_spin_lock(&scd->scd_lock);
1000
1001         if (sv->sv_shuttingdown || rpc->srpc_aborted) {
1002                 cfs_spin_unlock(&scd->scd_lock);
1003
1004                 if (rpc->srpc_bulk != NULL)
1005                         LNetMDUnlink(rpc->srpc_bulk->bk_mdh);
1006                 LNetMDUnlink(rpc->srpc_replymdh);
1007
1008                 if (ev->ev_fired) { /* no more event, OK to finish */
1009                         srpc_server_rpc_done(rpc, -ESHUTDOWN);
1010                         return 1;
1011                 }
1012                 return 0;
1013         }
1014
1015         cfs_spin_unlock(&scd->scd_lock);
1016
1017         switch (wi->swi_state) {
1018         default:
1019                 LBUG ();
1020         case SWI_STATE_NEWBORN: {
1021                 srpc_msg_t           *msg;
1022                 srpc_generic_reply_t *reply;
1023
1024                 msg = &rpc->srpc_reqstbuf->buf_msg;
1025                 reply = &rpc->srpc_replymsg.msg_body.reply;
1026
1027                 if (msg->msg_magic == 0) {
1028                         /* moaned already in srpc_lnet_ev_handler */
1029                         rc = EBADMSG;
1030                 } else if (msg->msg_version != SRPC_MSG_VERSION &&
1031                            msg->msg_version != __swab32(SRPC_MSG_VERSION)) {
1032                         CWARN ("Version mismatch: %u, %u expected, from %s\n",
1033                                msg->msg_version, SRPC_MSG_VERSION,
1034                                libcfs_id2str(rpc->srpc_peer));
1035                         reply->status = EPROTO;
1036                 } else {
1037                         reply->status = 0;
1038                         rc = (*sv->sv_handler) (rpc);
1039                         LASSERT (reply->status == 0 || !rpc->srpc_bulk);
1040                 }
1041
1042                 if (rc != 0) {
1043                         srpc_server_rpc_done(rpc, rc);
1044                         return 1;
1045                 }
1046
1047                 wi->swi_state = SWI_STATE_BULK_STARTED;
1048
1049                 if (rpc->srpc_bulk != NULL) {
1050                         rc = srpc_do_bulk(rpc);
1051                         if (rc == 0)
1052                                 return 0; /* wait for bulk */
1053
1054                         LASSERT (ev->ev_fired);
1055                         ev->ev_status = rc;
1056                 }
1057         }
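                     /* fall through: no bulk to transfer, or bulk failed to start */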
1058         case SWI_STATE_BULK_STARTED:
1059                 LASSERT (rpc->srpc_bulk == NULL || ev->ev_fired);
1060
1061                 if (rpc->srpc_bulk != NULL) {
1062                         rc = ev->ev_status;
1063
1064                         if (sv->sv_bulk_ready != NULL)
1065                                 rc = (*sv->sv_bulk_ready) (rpc, rc);
1066
1067                         if (rc != 0) {
1068                                 srpc_server_rpc_done(rpc, rc);
1069                                 return 1;
1070                         }
1071                 }
1072
1073                 wi->swi_state = SWI_STATE_REPLY_SUBMITTED;
1074                 rc = srpc_send_reply(rpc);
1075                 if (rc == 0)
1076                         return 0; /* wait for reply */
1077                 srpc_server_rpc_done(rpc, rc);
1078                 return 1;
1079
1080         case SWI_STATE_REPLY_SUBMITTED:
1081                 if (!ev->ev_fired) {
1082                         CERROR("RPC %p: bulk %p, service %d\n",
1083                                rpc, rpc->srpc_bulk, sv->sv_id);
1084                         CERROR("Event: status %d, type %d, lnet %d\n",
1085                                ev->ev_status, ev->ev_type, ev->ev_lnet);
1086                         LASSERT (ev->ev_fired);
1087                 }
1088
1089                 wi->swi_state = SWI_STATE_DONE;
1090                 srpc_server_rpc_done(rpc, ev->ev_status);
1091                 return 1;
1092         }
1093
1094         return 0;
1095 }
1096
1097 void
1098 srpc_client_rpc_expired (void *data)
1099 {
1100         srpc_client_rpc_t *rpc = data;
1101
1102         CWARN ("Client RPC expired: service %d, peer %s, timeout %d.\n",
1103                rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
1104                rpc->crpc_timeout);
1105
1106         cfs_spin_lock(&rpc->crpc_lock);
1107
1108         rpc->crpc_timeout = 0;
1109         srpc_abort_rpc(rpc, -ETIMEDOUT);
1110
1111         cfs_spin_unlock(&rpc->crpc_lock);
1112
1113         cfs_spin_lock(&srpc_data.rpc_glock);
1114         srpc_data.rpc_counters.rpcs_expired++;
1115         cfs_spin_unlock(&srpc_data.rpc_glock);
1116         return;
1117 }
1118
1119 inline void
1120 srpc_add_client_rpc_timer (srpc_client_rpc_t *rpc)
1121 {
1122         stt_timer_t *timer = &rpc->crpc_timer;
1123
1124         if (rpc->crpc_timeout == 0) return;
1125
1126         CFS_INIT_LIST_HEAD(&timer->stt_list);
1127         timer->stt_data    = rpc;
1128         timer->stt_func    = srpc_client_rpc_expired;
1129         timer->stt_expires = cfs_time_add(rpc->crpc_timeout,
1130                                           cfs_time_current_sec());
1131         stt_add_timer(timer);
1132         return;
1133 }
1134
1135 /*
1136  * Called with rpc->crpc_lock held.
1137  *
1138  * Upon exit the RPC expiry timer is not queued and the handler is not
1139  * running on any CPU. */
1140 void
1141 srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
1142 {
1143         /* timer not planted or already exploded */
1144         if (rpc->crpc_timeout == 0) return;
1145
1146         /* timer successfully defused */
1147         if (stt_del_timer(&rpc->crpc_timer)) return;
1148
1149 #ifdef __KERNEL__
1150         /* timer has fired; wait for the handler to finish */
1151         while (rpc->crpc_timeout != 0) {
1152                 cfs_spin_unlock(&rpc->crpc_lock);
1153
1154                 cfs_schedule();
1155
1156                 cfs_spin_lock(&rpc->crpc_lock);
1157         }
1158 #else
1159         LBUG(); /* impossible in single-threaded runtime */
1160 #endif
1161         return;
1162 }
1163
1164 void
1165 srpc_client_rpc_done (srpc_client_rpc_t *rpc, int status)
1166 {
1167         swi_workitem_t *wi = &rpc->crpc_wi;
1168
1169         LASSERT (status != 0 || wi->swi_state == SWI_STATE_DONE);
1170
1171         cfs_spin_lock(&rpc->crpc_lock);
1172
1173         rpc->crpc_closed = 1;
1174         if (rpc->crpc_status == 0)
1175                 rpc->crpc_status = status;
1176
1177         srpc_del_client_rpc_timer(rpc);
1178
1179         CDEBUG_LIMIT ((status == 0) ? D_NET : D_NETERROR,
1180                 "Client RPC done: service %d, peer %s, status %s:%d:%d\n",
1181                 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
1182                 swi_state2str(wi->swi_state), rpc->crpc_aborted, status);
1183
1184         /*
1185          * No one can schedule me now since:
1186          * - RPC timer has been defused.
1187          * - all LNet events have been fired.
1188          * - crpc_closed has been set, preventing srpc_abort_rpc from
1189          *   scheduling me.
1190          * Cancel pending schedules and prevent future schedule attempts:
1191          */
1192         LASSERT (!srpc_event_pending(rpc));
1193         swi_exit_workitem(wi);
1194
1195         cfs_spin_unlock(&rpc->crpc_lock);
1196
1197         (*rpc->crpc_done)(rpc);
1198         return;
1199 }
1200
1201 /* sends an outgoing RPC */
1202 int
1203 srpc_send_rpc (swi_workitem_t *wi)
1204 {
1205         int                rc = 0;
1206         srpc_client_rpc_t *rpc = wi->swi_workitem.wi_data;
1207         srpc_msg_t        *reply = &rpc->crpc_replymsg;
1208         int                do_bulk = rpc->crpc_bulk.bk_niov > 0;
1209
1210         LASSERT (rpc != NULL);
1211         LASSERT (wi == &rpc->crpc_wi);
1212
1213         cfs_spin_lock(&rpc->crpc_lock);
1214
1215         if (rpc->crpc_aborted) {
1216                 cfs_spin_unlock(&rpc->crpc_lock);
1217                 goto abort;
1218         }
1219
1220         cfs_spin_unlock(&rpc->crpc_lock);
1221
1222         switch (wi->swi_state) {
1223         default:
1224                 LBUG ();
1225         case SWI_STATE_NEWBORN:
1226                 LASSERT (!srpc_event_pending(rpc));
1227
1228                 rc = srpc_prepare_reply(rpc);
1229                 if (rc != 0) {
1230                         srpc_client_rpc_done(rpc, rc);
1231                         return 1;
1232                 }
1233
1234                 rc = srpc_prepare_bulk(rpc);
1235                 if (rc != 0) break;
1236
1237                 wi->swi_state = SWI_STATE_REQUEST_SUBMITTED;
1238                 rc = srpc_send_request(rpc);
1239                 break;
1240
1241         case SWI_STATE_REQUEST_SUBMITTED:
1242                 /* CAVEAT EMPTOR: rqtev, rpyev, and bulkev may come in any
1243                  * order; however, they're processed in a strict order:
1244                  * rqt, rpy, and bulk. */
1245                 if (!rpc->crpc_reqstev.ev_fired) break;
1246
1247                 rc = rpc->crpc_reqstev.ev_status;
1248                 if (rc != 0) break;
1249
1250                 wi->swi_state = SWI_STATE_REQUEST_SENT;
1251                 /* perhaps more events, fall thru */
1252         case SWI_STATE_REQUEST_SENT: {
1253                 srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
1254
1255                 if (!rpc->crpc_replyev.ev_fired) break;
1256
1257                 rc = rpc->crpc_replyev.ev_status;
1258                 if (rc != 0) break;
1259
1260                 if ((reply->msg_type != type &&
1261                      reply->msg_type != __swab32(type)) ||
1262                     (reply->msg_magic != SRPC_MSG_MAGIC &&
1263                      reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
1264                         CWARN ("Bad message from %s: type %u (%u expected),"
1265                                " magic %u (%u expected).\n",
1266                                libcfs_id2str(rpc->crpc_dest),
1267                                reply->msg_type, type,
1268                                reply->msg_magic, SRPC_MSG_MAGIC);
1269                         rc = -EBADMSG;
1270                         break;
1271                 }
1272
1273                 if (do_bulk && reply->msg_body.reply.status != 0) {
1274                         CWARN ("Remote error %d at %s, unlink bulk buffer in "
1275                                "case peer didn't initiate bulk transfer\n",
1276                                reply->msg_body.reply.status,
1277                                libcfs_id2str(rpc->crpc_dest));
1278                         LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
1279                 }
1280
1281                 wi->swi_state = SWI_STATE_REPLY_RECEIVED;
1282         }
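                     /* fall through: check whether the bulk transfer has completed */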
1283         case SWI_STATE_REPLY_RECEIVED:
1284                 if (do_bulk && !rpc->crpc_bulkev.ev_fired) break;
1285
1286                 rc = do_bulk ? rpc->crpc_bulkev.ev_status : 0;
1287
1288                 /* Bulk buffer was unlinked due to remote error. Clear error
1289                  * since reply buffer still contains valid data.
1290                  * NB rpc->crpc_done shouldn't look into bulk data in case of
1291                  * remote error. */
1292                 if (do_bulk && rpc->crpc_bulkev.ev_lnet == LNET_EVENT_UNLINK &&
1293                     rpc->crpc_status == 0 && reply->msg_body.reply.status != 0)
1294                         rc = 0;
1295
1296                 wi->swi_state = SWI_STATE_DONE;
1297                 srpc_client_rpc_done(rpc, rc);
1298                 return 1;
1299         }
1300
1301         if (rc != 0) {
1302                 cfs_spin_lock(&rpc->crpc_lock);
1303                 srpc_abort_rpc(rpc, rc);
1304                 cfs_spin_unlock(&rpc->crpc_lock);
1305         }
1306
1307 abort:
1308         if (rpc->crpc_aborted) {
1309                 LNetMDUnlink(rpc->crpc_reqstmdh);
1310                 LNetMDUnlink(rpc->crpc_replymdh);
1311                 LNetMDUnlink(rpc->crpc_bulk.bk_mdh);
1312
1313                 if (!srpc_event_pending(rpc)) {
1314                         srpc_client_rpc_done(rpc, -EINTR);
1315                         return 1;
1316                 }
1317         }
1318         return 0;
1319 }
1320
1321 srpc_client_rpc_t *
1322 srpc_create_client_rpc (lnet_process_id_t peer, int service,
1323                         int nbulkiov, int bulklen,
1324                         void (*rpc_done)(srpc_client_rpc_t *),
1325                         void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
1326 {
1327         srpc_client_rpc_t *rpc;
1328
1329         LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
1330                                    crpc_bulk.bk_iovs[nbulkiov]));
1331         if (rpc == NULL)
1332                 return NULL;
1333
1334         srpc_init_client_rpc(rpc, peer, service, nbulkiov,
1335                              bulklen, rpc_done, rpc_fini, priv);
1336         return rpc;
1337 }
1338
1339 /* called with rpc->crpc_lock held */
1340 void
1341 srpc_abort_rpc (srpc_client_rpc_t *rpc, int why)
1342 {
1343         LASSERT (why != 0);
1344
1345         if (rpc->crpc_aborted || /* already aborted */
1346             rpc->crpc_closed)    /* callback imminent */
1347                 return;
1348
1349         CDEBUG (D_NET,
1350                 "Aborting RPC: service %d, peer %s, state %s, why %d\n",
1351                 rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
1352                 swi_state2str(rpc->crpc_wi.swi_state), why);
1353
1354         rpc->crpc_aborted = 1;
1355         rpc->crpc_status  = why;
1356         swi_schedule_workitem(&rpc->crpc_wi);
1357         return;
1358 }
1359
1360 /* called with rpc->crpc_lock held */
1361 void
1362 srpc_post_rpc (srpc_client_rpc_t *rpc)
1363 {
1364         LASSERT (!rpc->crpc_aborted);
1365         LASSERT (srpc_data.rpc_state == SRPC_STATE_RUNNING);
1366         LASSERT ((rpc->crpc_bulk.bk_len & ~CFS_PAGE_MASK) == 0);
1367
1368         CDEBUG (D_NET, "Posting RPC: peer %s, service %d, timeout %d\n",
1369                 libcfs_id2str(rpc->crpc_dest), rpc->crpc_service,
1370                 rpc->crpc_timeout);
1371
1372         srpc_add_client_rpc_timer(rpc);
1373         swi_schedule_workitem(&rpc->crpc_wi);
1374         return;
1375 }
1376
1377
1378 int
1379 srpc_send_reply(struct srpc_server_rpc *rpc)
1380 {
1381         srpc_event_t            *ev = &rpc->srpc_ev;
1382         struct srpc_msg         *msg = &rpc->srpc_replymsg;
1383         struct srpc_buffer      *buffer = rpc->srpc_reqstbuf;
1384         struct srpc_service_cd  *scd = rpc->srpc_scd;
1385         struct srpc_service     *sv = scd->scd_svc;
1386         __u64                   rpyid;
1387         int                     rc;
1388
1389         LASSERT(buffer != NULL);
1390         rpyid = buffer->buf_msg.msg_body.reqst.rpyid;
1391
1392         cfs_spin_lock(&scd->scd_lock);
1393
1394         if (!sv->sv_shuttingdown && !srpc_serv_is_framework(sv)) {
1395                 /* Repost buffer before replying since test client
1396                  * might send me another RPC once it gets the reply */
1397                 if (srpc_service_post_buffer(scd, buffer) != 0)
1398                         CWARN("Failed to repost %s buffer\n", sv->sv_name);
1399                 rpc->srpc_reqstbuf = NULL;
1400         }
1401
1402         cfs_spin_unlock(&scd->scd_lock);
1403
1404         ev->ev_fired = 0;
1405         ev->ev_data  = rpc;
1406         ev->ev_type  = SRPC_REPLY_SENT;
1407
1408         msg->msg_magic   = SRPC_MSG_MAGIC;
1409         msg->msg_version = SRPC_MSG_VERSION;
1410         msg->msg_type    = srpc_service2reply(sv->sv_id);
1411
1412         rc = srpc_post_active_rdma(SRPC_RDMA_PORTAL, rpyid, msg,
1413                                    sizeof(*msg), LNET_MD_OP_PUT,
1414                                    rpc->srpc_peer, rpc->srpc_self,
1415                                    &rpc->srpc_replymdh, ev);
1416         if (rc != 0)
1417                 ev->ev_fired = 1;  /* no more event expected */
1418         return rc;
1419 }
1420
1421 /* in the kernel this is always called in thread context, with LNET_LOCK() held */
1422 void
1423 srpc_lnet_ev_handler(lnet_event_t *ev)
1424 {
1425         struct srpc_service_cd  *scd;
1426         srpc_event_t      *rpcev = ev->md.user_ptr;
1427         srpc_client_rpc_t *crpc;
1428         srpc_server_rpc_t *srpc;
1429         srpc_buffer_t     *buffer;
1430         srpc_service_t    *sv;
1431         srpc_msg_t        *msg;
1432         srpc_msg_type_t    type;
1433
1434         LASSERT (!cfs_in_interrupt());
1435
1436         if (ev->status != 0) {
1437                 cfs_spin_lock(&srpc_data.rpc_glock);
1438                 srpc_data.rpc_counters.errors++;
1439                 cfs_spin_unlock(&srpc_data.rpc_glock);
1440         }
1441
1442         rpcev->ev_lnet = ev->type;
1443
1444         switch (rpcev->ev_type) {
1445         default:
1446                 CERROR("Unknown event: status %d, type %d, lnet %d\n",
1447                        rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
1448                 LBUG ();
1449         case SRPC_REQUEST_SENT:
1450                 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
1451                         cfs_spin_lock(&srpc_data.rpc_glock);
1452                         srpc_data.rpc_counters.rpcs_sent++;
1453                         cfs_spin_unlock(&srpc_data.rpc_glock);
1454                 }
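                     /* fall through: all client-side RPC events share the handling below */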
1455         case SRPC_REPLY_RCVD:
1456         case SRPC_BULK_REQ_RCVD:
1457                 crpc = rpcev->ev_data;
1458
1459                 if (rpcev != &crpc->crpc_reqstev &&
1460                     rpcev != &crpc->crpc_replyev &&
1461                     rpcev != &crpc->crpc_bulkev) {
1462                         CERROR("rpcev %p, crpc %p, reqstev %p, replyev %p, bulkev %p\n",
1463                                rpcev, crpc, &crpc->crpc_reqstev,
1464                                &crpc->crpc_replyev, &crpc->crpc_bulkev);
1465                         CERROR("Bad event: status %d, type %d, lnet %d\n",
1466                                rpcev->ev_status, rpcev->ev_type, rpcev->ev_lnet);
1467                         LBUG ();
1468                 }
1469
1470                 cfs_spin_lock(&crpc->crpc_lock);
1471
1472                 LASSERT (rpcev->ev_fired == 0);
1473                 rpcev->ev_fired  = 1;
1474                 rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
1475                                                 -EINTR : ev->status;
1476                 swi_schedule_workitem(&crpc->crpc_wi);
1477
1478                 cfs_spin_unlock(&crpc->crpc_lock);
1479                 break;
1480
1481         case SRPC_REQUEST_RCVD:
1482                 scd = rpcev->ev_data;
1483                 sv = scd->scd_svc;
1484
1485                 LASSERT(rpcev == &scd->scd_ev);
1486
1487                 cfs_spin_lock(&scd->scd_lock);
1488
1489                 LASSERT (ev->unlinked);
1490                 LASSERT (ev->type == LNET_EVENT_PUT ||
1491                          ev->type == LNET_EVENT_UNLINK);
1492                 LASSERT (ev->type != LNET_EVENT_UNLINK ||
1493                          sv->sv_shuttingdown);
1494
1495                 buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
1496                 buffer->buf_peer = ev->initiator;
1497                 buffer->buf_self = ev->target.nid;
1498
1499                 LASSERT(scd->scd_buf_nposted > 0);
1500                 scd->scd_buf_nposted--;
1501
1502                 if (sv->sv_shuttingdown) {
1503                         /* Leave buffer on scd->scd_buf_posted since
1504                          * srpc_finish_service needs to traverse it. */
1505                         cfs_spin_unlock(&scd->scd_lock);
1506                         break;
1507                 }
1508
1509                 if (scd->scd_buf_err_stamp != 0 &&
1510                     scd->scd_buf_err_stamp < cfs_time_current_sec()) {
1511                         /* re-enable adding buffer */
1512                         scd->scd_buf_err_stamp = 0;
1513                         scd->scd_buf_err = 0;
1514                 }
1515
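                     /* posted buffers are running low; ask the buffer workitem
                      * to grow the pool */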
1516                 if (scd->scd_buf_err == 0 && /* adding buffer is enabled */
1517                     scd->scd_buf_adjust == 0 &&
1518                     scd->scd_buf_nposted < scd->scd_buf_low) {
1519                         scd->scd_buf_adjust = MAX(scd->scd_buf_total / 2,
1520                                                   SFW_TEST_WI_MIN);
1521                         swi_schedule_workitem(&scd->scd_buf_wi);
1522                 }
1523
1524                 cfs_list_del(&buffer->buf_list); /* from scd->scd_buf_posted */
1525                 msg = &buffer->buf_msg;
1526                 type = srpc_service2request(sv->sv_id);
1527
1528                 if (ev->status != 0 || ev->mlength != sizeof(*msg) ||
1529                     (msg->msg_type != type &&
1530                      msg->msg_type != __swab32(type)) ||
1531                     (msg->msg_magic != SRPC_MSG_MAGIC &&
1532                      msg->msg_magic != __swab32(SRPC_MSG_MAGIC))) {
1533                         CERROR ("Dropping RPC (%s) from %s: "
1534                                 "status %d mlength %d type %u magic %u.\n",
1535                                 sv->sv_name, libcfs_id2str(ev->initiator),
1536                                 ev->status, ev->mlength,
1537                                 msg->msg_type, msg->msg_magic);
1538
1539                         /* NB can't call srpc_service_recycle_buffer here since
1540                          * it may call LNetM[DE]Attach. The invalid magic tells
1541                          * srpc_handle_rpc to drop this RPC */
1542                         msg->msg_magic = 0;
1543                 }
1544
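                     /* hand the request to an idle server RPC if one is free,
                      * otherwise park the buffer until an active RPC finishes */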
1545                 if (!cfs_list_empty(&scd->scd_rpc_free)) {
1546                         srpc = cfs_list_entry(scd->scd_rpc_free.next,
1547                                               struct srpc_server_rpc,
1548                                               srpc_list);
1549                         cfs_list_del(&srpc->srpc_list);
1550
1551                         srpc_init_server_rpc(srpc, scd, buffer);
1552                         cfs_list_add_tail(&srpc->srpc_list,
1553                                           &scd->scd_rpc_active);
1554                         swi_schedule_workitem(&srpc->srpc_wi);
1555                 } else {
1556                         cfs_list_add_tail(&buffer->buf_list,
1557                                           &scd->scd_buf_blocked);
1558                 }
1559
1560                 cfs_spin_unlock(&scd->scd_lock);
1561
1562                 cfs_spin_lock(&srpc_data.rpc_glock);
1563                 srpc_data.rpc_counters.rpcs_rcvd++;
1564                 cfs_spin_unlock(&srpc_data.rpc_glock);
1565                 break;
1566
1567         case SRPC_BULK_GET_RPLD:
1568                 LASSERT (ev->type == LNET_EVENT_SEND ||
1569                          ev->type == LNET_EVENT_REPLY ||
1570                          ev->type == LNET_EVENT_UNLINK);
1571
1572                 if (!ev->unlinked)
1573                         break; /* wait for final event */
1574
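                     /* intentional fall-through: account the completed bulk GET
                      * below, then wake the server RPC */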
1575         case SRPC_BULK_PUT_SENT:
1576                 if (ev->status == 0 && ev->type != LNET_EVENT_UNLINK) {
1577                         cfs_spin_lock(&srpc_data.rpc_glock);
1578
1579                         if (rpcev->ev_type == SRPC_BULK_GET_RPLD)
1580                                 srpc_data.rpc_counters.bulk_get += ev->mlength;
1581                         else
1582                                 srpc_data.rpc_counters.bulk_put += ev->mlength;
1583
1584                         cfs_spin_unlock(&srpc_data.rpc_glock);
1585                 }
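                     /* intentional fall-through: wake up the server RPC */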
1586         case SRPC_REPLY_SENT:
1587                 srpc = rpcev->ev_data;
1588                 scd  = srpc->srpc_scd;
1589
1590                 LASSERT(rpcev == &srpc->srpc_ev);
1591
1592                 cfs_spin_lock(&scd->scd_lock);
1593
1594                 rpcev->ev_fired  = 1;
1595                 rpcev->ev_status = (ev->type == LNET_EVENT_UNLINK) ?
1596                                    -EINTR : ev->status;
1597                 swi_schedule_workitem(&srpc->srpc_wi);
1598
1599                 cfs_spin_unlock(&scd->scd_lock);
1600                 break;
1601         }
1602 }
1603
1604 #ifndef __KERNEL__
1605
1606 int
1607 srpc_check_event (int timeout)
1608 {
1609         lnet_event_t ev;
1610         int          rc;
1611         int          i;
1612
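             /* LNetEQPoll() takes its timeout in milliseconds */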
1613         rc = LNetEQPoll(&srpc_data.rpc_lnet_eq, 1,
1614                         timeout * 1000, &ev, &i);
1615         if (rc == 0) return 0;
1616
1617         LASSERT (rc == -EOVERFLOW || rc == 1);
1618
1619         /* We can't afford to miss any events... */
1620         if (rc == -EOVERFLOW) {
1621                 CERROR ("Dropped an event!!!\n");
1622                 abort();
1623         }
1624
1625         srpc_lnet_ev_handler(&ev);
1626         return 1;
1627 }
1628
1629 #endif
1630
1631 int
1632 srpc_startup (void)
1633 {
1634         int rc;
1635
1636         memset(&srpc_data, 0, sizeof(struct smoketest_rpc));
1637         cfs_spin_lock_init(&srpc_data.rpc_glock);
1638
1639         /* 1 second pause to avoid timestamp reuse */
1640         cfs_pause(cfs_time_seconds(1));
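             /* the timestamp seeds the top 16 bits, leaving the low 48 bits
              * free for per-RPC match bits */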
1641         srpc_data.rpc_matchbits = ((__u64) cfs_time_current_sec()) << 48;
1642
1643         srpc_data.rpc_state = SRPC_STATE_NONE;
1644
1645 #ifdef __KERNEL__
1646         rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
1647 #else
1648         if (the_lnet.ln_server_mode_flag)
1649                 rc = LNetNIInit(LUSTRE_SRV_LNET_PID);
1650         else
1651                 rc = LNetNIInit(getpid() | LNET_PID_USERFLAG);
1652 #endif
1653         if (rc < 0) {
1654                 CERROR ("LNetNIInit() has failed: %d\n", rc);
1655                 return rc;
1656         }
1657
1658         srpc_data.rpc_state = SRPC_STATE_NI_INIT;
1659
1660         LNetInvalidateHandle(&srpc_data.rpc_lnet_eq);
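             /* in the kernel, events are delivered via srpc_lnet_ev_handler();
              * userspace allocates a large EQ and polls it in srpc_check_event() */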
1661 #ifdef __KERNEL__
1662         rc = LNetEQAlloc(0, srpc_lnet_ev_handler, &srpc_data.rpc_lnet_eq);
1663 #else
1664         rc = LNetEQAlloc(10240, LNET_EQ_HANDLER_NONE, &srpc_data.rpc_lnet_eq);
1665 #endif
1666         if (rc != 0) {
1667                 CERROR("LNetEQAlloc() has failed: %d\n", rc);
1668                 goto bail;
1669         }
1670
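             /* lazy portals queue requests that arrive before a matching
              * buffer is posted instead of dropping them */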
1671         rc = LNetSetLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
1672         LASSERT(rc == 0);
1673         rc = LNetSetLazyPortal(SRPC_REQUEST_PORTAL);
1674         LASSERT(rc == 0);
1675
1676         srpc_data.rpc_state = SRPC_STATE_EQ_INIT;
1677
1678         rc = stt_startup();
1679
1680 bail:
1681         if (rc != 0)
1682                 srpc_shutdown();
1683         else
1684                 srpc_data.rpc_state = SRPC_STATE_RUNNING;
1685
1686         return rc;
1687 }
1688
1689 void
1690 srpc_shutdown (void)
1691 {
1692         int i;
1693         int rc;
1694         int state;
1695
1696         state = srpc_data.rpc_state;
1697         srpc_data.rpc_state = SRPC_STATE_STOPPING;
1698
1699         switch (state) {
1700         default:
1701                 LBUG ();
1702         case SRPC_STATE_RUNNING:
1703                 cfs_spin_lock(&srpc_data.rpc_glock);
1704
1705                 for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
1706                         srpc_service_t *sv = srpc_data.rpc_services[i];
1707
1708                         LASSERTF (sv == NULL,
1709                                   "service not empty: id %d, name %s\n",
1710                                   i, sv->sv_name);
1711                 }
1712
1713                 cfs_spin_unlock(&srpc_data.rpc_glock);
1714
1715                 stt_shutdown();
1716
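                     /* intentional fall-through: also release the EQ and lazy portals */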
1717         case SRPC_STATE_EQ_INIT:
1718                 rc = LNetClearLazyPortal(SRPC_FRAMEWORK_REQUEST_PORTAL);
                     LASSERT (rc == 0);
1719                 rc = LNetClearLazyPortal(SRPC_REQUEST_PORTAL);
1720                 LASSERT (rc == 0);
1721                 rc = LNetEQFree(srpc_data.rpc_lnet_eq);
1722                 LASSERT (rc == 0); /* the EQ should have no user by now */
1723
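                     /* intentional fall-through: finally tear down the LNet NI */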
1724         case SRPC_STATE_NI_INIT:
1725                 LNetNIFini();
1726         }
1727
1728         return;
1729 }