/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

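/* Return a completed tx to the idle pool and finalise any LNet messages
 * attached to it.  Must not be called in interrupt context: the tx must
 * already be off every queue (not queued, not sending, not waiting), and
 * any connection reference it holds is dropped here. */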
void
kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
{
        lnet_msg_t *lntmsg[2];
        kib_net_t  *net = ni->ni_data;
        int         rc;
        int         i;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
        LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
        LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer response */

        kiblnd_unmap_tx(ni, tx);

        /* tx may have up to 2 lnet msgs to finalise */
        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
        rc = tx->tx_status;

        if (tx->tx_conn != NULL) {
                LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);

                kiblnd_conn_decref(tx->tx_conn);
                tx->tx_conn = NULL;
        }

        tx->tx_nwrq = 0;
        tx->tx_status = 0;

        spin_lock(&net->ibn_tx_lock);

        list_add(&tx->tx_list, &net->ibn_idle_txs);

        spin_unlock(&net->ibn_tx_lock);

        /* delay finalize until my descs have been freed */
        for (i = 0; i < 2; i++) {
                if (lntmsg[i] == NULL)
                        continue;

                lnet_finalize(ni, lntmsg[i], rc);
        }
}

void
kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
{
        kib_tx_t *tx;

        while (!list_empty (txlist)) {
                tx = list_entry (txlist->next, kib_tx_t, tx_list);

                list_del(&tx->tx_list);
                /* complete now */
                tx->tx_waiting = 0;
                tx->tx_status = status;
                kiblnd_tx_done(ni, tx);
        }
}

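/* Take a tx descriptor off the network's idle list, stamping it with a
 * fresh completion cookie.  Returns NULL if the pool is exhausted; the
 * caller must cope with that (typically by failing with -ENOMEM). */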
kib_tx_t *
kiblnd_get_idle_tx (lnet_ni_t *ni)
{
        kib_net_t     *net = ni->ni_data;
        kib_tx_t      *tx;

        LASSERT (net != NULL);

        spin_lock(&net->ibn_tx_lock);

        if (list_empty(&net->ibn_idle_txs)) {
                spin_unlock(&net->ibn_tx_lock);
                return NULL;
        }

        tx = list_entry(net->ibn_idle_txs.next, kib_tx_t, tx_list);
        list_del(&tx->tx_list);

        /* Allocate a new completion cookie.  It might not be needed,
         * but we've got a lock right now and we're unlikely to
         * wrap... */
        tx->tx_cookie = net->ibn_tx_next_cookie++;

        spin_unlock(&net->ibn_tx_lock);

        LASSERT (tx->tx_nwrq == 0);
        LASSERT (!tx->tx_queued);
        LASSERT (tx->tx_sending == 0);
        LASSERT (!tx->tx_waiting);
        LASSERT (tx->tx_status == 0);
        LASSERT (tx->tx_conn == NULL);
        LASSERT (tx->tx_lntmsg[0] == NULL);
        LASSERT (tx->tx_lntmsg[1] == NULL);
        LASSERT (tx->tx_u.fmr == NULL);
        LASSERT (tx->tx_nfrags == 0);

        return tx;
}

void
kiblnd_drop_rx (kib_rx_t *rx)
{
        kib_conn_t         *conn = rx->rx_conn;
        unsigned long       flags;

        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
        LASSERT (conn->ibc_nrx > 0);
        conn->ibc_nrx--;
        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);

        kiblnd_conn_decref(conn);
}

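/* (Re)post a receive buffer on the connection's queue pair.  'credit'
 * says how the buffer being consumed is accounted: as a credit owed back
 * to the peer, as a reserved credit, or not at all.  Returning a credit
 * can unblock queued sends, hence the kiblnd_check_sends() at the end. */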
int
kiblnd_post_rx (kib_rx_t *rx, int credit)
{
        kib_conn_t         *conn = rx->rx_conn;
        kib_net_t          *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr  *bad_wrq = NULL;
        struct ib_mr       *mr;
        int                 rc;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
                 credit == IBLND_POSTRX_PEER_CREDIT ||
                 credit == IBLND_POSTRX_RSRVD_CREDIT);

        mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
        LASSERT (mr != NULL);

        rx->rx_sge.lkey   = mr->lkey;
        rx->rx_sge.addr   = rx->rx_msgaddr;
        rx->rx_sge.length = IBLND_MSG_SIZE;

        rx->rx_wrq.next = NULL;
        rx->rx_wrq.sg_list = &rx->rx_sge;
        rx->rx_wrq.num_sge = 1;
        rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

        LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
        LASSERT (rx->rx_nob >= 0);              /* not posted */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return 0;
        }

        rx->rx_nob = -1;                        /* flag posted */

        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
        if (rc != 0) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        }

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                return rc;

        if (rc != 0) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return rc;
        }

        if (credit == IBLND_POSTRX_NO_CREDIT)
                return 0;

        spin_lock(&conn->ibc_lock);
        if (credit == IBLND_POSTRX_PEER_CREDIT)
                conn->ibc_outstanding_credits++;
        else
                conn->ibc_reserved_credits++;
        spin_unlock(&conn->ibc_lock);

        kiblnd_check_sends(conn);
        return 0;
}

kib_tx_t *
kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
{
        struct list_head   *tmp;

        list_for_each(tmp, &conn->ibc_active_txs) {
                kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);

                LASSERT (!tx->tx_queued);
                LASSERT (tx->tx_sending != 0 || tx->tx_waiting);

                if (tx->tx_cookie != cookie)
                        continue;

                if (tx->tx_waiting &&
                    tx->tx_msg->ibm_type == txtype)
                        return tx;

                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
                      tx->tx_waiting ? "" : "NOT ",
                      tx->tx_msg->ibm_type, txtype);
        }
        return NULL;
}

void
kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
{
        kib_tx_t    *tx;
        lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
        int          idle;

        spin_lock(&conn->ibc_lock);

        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
        if (tx == NULL) {
                spin_unlock(&conn->ibc_lock);

                CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kiblnd_close_conn(conn, -EPROTO);
                return;
        }

        if (tx->tx_status == 0) {               /* success so far */
                if (status < 0) {               /* failed? */
                        tx->tx_status = status;
                } else if (txtype == IBLND_MSG_GET_REQ) {
                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
                }
        }

        tx->tx_waiting = 0;

        idle = !tx->tx_queued && (tx->tx_sending == 0);
        if (idle)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(ni, tx);
}

void
kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie)
{
        lnet_ni_t   *ni = conn->ibc_peer->ibp_ni;
        kib_tx_t    *tx = kiblnd_get_idle_tx(ni);

        if (tx == NULL) {
                CERROR("Can't get tx for completion %x for %s\n",
                       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
        kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));

        kiblnd_queue_tx(tx, conn);
}

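/* Dispatch a received message: bank any returned flow-control credits,
 * then act on the message type.  PUT/GET requests are handed up to LNet
 * via lnet_parse(); completions are matched against waiting txs.  On a
 * protocol error the connection is closed, and the rx is re-posted
 * unless lnet_parse() has taken ownership of it. */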
void
kiblnd_handle_rx (kib_rx_t *rx)
{
        kib_msg_t    *msg = rx->rx_msg;
        kib_conn_t   *conn = rx->rx_conn;
        lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
        int           credits = msg->ibm_credits;
        kib_tx_t     *tx;
        int           rc = 0;
        int           rc2;
        int           post_credit;

        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        CDEBUG (D_NET, "Received %x[%d] from %s\n",
                msg->ibm_type, credits, libcfs_nid2str(conn->ibc_peer->ibp_nid));

        if (credits != 0) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);

                if (conn->ibc_credits + credits >
                    IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
                        rc2 = conn->ibc_credits;
                        spin_unlock(&conn->ibc_lock);

                        CERROR("Bad credits from %s: %d + %d > %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               rc2, credits, IBLND_MSG_QUEUE_SIZE(conn->ibc_version));

                        kiblnd_close_conn(conn, -EPROTO);
                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
                        return;
                }

                conn->ibc_credits += credits;

                spin_unlock(&conn->ibc_lock);
                kiblnd_check_sends(conn);
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Bad IBLND message type %x from %s\n",
                       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_NO_CREDIT;
                rc = -EPROTO;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_IMMEDIATE:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
                                msg->ibm_srcnid, rx, 0);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_NAK:
                CWARN ("PUT_NACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_PUT_ACK:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;

                spin_lock(&conn->ibc_lock);
                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                                   msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT (tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = 0;                /* overwrite PUT_REQ */

                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                tx->tx_waiting = 0;             /* clear waiting and queue atomically */
                kiblnd_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBLND_MSG_PUT_DONE:
                post_credit = IBLND_POSTRX_PEER_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_GET_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_GET_DONE:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kiblnd_close_conn(conn, rc);

        if (post_credit != IBLND_POSTRX_DONT_POST)
                kiblnd_post_rx(rx, post_credit);
}

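/* Completion handler for a posted receive.  Validates the wire message
 * (unpack, peer NIDs, incarnation stamps) before handing it to
 * kiblnd_handle_rx(); rxs that arrive while the connection is still
 * being established are parked on ibc_early_rxs. */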
void
kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
{
        kib_msg_t    *msg = rx->rx_msg;
        kib_conn_t   *conn = rx->rx_conn;
        lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
        kib_net_t    *net = ni->ni_data;
        int           rc;
        int           err = -EIO;

        LASSERT (net != NULL);
        LASSERT (rx->rx_nob < 0);               /* was posted */
        rx->rx_nob = 0;                         /* isn't now */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
                goto ignore;

        if (status != IB_WC_SUCCESS) {
                CDEBUG(D_NETERROR, "Rx from %s failed: %d\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                goto failed;
        }

        LASSERT (nob >= 0);
        rx->rx_nob = nob;

        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
        if (rc != 0) {
                CERROR ("Error %d unpacking rx from %s\n",
                        rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
            msg->ibm_dstnid != ni->ni_nid ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != net->ibn_incarnation) {
                CERROR ("Stale rx from %s\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                err = -ESTALE;
                goto failed;
        }

        /* set time last known alive */
        kiblnd_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
                unsigned long  flags;

                write_lock_irqsave(g_lock, flags);
                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(g_lock, flags);
                        return;
                }
                write_unlock_irqrestore(g_lock, flags);
        }
        kiblnd_handle_rx(rx);
        return;

 failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kiblnd_close_conn(conn, err);
 ignore:
        kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
}

struct page *
kiblnd_kvaddr_to_page (unsigned long vaddr)
{
        struct page *page;

        if (vaddr >= VMALLOC_START &&
            vaddr < VMALLOC_END) {
                page = vmalloc_to_page ((void *)vaddr);
                LASSERT (page != NULL);
                return page;
        }
#ifdef CONFIG_HIGHMEM
        if (vaddr >= PKMAP_BASE &&
            vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
                /* No highmem addresses expected here: highmem pages are
                 * only used for bulk (kiov) I/O, which never takes this
                 * path */
                CERROR("find page for address in highmem\n");
                LBUG();
        }
#endif
        page = virt_to_page (vaddr);
        LASSERT (page != NULL);
        return page;
}

static void
kiblnd_fmr_unmap_tx(kib_net_t *net, kib_tx_t *tx)
{
        int     rc;

        if (tx->tx_u.fmr == NULL)
                return;

        rc = ib_fmr_pool_unmap(tx->tx_u.fmr);
        LASSERT (rc == 0);

        if (tx->tx_status != 0) {
                rc = ib_flush_fmr_pool(net->ibn_fmrpool);
                LASSERT (rc == 0);
        }

        tx->tx_u.fmr = NULL;
}

static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
        struct ib_pool_fmr *fmr;
        kib_dev_t          *ibdev  = net->ibn_dev;
        __u64              *pages  = tx->tx_pages;
        int                 npages;
        int                 size;
        int                 i;

        for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
                for (size = 0; size < rd->rd_frags[i].rf_nob;
                               size += ibdev->ibd_page_size) {
                        pages[npages++] = (rd->rd_frags[i].rf_addr &
                                           ibdev->ibd_page_mask) + size;
                }
        }

        fmr = ib_fmr_pool_map_phys(net->ibn_fmrpool, pages, npages, 0);

        if (IS_ERR(fmr)) {
                CERROR ("Can't map %d pages: %ld\n", npages, PTR_ERR(fmr));
                return PTR_ERR(fmr);
        }

        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
         * the rkey */
        rd->rd_key = (rd != tx->tx_rd) ? fmr->fmr->rkey :
                                         fmr->fmr->lkey;
        rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
        rd->rd_frags[0].rf_nob   = nob;
        rd->rd_nfrags = 1;

        tx->tx_u.fmr = fmr;

        return 0;
}

static void
kiblnd_pmr_unmap_tx(kib_net_t *net, kib_tx_t *tx)
{
        if (tx->tx_u.pmr == NULL)
                return;

        kiblnd_phys_mr_unmap(net, tx->tx_u.pmr);

        tx->tx_u.pmr = NULL;
}

static int
kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
        kib_phys_mr_t      *pmr;
        __u64               iova;

        iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;

        pmr = kiblnd_phys_mr_map(net, rd, tx->tx_ipb, &iova);
        if (pmr == NULL) {
                CERROR("Failed to create MR by phybuf\n");
                return -ENOMEM;
        }

        rd->rd_key = (rd != tx->tx_rd) ? pmr->ibpm_mr->rkey :
                                         pmr->ibpm_mr->lkey;
        rd->rd_nfrags = 1;
        rd->rd_frags[0].rf_addr = iova;
        rd->rd_frags[0].rf_nob  = nob;

        tx->tx_u.pmr = pmr;

        return 0;
}

void
kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
        kib_net_t  *net = ni->ni_data;

        LASSERT (net != NULL);

        if (net->ibn_fmrpool != NULL)
                kiblnd_fmr_unmap_tx(net, tx);
        else if (net->ibn_pmrpool != NULL)
                kiblnd_pmr_unmap_tx(net, tx);

        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                tx->tx_nfrags = 0;
        }
}

int
kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
              kib_rdma_desc_t *rd, int nfrags)
{
        kib_net_t          *net   = ni->ni_data;
        struct ib_mr       *mr    = NULL;
        __u32               nob;
        int                 i;

        /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
         * RDMA sink */
        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        tx->tx_nfrags = nfrags;

        rd->rd_nfrags =
                kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
                                  tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);

        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
                nob += rd->rd_frags[i].rf_nob;
        }

        /* looking for pre-mapping MR */
        mr = kiblnd_find_rd_dma_mr(net, rd);
        if (mr != NULL) {
                /* found pre-mapping MR */
                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
                return 0;
        }

        if (net->ibn_fmrpool != NULL)
                return kiblnd_fmr_map_tx(net, tx, rd, nob);

        if (net->ibn_pmrpool != NULL)
                return kiblnd_pmr_map_tx(net, tx, rd, nob);

        return -EINVAL;
}

int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                    unsigned int niov, struct iovec *iov, int offset, int nob)
{
        kib_net_t          *net = ni->ni_data;
        struct page        *page;
        struct scatterlist *sg;
        unsigned long       vaddr;
        int                 fragnob;
        int                 page_offset;

        LASSERT (nob > 0);
        LASSERT (niov > 0);
        LASSERT (net != NULL);

        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                niov--;
                iov++;
                LASSERT (niov > 0);
        }

        sg = tx->tx_frags;
        do {
                LASSERT (niov > 0);

                vaddr = ((unsigned long)iov->iov_base) + offset;
                page_offset = vaddr & (PAGE_SIZE - 1);
                page = kiblnd_kvaddr_to_page(vaddr);
                if (page == NULL) {
                        CERROR ("Can't find page\n");
                        return -EFAULT;
                }

                fragnob = min((int)(iov->iov_len - offset), nob);
                fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);

                sg_set_page(sg, page, fragnob, page_offset);
                sg++;

                if (offset + fragnob < iov->iov_len) {
                        offset += fragnob;
                } else {
                        offset = 0;
                        iov++;
                        niov--;
                }
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

int
kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
        kib_net_t          *net = ni->ni_data;
        struct scatterlist *sg;
        int                 fragnob;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT (nob > 0);
        LASSERT (nkiov > 0);
        LASSERT (net != NULL);

        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                nkiov--;
                kiov++;
                LASSERT (nkiov > 0);
        }

        sg = tx->tx_frags;
        do {
                LASSERT (nkiov > 0);

                fragnob = min((int)(kiov->kiov_len - offset), nob);

                memset(sg, 0, sizeof(*sg));
                sg_set_page(sg, kiov->kiov_page, fragnob,
                            kiov->kiov_offset + offset);
                sg++;

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

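/* Attempt to post one queued tx; called and returns with ibc_lock held.
 * Returns -EAGAIN (leaving the tx queued) when the send work queue is
 * full, when a required credit is unavailable, or when the last credit
 * must be held back for returning credits to the peer; a failed
 * ib_post_send() closes the connection and returns -EIO. */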
int
kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
{
        kib_msg_t         *msg = tx->tx_msg;
        kib_peer_t        *peer = conn->ibc_peer;
        int                ver = conn->ibc_version;
        int                rc;
        int                done;
        struct ib_send_wr *bad_wrq;

        LASSERT (tx->tx_queued);
        /* We rely on this for QP sizing */
        LASSERT (tx->tx_nwrq > 0);
        LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));

        LASSERT (credit == 0 || credit == 1);
        LASSERT (conn->ibc_outstanding_credits >= 0);
        LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
        LASSERT (conn->ibc_credits >= 0);
        LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));

        if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&   /* last credit reserved for */
            conn->ibc_outstanding_credits == 0) { /* giving back credits */
                CDEBUG(D_NET, "%s: not using last credit\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        /* NB don't drop ibc_lock before bumping tx_sending */
        list_del(&tx->tx_list);
        tx->tx_queued = 0;

        if (msg->ibm_type == IBLND_MSG_NOOP &&
            (!kiblnd_send_noop(conn) ||     /* redundant NOOP */
             (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                /* OK to drop when posted enough NOOPs, since
                 * kiblnd_check_sends will queue NOOP again when
                 * posted NOOPs complete */
                spin_unlock(&conn->ibc_lock);
                kiblnd_tx_done(peer->ibp_ni, tx);
                spin_lock(&conn->ibc_lock);
                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_noops_posted);
                return 0;
        }

        kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
                        peer->ibp_nid, conn->ibc_incarnation);

        conn->ibc_credits -= credit;
        conn->ibc_outstanding_credits = 0;
        conn->ibc_nsends_posted++;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted++;

        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
         * and then re-queued here.  It's (just) possible that
         * tx_sending is non-zero if we've not done the tx_complete()
         * from the first send; hence the ++ rather than = below. */
        tx->tx_sending++;
        list_add(&tx->tx_list, &conn->ibc_active_txs);

        /* I'm still holding ibc_lock! */
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
                rc = -ECONNABORTED;
        else
                rc = ib_post_send(conn->ibc_cmid->qp,
                                  tx->tx_wrq, &bad_wrq);
        conn->ibc_last_send = jiffies;

        if (rc == 0)
                return 0;

        /* NB credits are transferred in the actual
         * message, which can only be the last work item */
        conn->ibc_credits += credit;
        conn->ibc_outstanding_credits += msg->ibm_credits;
        conn->ibc_nsends_posted--;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        tx->tx_status = rc;
        tx->tx_waiting = 0;
        tx->tx_sending--;

        done = (tx->tx_sending == 0);
        if (done)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                CERROR("Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer->ibp_nid));
        else
                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer->ibp_nid));

        kiblnd_close_conn(conn, rc);

        if (done)
                kiblnd_tx_done(peer->ibp_ni, tx);
        return -EIO;
}

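/* Push out as much queued traffic as credits and the send queue allow:
 * move reserved-credit txs onto the normal queue as reserved credits
 * arrive, queue a NOOP if credits need returning and nothing else will
 * carry them, then post txs until kiblnd_post_tx_locked() says stop. */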
void
kiblnd_check_sends (kib_conn_t *conn)
{
        int        ver = conn->ibc_version;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
        kib_tx_t  *tx;

        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                CDEBUG(D_NET, "%s too soon\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        spin_lock(&conn->ibc_lock);

        LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT (conn->ibc_reserved_credits >= 0);

        while (conn->ibc_reserved_credits > 0 &&
               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
                                kib_tx_t, tx_list);
                list_del(&tx->tx_list);
                list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
        }

        if (kiblnd_send_noop(conn)) {
                spin_unlock(&conn->ibc_lock);

                tx = kiblnd_get_idle_tx(ni);
                if (tx != NULL)
                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

                spin_lock(&conn->ibc_lock);
                if (tx != NULL)
                        kiblnd_queue_tx_locked(tx, conn);
        }

        for (;;) {
                int credit;

                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
                                        kib_tx_t, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_queue.next,
                                        kib_tx_t, tx_list);
                } else
                        break;

                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                        break;
        }

        spin_unlock(&conn->ibc_lock);
}

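/* Completion handler for a send work request.  Drops the tx's sending
 * count and, if the tx is now idle (not queued, not awaiting a peer
 * response), finalises it; a failed completion closes the connection. */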
void
kiblnd_tx_complete (kib_tx_t *tx, int status)
{
        int           failed = (status != IB_WC_SUCCESS);
        kib_conn_t   *conn = tx->tx_conn;
        int           idle;

        LASSERT (tx->tx_sending > 0);

        if (failed) {
                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CDEBUG(D_NETERROR, "Tx -> %s cookie "LPX64
                               " sending %d waiting %d: failed %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
                               status);

                kiblnd_close_conn(conn, -EIO);
        } else {
                kiblnd_peer_alive(conn->ibc_peer);
        }

        spin_lock(&conn->ibc_lock);

        /* I could be racing with rdma completion.  Whoever makes 'tx' idle
         * gets to free it, which also drops its ref on 'conn'. */

        tx->tx_sending--;
        conn->ibc_nsends_posted--;
        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        if (failed) {
                tx->tx_waiting = 0;             /* don't wait for peer */
                tx->tx_status = -EIO;
        }

        idle = (tx->tx_sending == 0) &&         /* This is the final callback */
               !tx->tx_waiting &&               /* Not waiting for peer */
               !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
        if (idle)
                list_del(&tx->tx_list);

        kiblnd_conn_addref(conn);               /* 1 ref for me.... */

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);

        kiblnd_check_sends(conn);

        kiblnd_conn_decref(conn);               /* ...until here */
}

void
kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
        kib_net_t         *net = ni->ni_data;
        struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
        struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
        int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
        struct ib_mr      *mr;

        LASSERT (net != NULL);
        LASSERT (tx->tx_nwrq >= 0);
        LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
        LASSERT (nob <= IBLND_MSG_SIZE);

        kiblnd_init_msg(tx->tx_msg, type, body_nob);

        mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
        LASSERT (mr != NULL);

        sge->lkey   = mr->lkey;
        sge->addr   = tx->tx_msgaddr;
        sge->length = nob;

        memset(wrq, 0, sizeof(*wrq));

        wrq->next       = NULL;
        wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
        wrq->sg_list    = sge;
        wrq->num_sge    = 1;
        wrq->opcode     = IB_WR_SEND;
        wrq->send_flags = IB_SEND_SIGNALED;

        tx->tx_nwrq++;
}

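/* Build the chain of RDMA-write work requests that moves 'resid' bytes
 * from the local source descriptor to the peer's descriptor 'dstrd',
 * fragment by fragment, then append the GET_DONE/PUT_DONE completion
 * message carrying the status and cookie back to the peer. */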
int
kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
                  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
        kib_msg_t         *ibmsg = tx->tx_msg;
        kib_rdma_desc_t   *srcrd = tx->tx_rd;
        struct ib_sge     *sge = &tx->tx_sge[0];
        struct ib_send_wr *wrq = &tx->tx_wrq[0];
        int                rc  = resid;
        int                srcidx;
        int                dstidx;
        int                wrknob;

        LASSERT (!in_interrupt());
        LASSERT (tx->tx_nwrq == 0);
        LASSERT (type == IBLND_MSG_GET_DONE ||
                 type == IBLND_MSG_PUT_DONE);

        srcidx = dstidx = 0;

        while (resid > 0) {
                if (srcidx >= srcrd->rd_nfrags) {
                        CERROR("Src buffer exhausted: %d frags\n", srcidx);
                        rc = -EPROTO;
                        break;
                }

                if (dstidx == dstrd->rd_nfrags) {
                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                        rc = -EPROTO;
                        break;
                }

                if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
                        CERROR("RDMA too fragmented for %s (%d): "
                               "%d/%d src %d/%d dst frags\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               IBLND_RDMA_FRAGS(conn->ibc_version),
                               srcidx, srcrd->rd_nfrags,
                               dstidx, dstrd->rd_nfrags);
                        rc = -EMSGSIZE;
                        break;
                }

                wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
                                 kiblnd_rd_frag_size(dstrd, dstidx)), resid);

                sge = &tx->tx_sge[tx->tx_nwrq];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                sge->length = wrknob;

                wrq = &tx->tx_wrq[tx->tx_nwrq];

                wrq->next       = wrq + 1;
                wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
                wrq->sg_list    = sge;
                wrq->num_sge    = 1;
                wrq->opcode     = IB_WR_RDMA_WRITE;
                wrq->send_flags = 0;

                wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
                wrq->wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);

                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);

                resid -= wrknob;

                tx->tx_nwrq++;
                wrq++;
                sge++;
        }

        if (rc < 0)                             /* no RDMA if completing with failure */
                tx->tx_nwrq = 0;

        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
                           type, sizeof (kib_completion_msg_t));

        return rc;
}

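/* Queue a fully initialised tx on the connection, with ibc_lock held.
 * The message type picks the queue: PUT_REQ/GET_REQ wait on the reserved
 * queue until a reserved credit is available, control messages go out
 * without consuming a credit, and IMMEDIATE (and pre-OOB NOOP) traffic
 * uses the normal credit-based queue. */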
void
kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
{
        struct list_head   *q;

        LASSERT (tx->tx_nwrq > 0);              /* work items set up */
        LASSERT (!tx->tx_queued);               /* not queued for sending already */
        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        tx->tx_queued = 1;
        tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);

        if (tx->tx_conn == NULL) {
                kiblnd_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
        } else {
                /* PUT_DONE first attached to conn as a PUT_REQ */
                LASSERT (tx->tx_conn == conn);
                LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
        }

        switch (tx->tx_msg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_PUT_REQ:
        case IBLND_MSG_GET_REQ:
                q = &conn->ibc_tx_queue_rsrvd;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                q = &conn->ibc_tx_queue_nocred;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        q = &conn->ibc_tx_queue_nocred;
                else
                        q = &conn->ibc_tx_queue;
                break;

        case IBLND_MSG_IMMEDIATE:
                q = &conn->ibc_tx_queue;
                break;
        }

        list_add_tail(&tx->tx_list, q);
}

void
kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
{
        spin_lock(&conn->ibc_lock);
        kiblnd_queue_tx_locked(tx, conn);
        spin_unlock(&conn->ibc_lock);

        kiblnd_check_sends(conn);
}

void
kiblnd_connect_peer (kib_peer_t *peer)
{
        struct rdma_cm_id *cmid;
        kib_net_t         *net = peer->ibp_ni->ni_data;
        struct sockaddr_in srcaddr;
        struct sockaddr_in dstaddr;
        int                rc;

        LASSERT (net != NULL);
        LASSERT (peer->ibp_connecting > 0);

        cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP);
        if (IS_ERR(cmid)) {
                CERROR("Can't create CMID for %s: %ld\n",
                       libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
                rc = PTR_ERR(cmid);
                goto failed;
        }

        memset(&srcaddr, 0, sizeof(srcaddr));
        srcaddr.sin_family = AF_INET;
        srcaddr.sin_addr.s_addr = htonl(net->ibn_dev->ibd_ifip);

        memset(&dstaddr, 0, sizeof(dstaddr));
        dstaddr.sin_family = AF_INET;
        dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
        dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));

        kiblnd_peer_addref(peer);               /* cmid's ref */

        rc = rdma_resolve_addr(cmid,
                               (struct sockaddr *)&srcaddr,
                               (struct sockaddr *)&dstaddr,
                               *kiblnd_tunables.kib_timeout * 1000);
        if (rc == 0)
                return;

        /* Can't initiate address resolution */
        CERROR("Can't resolve addr for %s: %d\n",
               libcfs_nid2str(peer->ibp_nid), rc);

        kiblnd_peer_decref(peer);               /* cmid's ref */
        rdma_destroy_id(cmid);
 failed:
        kiblnd_peer_connect_failed(peer, 1, rc);
}

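/* Launch a tx (or just provoke a connection attempt if tx is NULL) to
 * the given NID.  Looks for an established connection first under the
 * read lock, retries under the write lock, and otherwise creates a peer,
 * queues the tx on it and initiates connection establishment.  Once
 * here, any failure completes the tx with an error. */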
void
kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
{
        kib_peer_t        *peer;
        kib_peer_t        *peer2;
        kib_conn_t        *conn;
        rwlock_t          *g_lock = &kiblnd_data.kib_global_lock;
        unsigned long      flags;
        int                rc;

        /* If I get here, I've committed to send, so I complete the tx with
         * failure on any problems */

        LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
        LASSERT (tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */

        /* First time, just use a read lock since I expect to find my peer
         * connected */
        read_lock_irqsave(g_lock, flags);

        peer = kiblnd_find_peer_locked(nid);
        if (peer != NULL && !list_empty(&peer->ibp_conns)) {
                /* Found a peer with an established connection */
                conn = kiblnd_get_conn_locked(peer);
                kiblnd_conn_addref(conn); /* 1 ref for me... */

                read_unlock_irqrestore(g_lock, flags);

                if (tx != NULL)
                        kiblnd_queue_tx(tx, conn);
                kiblnd_conn_decref(conn); /* ...to here */
                return;
        }

        read_unlock(g_lock);
        /* Re-try with a write lock */
        write_lock(g_lock);

        peer = kiblnd_find_peer_locked(nid);
        if (peer != NULL) {
                if (list_empty(&peer->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
                        LASSERT (peer->ibp_connecting != 0 ||
                                 peer->ibp_accepting != 0);
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
                        write_unlock_irqrestore(g_lock, flags);
                } else {
                        conn = kiblnd_get_conn_locked(peer);
                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        write_unlock_irqrestore(g_lock, flags);

                        if (tx != NULL)
                                kiblnd_queue_tx(tx, conn);
                        kiblnd_conn_decref(conn); /* ...to here */
                }
                return;
        }

        write_unlock_irqrestore(g_lock, flags);

        /* Allocate a peer ready to add to the peer table and retry */
        rc = kiblnd_create_peer(ni, &peer, nid);
        if (rc != 0) {
                CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
                if (tx != NULL) {
                        tx->tx_status = -EHOSTUNREACH;
                        tx->tx_waiting = 0;
                        kiblnd_tx_done(ni, tx);
                }
                return;
        }

        write_lock_irqsave(g_lock, flags);

        peer2 = kiblnd_find_peer_locked(nid);
        if (peer2 != NULL) {
                if (list_empty(&peer2->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
                        LASSERT (peer2->ibp_connecting != 0 ||
                                 peer2->ibp_accepting != 0);
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue);
                        write_unlock_irqrestore(g_lock, flags);
                } else {
                        conn = kiblnd_get_conn_locked(peer2);
                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        write_unlock_irqrestore(g_lock, flags);

                        if (tx != NULL)
                                kiblnd_queue_tx(tx, conn);
                        kiblnd_conn_decref(conn); /* ...to here */
                }

                kiblnd_peer_decref(peer);
                return;
        }

        /* Brand new peer */
        LASSERT (peer->ibp_connecting == 0);
        peer->ibp_connecting = 1;

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);

        if (tx != NULL)
                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);

        kiblnd_peer_addref(peer);
        list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

        write_unlock_irqrestore(g_lock, flags);

        kiblnd_connect_peer(peer);
        kiblnd_peer_decref(peer);
}

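/* LNet send() method.  Payloads small enough for a single message buffer
 * are copied into an IMMEDIATE message.  Larger transfers map the local
 * buffer for RDMA: a GET advertises the reply sink in a GET_REQ, a
 * PUT/REPLY advertises its source in a PUT_REQ, and the bulk data then
 * moves as RDMA writes completed by GET_DONE/PUT_DONE messages. */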
int
kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
        lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
        int               type = lntmsg->msg_type;
        lnet_process_id_t target = lntmsg->msg_target;
        int               target_is_router = lntmsg->msg_target_is_router;
        int               routing = lntmsg->msg_routing;
        unsigned int      payload_niov = lntmsg->msg_niov;
        struct iovec     *payload_iov = lntmsg->msg_iov;
        lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
        unsigned int      payload_offset = lntmsg->msg_offset;
        unsigned int      payload_nob = lntmsg->msg_len;
        kib_msg_t        *ibmsg;
        kib_tx_t         *tx;
        int               nob;
        int               rc;

        /* NB 'private' is different depending on what we're sending.... */

        CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= LNET_MAX_IOV);

        /* Thread context */
        LASSERT (!in_interrupt());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        switch (type) {
        default:
                LBUG();
                return (-EIO);

        case LNET_MSG_ACK:
                LASSERT (payload_nob == 0);
                break;

        case LNET_MSG_GET:
                if (routing || target_is_router)
                        break;                  /* send IMMEDIATE */

                /* is the REPLY message too small for RDMA? */
                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
                if (nob <= IBLND_MSG_SIZE)
                        break;                  /* send IMMEDIATE */

                tx = kiblnd_get_idle_tx(ni);
                if (tx == NULL) {
                        CERROR("Can't allocate txd for GET to %s\n",
                               libcfs_nid2str(target.nid));
                        return -ENOMEM;
                }

                ibmsg = tx->tx_msg;

                if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
                        rc = kiblnd_setup_rd_iov(ni, tx,
                                                 &ibmsg->ibm_u.get.ibgm_rd,
                                                 lntmsg->msg_md->md_niov,
                                                 lntmsg->msg_md->md_iov.iov,
                                                 0, lntmsg->msg_md->md_length);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx,
                                                  &ibmsg->ibm_u.get.ibgm_rd,
                                                  lntmsg->msg_md->md_niov,
                                                  lntmsg->msg_md->md_iov.kiov,
                                                  0, lntmsg->msg_md->md_length);
                if (rc != 0) {
                        CERROR("Can't setup GET sink for %s: %d\n",
                               libcfs_nid2str(target.nid), rc);
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
                ibmsg->ibm_u.get.ibgm_hdr = *hdr;

                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);

                tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
                if (tx->tx_lntmsg[1] == NULL) {
                        CERROR("Can't create reply for GET -> %s\n",
                               libcfs_nid2str(target.nid));
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
                tx->tx_waiting = 1;             /* waiting for GET_DONE */
                kiblnd_launch_tx(ni, tx, target.nid);
                return 0;

        case LNET_MSG_REPLY:
        case LNET_MSG_PUT:
                /* Is the payload small enough not to need RDMA? */
                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
                if (nob <= IBLND_MSG_SIZE)
                        break;                  /* send IMMEDIATE */

                tx = kiblnd_get_idle_tx(ni);
                if (tx == NULL) {
                        CERROR("Can't allocate %s txd for %s\n",
                               type == LNET_MSG_PUT ? "PUT" : "REPLY",
                               libcfs_nid2str(target.nid));
                        return -ENOMEM;
                }

                if (payload_kiov == NULL)
                        rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
                                                 payload_niov, payload_iov,
                                                 payload_offset, payload_nob);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
                                                  payload_niov, payload_kiov,
                                                  payload_offset, payload_nob);
                if (rc != 0) {
                        CERROR("Can't setup PUT src for %s: %d\n",
                               libcfs_nid2str(target.nid), rc);
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                ibmsg = tx->tx_msg;
                ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
                ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));

                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
                tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
                kiblnd_launch_tx(ni, tx, target.nid);
                return 0;
        }

        /* send IMMEDIATE */

        LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
                 <= IBLND_MSG_SIZE);

        tx = kiblnd_get_idle_tx(ni);
        if (tx == NULL) {
                CERROR ("Can't send %d to %s: tx descs exhausted\n",
                        type, libcfs_nid2str(target.nid));
                return -ENOMEM;
        }

        ibmsg = tx->tx_msg;
        ibmsg->ibm_u.immediate.ibim_hdr = *hdr;

        if (payload_kiov != NULL)
                lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
                                    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                    payload_niov, payload_kiov,
                                    payload_offset, payload_nob);
        else
                lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
                                   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                   payload_niov, payload_iov,
                                   payload_offset, payload_nob);
1554
1555         nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
1556         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1557
1558         tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
1559         kiblnd_launch_tx(ni, tx, target.nid);
1560         return 0;
1561 }
1562
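/* Reply to an "optimized" GET: RDMA lntmsg's payload straight into the
 * sink the peer described in its GET_REQ, then complete with GET_DONE.
 * On any failure here the GET is finalised with -EIO. */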
1563 void
1564 kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
1565 {
1566         lnet_process_id_t target = lntmsg->msg_target;
1567         unsigned int      niov = lntmsg->msg_niov;
1568         struct iovec     *iov = lntmsg->msg_iov;
1569         lnet_kiov_t      *kiov = lntmsg->msg_kiov;
1570         unsigned int      offset = lntmsg->msg_offset;
1571         unsigned int      nob = lntmsg->msg_len;
1572         kib_tx_t         *tx;
1573         int               rc;
1574
1575         tx = kiblnd_get_idle_tx(ni);
1576         if (tx == NULL) {
1577                 CERROR("Can't get tx for REPLY to %s\n",
1578                        libcfs_nid2str(target.nid));
1579                 goto failed_0;
1580         }
1581
1582         if (nob == 0)
1583                 rc = 0;
1584         else if (kiov == NULL)
1585                 rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1586                                          niov, iov, offset, nob);
1587         else
1588                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1589                                           niov, kiov, offset, nob);
1590
1591         if (rc != 0) {
1592                 CERROR("Can't setup GET src for %s: %d\n",
1593                        libcfs_nid2str(target.nid), rc);
1594                 goto failed_1;
1595         }
1596
1597         rc = kiblnd_init_rdma(rx->rx_conn, tx,
1598                               IBLND_MSG_GET_DONE, nob,
1599                               &rx->rx_msg->ibm_u.get.ibgm_rd,
1600                               rx->rx_msg->ibm_u.get.ibgm_cookie);
1601         if (rc < 0) {
1602                 CERROR("Can't setup rdma for GET from %s: %d\n",
1603                        libcfs_nid2str(target.nid), rc);
1604                 goto failed_1;
1605         }
1606
1607         if (nob == 0) {
1608                 /* No RDMA: local completion may happen now! */
1609                 lnet_finalize(ni, lntmsg, 0);
1610         } else {
1611                 /* RDMA: lnet_finalize(lntmsg) when it
1612                  * completes */
1613                 tx->tx_lntmsg[0] = lntmsg;
1614         }
1615
1616         kiblnd_queue_tx(tx, rx->rx_conn);
1617         return;
1618
1619  failed_1:
1620         kiblnd_tx_done(ni, tx);
1621  failed_0:
1622         lnet_finalize(ni, lntmsg, -EIO);
1623 }
1624
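/* LND receive entry point: 'private' is the kib_rx_t the message arrived
 * on.  IMMEDIATE payloads are copied straight out of the receive buffer;
 * a PUT_REQ gets a PUT_ACK describing where to RDMA the payload (or a
 * PUT_NAK on error); a GET_REQ is answered via kiblnd_reply().  The
 * receive buffer is always reposted, returning a credit to the peer
 * unless the buffer must be reserved for the expected PUT_DONE. */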
1625 int
1626 kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
1627              unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
1628              unsigned int offset, unsigned int mlen, unsigned int rlen)
1629 {
1630         kib_rx_t    *rx = private;
1631         kib_msg_t   *rxmsg = rx->rx_msg;
1632         kib_conn_t  *conn = rx->rx_conn;
1633         kib_tx_t    *tx;
1634         kib_msg_t   *txmsg;
1635         int          nob;
1636         int          post_credit = IBLND_POSTRX_PEER_CREDIT;
1637         int          rc = 0;
1638
1639         LASSERT (mlen <= rlen);
1640         LASSERT (!in_interrupt());
1641         /* Either all pages or all vaddrs */
1642         LASSERT (!(kiov != NULL && iov != NULL));
1643
1644         switch (rxmsg->ibm_type) {
1645         default:
1646                 LBUG();
1647
1648         case IBLND_MSG_IMMEDIATE:
1649                 nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
1650                 if (nob > rx->rx_nob) {
1651                         CERROR ("Immediate message from %s too big: %d(%d)\n",
1652                                 libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
1653                                 nob, rx->rx_nob);
1654                         rc = -EPROTO;
1655                         break;
1656                 }
1657
1658                 if (kiov != NULL)
1659                         lnet_copy_flat2kiov(niov, kiov, offset,
1660                                             IBLND_MSG_SIZE, rxmsg,
1661                                             offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1662                                             mlen);
1663                 else
1664                         lnet_copy_flat2iov(niov, iov, offset,
1665                                            IBLND_MSG_SIZE, rxmsg,
1666                                            offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1667                                            mlen);
1668                 lnet_finalize (ni, lntmsg, 0);
1669                 break;
1670
1671         case IBLND_MSG_PUT_REQ:
1672                 if (mlen == 0) {
1673                         lnet_finalize(ni, lntmsg, 0);
1674                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
1675                                                rxmsg->ibm_u.putreq.ibprm_cookie);
1676                         break;
1677                 }
1678
1679                 tx = kiblnd_get_idle_tx(ni);
1680                 if (tx == NULL) {
1681                         CERROR("Can't allocate tx for %s\n",
1682                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
1683                         /* Not replying will break the connection */
1684                         rc = -ENOMEM;
1685                         break;
1686                 }
1687
1688                 txmsg = tx->tx_msg;
1689                 if (kiov == NULL)
1690                         rc = kiblnd_setup_rd_iov(ni, tx,
1691                                                  &txmsg->ibm_u.putack.ibpam_rd,
1692                                                  niov, iov, offset, mlen);
1693                 else
1694                         rc = kiblnd_setup_rd_kiov(ni, tx,
1695                                                   &txmsg->ibm_u.putack.ibpam_rd,
1696                                                   niov, kiov, offset, mlen);
1697                 if (rc != 0) {
1698                         CERROR("Can't setup PUT sink for %s: %d\n",
1699                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
1700                         kiblnd_tx_done(ni, tx);
1701                         /* tell peer it's over */
1702                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
1703                                                rxmsg->ibm_u.putreq.ibprm_cookie);
1704                         break;
1705                 }
1706
1707                 nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
1708                 txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
1709                 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
1710
1711                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
1712
1713                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1714                 tx->tx_waiting = 1;             /* waiting for PUT_DONE */
1715                 kiblnd_queue_tx(tx, conn);
1716
1717                 /* reposted buffer reserved for PUT_DONE */
1718                 post_credit = IBLND_POSTRX_NO_CREDIT;
1719                 break;
1720
1721         case IBLND_MSG_GET_REQ:
1722                 if (lntmsg != NULL) {
1723                         /* Optimized GET; RDMA lntmsg's payload */
1724                         kiblnd_reply(ni, rx, lntmsg);
1725                 } else {
1726                         /* GET didn't match anything */
1727                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
1728                                                -ENODATA,
1729                                                rxmsg->ibm_u.get.ibgm_cookie);
1730                 }
1731                 break;
1732         }
1733
1734         kiblnd_post_rx(rx, post_credit);
1735         return rc;
1736 }
1737
1738 int
1739 kiblnd_thread_start (int (*fn)(void *arg), void *arg)
1740 {
1741         long    pid = kernel_thread (fn, arg, 0);
1742
1743         if (pid < 0)
1744                 return ((int)pid);
1745
1746         atomic_inc (&kiblnd_data.kib_nthreads);
1747         return (0);
1748 }
1749
1750 void
1751 kiblnd_thread_fini (void)
1752 {
1753         atomic_dec (&kiblnd_data.kib_nthreads);
1754 }
1755
1756 void
1757 kiblnd_peer_alive (kib_peer_t *peer)
1758 {
1759         /* This is racy, but everyone's only writing cfs_time_current() */
1760         peer->ibp_last_alive = cfs_time_current();
1761         mb();
1762 }
1763
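/* Tell LNET the peer is dead: this only fires once no conns remain, no
 * connection attempts are in flight and an error has been recorded.  The
 * last-alive time is back-computed from ibp_last_alive (jiffies). */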
1764 void
1765 kiblnd_peer_notify (kib_peer_t *peer)
1766 {
1767         time_t        last_alive = 0;
1768         int           error = 0;
1769         unsigned long flags;
1770
1771         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1772
1773         if (list_empty(&peer->ibp_conns) &&
1774             peer->ibp_accepting == 0 &&
1775             peer->ibp_connecting == 0 &&
1776             peer->ibp_error != 0) {
1777                 error = peer->ibp_error;
1778                 peer->ibp_error = 0;
1779
1780                 last_alive = cfs_time_current_sec() -
1781                              cfs_duration_sec(cfs_time_current() -
1782                                               peer->ibp_last_alive);
1783         }
1784
1785         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1786
1787         if (error != 0)
1788                 lnet_notify(peer->ibp_ni,
1789                             peer->ibp_nid, 0, last_alive);
1790 }
1791
1792 void
1793 kiblnd_close_conn_locked (kib_conn_t *conn, int error)
1794 {
1795         /* This just does the immediate housekeeping.  'error' is zero for a
1796          * normal shutdown which can happen only after the connection has been
1797          * established.  If the connection is established, schedule the
1798          * connection to be finished off by the connd.  Otherwise the connd is
1799          * already dealing with it (either to set it up or tear it down).
1800          * Caller holds kib_global_lock exclusively in irq context */
1801         unsigned long     flags;
1802         kib_peer_t       *peer = conn->ibc_peer;
1803
1804         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1805
1806         if (error != 0 && conn->ibc_comms_error == 0)
1807                 conn->ibc_comms_error = error;
1808
1809         if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
1810                 return; /* already being handled  */
1811
1812         if (error == 0 &&
1813             list_empty(&conn->ibc_tx_queue) &&
1814             list_empty(&conn->ibc_tx_queue_rsrvd) &&
1815             list_empty(&conn->ibc_tx_queue_nocred) &&
1816             list_empty(&conn->ibc_active_txs)) {
1817                 CDEBUG(D_NET, "closing conn to %s\n",
1818                        libcfs_nid2str(peer->ibp_nid));
1819         } else {
1820                 CDEBUG(D_NETERROR, "Closing conn to %s: error %d%s%s%s%s\n",
1821                        libcfs_nid2str(peer->ibp_nid), error,
1822                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
1823                        list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
1824                        list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
1825                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
1826         }
1827
1828         list_del(&conn->ibc_list);
1829         /* connd (see below) takes over ibc_list's ref */
1830
1831         if (list_empty (&peer->ibp_conns) &&    /* no more conns */
1832             kiblnd_peer_active(peer)) {         /* still in peer table */
1833                 kiblnd_unlink_peer_locked(peer);
1834
1835                 /* set/clear error on last conn */
1836                 peer->ibp_error = conn->ibc_comms_error;
1837         }
1838
1839         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
1840
1841         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
1842
1843         list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
1844         wake_up (&kiblnd_data.kib_connd_waitq);
1845
1846         spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
1847 }
1848
1849 void
1850 kiblnd_close_conn (kib_conn_t *conn, int error)
1851 {
1852         unsigned long flags;
1853
1854         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1855
1856         kiblnd_close_conn_locked(conn, error);
1857
1858         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1859 }
1860
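/* Handle receives that arrived before the connection reached
 * ESTABLISHED; they were parked on ibc_early_rxs.  The global lock is
 * dropped around each one so kiblnd_handle_rx() runs unlocked. */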
1861 void
1862 kiblnd_handle_early_rxs(kib_conn_t *conn)
1863 {
1864         unsigned long    flags;
1865         kib_rx_t        *rx;
1866
1867         LASSERT (!in_interrupt());
1868         LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1869
1870         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1871         while (!list_empty(&conn->ibc_early_rxs)) {
1872                 rx = list_entry(conn->ibc_early_rxs.next,
1873                                 kib_rx_t, rx_list);
1874                 list_del(&rx->rx_list);
1875                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1876
1877                 kiblnd_handle_rx(rx);
1878
1879                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1880         }
1881         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1882 }
1883
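/* Fail all transmits on 'txs' with -ECONNABORTED.  A tx still on the
 * wire (tx_sending != 0) is left for its send completion to finish off;
 * everything else moves to a local zombie list and is completed outside
 * ibc_lock. */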
1884 void
1885 kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
1886 {
1887         LIST_HEAD           (zombies);
1888         struct list_head    *tmp;
1889         struct list_head    *nxt;
1890         kib_tx_t            *tx;
1891
1892         spin_lock(&conn->ibc_lock);
1893
1894         list_for_each_safe (tmp, nxt, txs) {
1895                 tx = list_entry (tmp, kib_tx_t, tx_list);
1896
1897                 if (txs == &conn->ibc_active_txs) {
1898                         LASSERT (!tx->tx_queued);
1899                         LASSERT (tx->tx_waiting ||
1900                                  tx->tx_sending != 0);
1901                 } else {
1902                         LASSERT (tx->tx_queued);
1903                 }
1904
1905                 tx->tx_status = -ECONNABORTED;
1906                 tx->tx_waiting = 0;
1907
1908                 if (tx->tx_sending == 0) {
1909                         tx->tx_queued = 0;
1910                         list_del (&tx->tx_list);
1911                         list_add (&tx->tx_list, &zombies);
1912                 }
1913         }
1914
1915         spin_unlock(&conn->ibc_lock);
1916
1917         kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
1918                            &zombies, -ECONNABORTED);
1919 }
1920
1921 void
1922 kiblnd_finalise_conn (kib_conn_t *conn)
1923 {
1924         LASSERT (!in_interrupt());
1925         LASSERT (conn->ibc_state > IBLND_CONN_INIT);
1926
1927         kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
1928
1929         /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
1930          * for connections that didn't get as far as being connected, because
1931          * rdma_disconnect() does this for free. */
1932         kiblnd_abort_receives(conn);
1933
1934         /* Complete all tx descs not waiting for sends to complete.
1935          * NB we should be safe from RDMA now that the QP has changed state */
1936
1937         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
1938         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
1939         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
1940         kiblnd_abort_txs(conn, &conn->ibc_active_txs);
1941
1942         kiblnd_handle_early_rxs(conn);
1943 }
1944
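/* A connection attempt (active or passive) has failed: drop the
 * corresponding connecting/accepting count and, if this was the last
 * attempt in flight and no conns exist, unlink the peer and complete its
 * blocked transmits with -EHOSTUNREACH. */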
1945 void
1946 kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
1947 {
1948         LIST_HEAD        (zombies);
1949         unsigned long     flags;
1950
1951         LASSERT (error != 0);
1952         LASSERT (!in_interrupt());
1953
1954         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1955
1956         if (active) {
1957                 LASSERT (peer->ibp_connecting > 0);
1958                 peer->ibp_connecting--;
1959         } else {
1960                 LASSERT (peer->ibp_accepting > 0);
1961                 peer->ibp_accepting--;
1962         }
1963
1964         if (peer->ibp_connecting != 0 ||
1965             peer->ibp_accepting != 0) {
1966                 /* another connection attempt under way... */
1967                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1968                 return;
1969         }
1970
1971         if (list_empty(&peer->ibp_conns)) {
1972                 /* Take peer's blocked transmits to complete with error */
1973                 list_add(&zombies, &peer->ibp_tx_queue);
1974                 list_del_init(&peer->ibp_tx_queue);
1975
1976                 if (kiblnd_peer_active(peer))
1977                         kiblnd_unlink_peer_locked(peer);
1978
1979                 peer->ibp_error = error;
1980         } else {
1981                 /* Can't have blocked transmits if there are connections */
1982                 LASSERT (list_empty(&peer->ibp_tx_queue));
1983         }
1984
1985         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1986
1987         kiblnd_peer_notify(peer);
1988
1989         if (list_empty (&zombies))
1990                 return;
1991
1992         CDEBUG (D_NETERROR, "Deleting messages for %s: connection failed\n",
1993                 libcfs_nid2str(peer->ibp_nid));
1994
1995         kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
1996 }
1997
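/* Connection establishment has completed, successfully or not.  On
 * failure, clean up via kiblnd_peer_connect_failed().  On success, mark
 * the conn ESTABLISHED, close any stale conns from a different peer
 * incarnation, then queue the transmits that were blocked waiting for
 * this connection; if the peer was deleted or a comms error already
 * occurred, those transmits complete with -ECONNABORTED instead. */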
1998 void
1999 kiblnd_connreq_done(kib_conn_t *conn, int status)
2000 {
2001         kib_peer_t        *peer = conn->ibc_peer;
2002         kib_tx_t          *tx;
2003         struct list_head   txs;
2004         unsigned long      flags;
2005         int                active;
2006
2007         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2008
2009         CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
2010                libcfs_nid2str(peer->ibp_nid), active,
2011                conn->ibc_version, status);
2012
2013         LASSERT (!in_interrupt());
2014         LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
2015                   peer->ibp_connecting > 0) ||
2016                  (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
2017                   peer->ibp_accepting > 0));
2018
2019         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
2020         conn->ibc_connvars = NULL;
2021
2022         if (status != 0) {
2023                 /* failed to establish connection */
2024                 kiblnd_peer_connect_failed(peer, active, status);
2025                 kiblnd_finalise_conn(conn);
2026                 return;
2027         }
2028
2029         /* connection established */
2030         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2031
2032         conn->ibc_last_send = jiffies;
2033         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
2034         kiblnd_peer_alive(peer);
2035
2036         /* Add conn to peer's list and nuke any dangling conns from a different
2037          * peer instance... */
2038         kiblnd_conn_addref(conn);               /* +1 ref for ibc_list */
2039         list_add(&conn->ibc_list, &peer->ibp_conns);
2040         if (active)
2041                 peer->ibp_connecting--;
2042         else
2043                 peer->ibp_accepting--;
2044
2045         if (peer->ibp_version == 0) {
2046                 peer->ibp_version     = conn->ibc_version;
2047                 peer->ibp_incarnation = conn->ibc_incarnation;
2048         }
2049
2050         if (peer->ibp_version     != conn->ibc_version ||
2051             peer->ibp_incarnation != conn->ibc_incarnation) {
2052                 kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
2053                                                 conn->ibc_incarnation);
2054                 peer->ibp_version     = conn->ibc_version;
2055                 peer->ibp_incarnation = conn->ibc_incarnation;
2056         }
2057
2058         /* grab pending txs while I have the lock */
2059         list_add(&txs, &peer->ibp_tx_queue);
2060         list_del_init(&peer->ibp_tx_queue);
2061
2062         if (!kiblnd_peer_active(peer) ||        /* peer has been deleted */
2063             conn->ibc_comms_error != 0) {       /* error has happened already */
2064                 lnet_ni_t *ni = peer->ibp_ni;
2065
2066                 /* start to shut down connection */
2067                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
2068                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2069
2070                 kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
2071
2072                 return;
2073         }
2074
2075         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2076
2077         /* Schedule blocked txs */
2078         spin_lock (&conn->ibc_lock);
2079         while (!list_empty (&txs)) {
2080                 tx = list_entry (txs.next, kib_tx_t, tx_list);
2081                 list_del(&tx->tx_list);
2082
2083                 kiblnd_queue_tx_locked(tx, conn);
2084         }
2085         spin_unlock (&conn->ibc_lock);
2086
2087         kiblnd_check_sends(conn);
2088
2089         /* schedule blocked rxs */
2090         kiblnd_handle_early_rxs(conn);
2091 }
2092
2093 void
2094 kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
2095 {
2096         int          rc;
2097
2098         rc = rdma_reject(cmid, rej, sizeof(*rej));
2099
2100         if (rc != 0)
2101                 CWARN("Error %d sending reject\n", rc);
2102 }
2103
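/* Handle an incoming connection request: validate the CONNREQ carried in
 * the private data, find the target NI, create (or find) the peer,
 * resolving connection races in favour of the higher NID, then create
 * the conn and rdma_accept() with a CONNACK as private data.  Returns 0
 * once the conn owns the cmid; rejections carry a kib_rej_t telling the
 * peer why. */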
2104 int
2105 kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
2106 {
2107         rwlock_t              *g_lock = &kiblnd_data.kib_global_lock;
2108         kib_msg_t             *reqmsg = priv;
2109         kib_msg_t             *ackmsg;
2110         kib_dev_t             *ibdev;
2111         kib_peer_t            *peer;
2112         kib_peer_t            *peer2;
2113         kib_conn_t            *conn;
2114         lnet_ni_t             *ni  = NULL;
2115         kib_net_t             *net = NULL;
2116         lnet_nid_t             nid;
2117         struct rdma_conn_param cp;
2118         kib_rej_t              rej;
2119         int                    version = IBLND_MSG_VERSION;
2120         unsigned long          flags;
2121         int                    rc;
2122
2123         LASSERT (!in_interrupt());
2124
2125         /* cmid inherits 'context' from the corresponding listener id */
2126         ibdev = (kib_dev_t *)cmid->context;
2127         LASSERT (ibdev != NULL);
2128
2129         memset(&rej, 0, sizeof(rej));
2130         rej.ibr_magic                = IBLND_MSG_MAGIC;
2131         rej.ibr_why                  = IBLND_REJECT_FATAL;
2132         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
2133
2134         if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
2135                 CERROR("Short connection request\n");
2136                 goto failed;
2137         }
2138
2139         /* Future protocol version compatibility support!  If the
2140          * o2iblnd-specific protocol changes, or when LNET unifies
2141          * protocols over all LNDs, the initial connection will
2142          * negotiate a protocol version.  I trap this here to avoid
2143          * console errors; the reject tells the peer which protocol I
2144          * speak. */
2145         if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
2146             reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
2147                 goto failed;
2148         if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
2149             reqmsg->ibm_version != IBLND_MSG_VERSION &&
2150             reqmsg->ibm_version != IBLND_MSG_VERSION_1)
2151                 goto failed;
2152         if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
2153             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
2154             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
2155                 goto failed;
2156
2157         rc = kiblnd_unpack_msg(reqmsg, priv_nob);
2158         if (rc != 0) {
2159                 CERROR("Can't parse connection request: %d\n", rc);
2160                 goto failed;
2161         }
2162
2163         nid = reqmsg->ibm_srcnid;
2164         ni  = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
2165
2166         if (ni != NULL) {
2167                 net = (kib_net_t *)ni->ni_data;
2168                 rej.ibr_incarnation = net->ibn_incarnation;
2169         }
2170
2171         if (ni == NULL ||                         /* no matching net */
2172             ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
2173             net->ibn_dev != ibdev) {              /* wrong device */
2174                 CERROR("Can't accept %s: bad dst nid %s\n",
2175                        libcfs_nid2str(nid),
2176                        libcfs_nid2str(reqmsg->ibm_dstnid));
2177
2178                 goto failed;
2179         }
2180
2181         /* check time stamp as soon as possible */
2182         if (reqmsg->ibm_dststamp != 0 &&
2183             reqmsg->ibm_dststamp != net->ibn_incarnation) {
2184                 CWARN("Stale connection request\n");
2185                 rej.ibr_why = IBLND_REJECT_CONN_STALE;
2186                 goto failed;
2187         }
2188
2189         /* I can accept peer's version */
2190         version = reqmsg->ibm_version;
2191
2192         if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
2193                 CERROR("Unexpected connreq msg type: %x from %s\n",
2194                        reqmsg->ibm_type, libcfs_nid2str(nid));
2195                 goto failed;
2196         }
2197
2198         if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
2199             IBLND_MSG_QUEUE_SIZE(version)) {
2200                 CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
2201                        libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
2202                        IBLND_MSG_QUEUE_SIZE(version));
2203
2204                 if (version == IBLND_MSG_VERSION)
2205                         rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
2206
2207                 goto failed;
2208         }
2209
2210         if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
2211             IBLND_RDMA_FRAGS(version)) {
2212                 CERROR("Can't accept %s(version %x): "
2213                        "incompatible max_frags %d (%d wanted)\n",
2214                        libcfs_nid2str(nid), version,
2215                        reqmsg->ibm_u.connparams.ibcp_max_frags,
2216                        IBLND_RDMA_FRAGS(version));
2217
2218                 if (version == IBLND_MSG_VERSION)
2219                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2220
2221                 goto failed;
2223         }
2224
2225         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2226                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
2227                        libcfs_nid2str(nid),
2228                        reqmsg->ibm_u.connparams.ibcp_max_msg_size,
2229                        IBLND_MSG_SIZE);
2230                 goto failed;
2231         }
2232
2233         /* assume 'nid' is a new peer; create one */
2234         rc = kiblnd_create_peer(ni, &peer, nid);
2235         if (rc != 0) {
2236                 CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
2237                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2238                 goto failed;
2239         }
2240
2241         write_lock_irqsave(g_lock, flags);
2242
2243         peer2 = kiblnd_find_peer_locked(nid);
2244         if (peer2 != NULL) {
2245                 if (peer2->ibp_version == 0) {
2246                         peer2->ibp_version     = version;
2247                         peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2248                 }
2249
2250                 /* not the guy I've talked with */
2251                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
2252                     peer2->ibp_version     != version) {
2253                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
2254                         write_unlock_irqrestore(g_lock, flags);
2255
2256                         CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
2257                               libcfs_nid2str(nid), peer2->ibp_version, version);
2258
2259                         kiblnd_peer_decref(peer);
2260                         rej.ibr_why = IBLND_REJECT_CONN_STALE;
2261                         goto failed;
2262                 }
2263
2264                 /* tie-break connection race in favour of the higher NID */
2265                 if (peer2->ibp_connecting != 0 &&
2266                     nid < ni->ni_nid) {
2267                         write_unlock_irqrestore(g_lock, flags);
2268
2269                         CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
2270
2271                         kiblnd_peer_decref(peer);
2272                         rej.ibr_why = IBLND_REJECT_CONN_RACE;
2273                         goto failed;
2274                 }
2275
2276                 peer2->ibp_accepting++;
2277                 kiblnd_peer_addref(peer2);
2278
2279                 write_unlock_irqrestore(g_lock, flags);
2280                 kiblnd_peer_decref(peer);
2281                 peer = peer2;
2282         } else {
2283                 /* Brand new peer */
2284                 LASSERT (peer->ibp_accepting == 0);
2285                 LASSERT (peer->ibp_version == 0 &&
2286                          peer->ibp_incarnation == 0);
2287
2288                 peer->ibp_accepting   = 1;
2289                 peer->ibp_version     = version;
2290                 peer->ibp_incarnation = reqmsg->ibm_srcstamp;
2291
2292                 /* I have a ref on ni that prevents it being shut down */
2293                 LASSERT (net->ibn_shutdown == 0);
2294
2295                 kiblnd_peer_addref(peer);
2296                 list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
2297
2298                 write_unlock_irqrestore(g_lock, flags);
2299         }
2300
2301         conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
2302         if (conn == NULL) {
2303                 kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
2304                 kiblnd_peer_decref(peer);
2305                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2306                 goto failed;
2307         }
2308
2309         /* conn now "owns" cmid, so I return success from here on to ensure the
2310          * CM callback doesn't destroy cmid. */
2311
2312         conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
2313         conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
2314         conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
2315         LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
2316                  <= IBLND_RX_MSGS(version));
2317
2318         ackmsg = &conn->ibc_connvars->cv_msg;
2319         memset(ackmsg, 0, sizeof(*ackmsg));
2320
2321         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
2322                         sizeof(ackmsg->ibm_u.connparams));
2323         ackmsg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
2324         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2325         ackmsg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
2326
2327         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
2328
2329         memset(&cp, 0, sizeof(cp));
2330         cp.private_data        = ackmsg;
2331         cp.private_data_len    = ackmsg->ibm_nob;
2332         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
2333         cp.initiator_depth     = 0;
2334         cp.flow_control        = 1;
2335         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2336         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2337
2338         CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
2339
2340         rc = rdma_accept(cmid, &cp);
2341         if (rc != 0) {
2342                 CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
2343                 rej.ibr_version = version;
2344                 rej.ibr_why     = IBLND_REJECT_FATAL;
2345
2346                 kiblnd_reject(cmid, &rej);
2347                 kiblnd_connreq_done(conn, rc);
2348                 kiblnd_conn_decref(conn);
2349         }
2350
2351         lnet_ni_decref(ni);
2352         return 0;
2353
2354  failed:
2355         if (ni != NULL)
2356                 lnet_ni_decref(ni);
2357
2358         rej.ibr_version = version;
2359         rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
2360         rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
2361         kiblnd_reject(cmid, &rej);
2362
2363         return -ECONNREFUSED;
2364 }
2365
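/* Retry an active connect after a recoverable rejection, adopting the
 * version and incarnation the peer advertised.  Only retry if transmits
 * are still queued and no other connection attempt is in progress. */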
2366 void
2367 kiblnd_reconnect (kib_conn_t *conn, int version,
2368                   __u64 incarnation, int why, kib_connparams_t *cp)
2369 {
2370         kib_peer_t    *peer = conn->ibc_peer;
2371         char          *reason;
2372         int            retry = 0;
2373         unsigned long  flags;
2374
2375         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2376         LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */
2377
2378         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2379
2380         /* retry connection if it's still needed and no other connection
2381          * attempts (active or passive) are in progress */
2382         if (!list_empty(&peer->ibp_tx_queue) &&
2383             peer->ibp_connecting == 1 &&
2384             peer->ibp_accepting == 0) {
2385                 retry = 1;
2386                 peer->ibp_connecting++;
2387
2388                 peer->ibp_version     = version;
2389                 peer->ibp_incarnation = incarnation;
2390         }
2391
2392         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2393
2394         if (!retry)
2395                 return;
2396
2397         switch (why) {
2398         default:
2399                 reason = "Unknown";
2400                 break;
2401
2402         case IBLND_REJECT_CONN_STALE:
2403                 reason = "stale";
2404                 break;
2405
2406         case IBLND_REJECT_CONN_RACE:
2407                 reason = "conn race";
2408                 break;
2409
2410         case IBLND_REJECT_CONN_UNCOMPAT:
2411                 reason = "version negotiation";
2412                 break;
2413         }
2414
2415         CDEBUG(D_NETERROR, "%s: retrying (%s), %x, %x, "
2416                            "queue_dep: %d, max_frag: %d, msg_size: %d\n",
2417                libcfs_nid2str(peer->ibp_nid),
2418                reason, IBLND_MSG_VERSION, version,
2419                cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
2420                cp != NULL ? cp->ibcp_max_frags   : IBLND_RDMA_FRAGS(version),
2421                cp != NULL ? cp->ibcp_max_msg_size: IBLND_MSG_SIZE);
2422
2423         kiblnd_connect_peer(peer);
2424 }
2425
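/* Decode an RDMA_CM_EVENT_REJECTED on an active connect.  A
 * consumer-defined rejection may carry a kib_rej_t (possibly
 * byte-swapped) whose ibr_why selects either a retry via
 * kiblnd_reconnect() or a console error; anything else is just logged.
 * Either way the conn finishes with -ECONNREFUSED. */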
2426 void
2427 kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
2428 {
2429         kib_peer_t    *peer = conn->ibc_peer;
2430
2431         LASSERT (!in_interrupt());
2432         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2433
2434         switch (reason) {
2435         case IB_CM_REJ_STALE_CONN:
2436                 kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
2437                                  IBLND_REJECT_CONN_STALE, NULL);
2438                 break;
2439
2440         case IB_CM_REJ_CONSUMER_DEFINED:
2441                 if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
2442                         kib_rej_t        *rej         = priv;
2443                         kib_connparams_t *cp          = NULL;
2444                         int               flip        = 0;
2445                         __u64             incarnation = -1;
2446
2447                         /* NB. default incarnation is -1 because:
2448                          * a) V1 ignores the dst incarnation in the connreq.
2449                          * b) V2 provides an incarnation while rejecting me,
2450                          *    so the -1 will be overwritten.
2451                          *
2452                          * If I connect to a V1 peer with the V2 protocol and
2453                          * it rejects me and then upgrades to V2, I know
2454                          * nothing about the upgrade and reconnect with V1;
2455                          * the upgraded V2 peer can then tell I'm talking to
2456                          * its old incarnation and rejects me (incarnation is
2457                          * -1). */
2458
2459                         if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
2460                             rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
2461                                 __swab32s(&rej->ibr_magic);
2462                                 __swab16s(&rej->ibr_version);
2463                                 flip = 1;
2464                         }
2465
2466                         if (priv_nob >= sizeof(kib_rej_t) &&
2467                             rej->ibr_version > IBLND_MSG_VERSION_1) {
2468                                 /* priv_nob is always 148 in the current OFED
2469                                  * (IB_CM_REJ_PRIVATE_DATA_SIZE), so we still
2470                                  * need to check the version explicitly. */
2471                                 cp = &rej->ibr_cp;
2472
2473                                 if (flip) {
2474                                         __swab64s(&rej->ibr_incarnation);
2475                                         __swab16s(&cp->ibcp_queue_depth);
2476                                         __swab16s(&cp->ibcp_max_frags);
2477                                         __swab32s(&cp->ibcp_max_msg_size);
2478                                 }
2479
2480                                 incarnation = rej->ibr_incarnation;
2481                         }
2482
2483                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
2484                             rej->ibr_magic != LNET_PROTO_MAGIC) {
2485                                 CERROR("%s rejected: consumer defined fatal error\n",
2486                                        libcfs_nid2str(peer->ibp_nid));
2487                                 break;
2488                         }
2489
2490                         if (rej->ibr_version != IBLND_MSG_VERSION &&
2491                             rej->ibr_version != IBLND_MSG_VERSION_1) {
2492                                 CERROR("%s rejected: o2iblnd version %x error\n",
2493                                        libcfs_nid2str(peer->ibp_nid),
2494                                        rej->ibr_version);
2495                                 break;
2496                         }
2497
2498                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
2499                             rej->ibr_version == IBLND_MSG_VERSION_1) {
2500                                 CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
2501                                        libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
2502
2503                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
2504                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
2505                         }
2506
2507                         switch (rej->ibr_why) {
2508                         case IBLND_REJECT_CONN_RACE:
2509                         case IBLND_REJECT_CONN_STALE:
2510                         case IBLND_REJECT_CONN_UNCOMPAT:
2511                                 kiblnd_reconnect(conn, rej->ibr_version,
2512                                                  incarnation, rej->ibr_why, cp);
2513                                 break;
2514
2515                         case IBLND_REJECT_MSG_QUEUE_SIZE:
2516                                 CERROR("%s rejected: incompatible message queue depth %d, %d\n",
2517                                        libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth,
2518                                        IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
2519                                 break;
2520
2521                         case IBLND_REJECT_RDMA_FRAGS:
2522                                 CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
2523                                        libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags,
2524                                        IBLND_RDMA_FRAGS(conn->ibc_version));
2525                                 break;
2526
2527                         case IBLND_REJECT_NO_RESOURCES:
2528                                 CERROR("%s rejected: o2iblnd no resources\n",
2529                                        libcfs_nid2str(peer->ibp_nid));
2530                                 break;
2531
2532                         case IBLND_REJECT_FATAL:
2533                                 CERROR("%s rejected: o2iblnd fatal error\n",
2534                                        libcfs_nid2str(peer->ibp_nid));
2535                                 break;
2536
2537                         default:
2538                                 CERROR("%s rejected: o2iblnd reason %d\n",
2539                                        libcfs_nid2str(peer->ibp_nid),
2540                                        rej->ibr_why);
2541                                 break;
2542                         }
2543                         break;
2544                 }
2545                 /* fall through */
2546         default:
2547                 CDEBUG(D_NETERROR, "%s rejected: reason %d, size %d\n",
2548                        libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
2549                 break;
2550         }
2551
2552         kiblnd_connreq_done(conn, -ECONNREFUSED);
2553 }
2554
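/* Validate the CONNACK an active connect received as private data with
 * RDMA_CM_EVENT_ESTABLISHED.  Any mismatch is recorded in
 * ibc_comms_error: the QP is already established, so connreq_done(0)
 * brings the conn up and then immediately tears it down. */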
2555 void
2556 kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
2557 {
2558         kib_peer_t    *peer = conn->ibc_peer;
2559         lnet_ni_t     *ni   = peer->ibp_ni;
2560         kib_net_t     *net  = ni->ni_data;
2561         kib_msg_t     *msg  = priv;
2562         int            ver  = conn->ibc_version;
2563         int            rc   = kiblnd_unpack_msg(msg, priv_nob);
2564         unsigned long  flags;
2565
2566         LASSERT (net != NULL);
2567
2568         if (rc != 0) {
2569                 CERROR("Can't unpack connack from %s: %d\n",
2570                        libcfs_nid2str(peer->ibp_nid), rc);
2571                 goto failed;
2572         }
2573
2574         if (msg->ibm_type != IBLND_MSG_CONNACK) {
2575                 CERROR("Unexpected message %d from %s\n",
2576                        msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
2577                 rc = -EPROTO;
2578                 goto failed;
2579         }
2580
2581         if (ver != msg->ibm_version) {
2582                 CERROR("%s replied version %x, different from "
2583                        "requested version %x\n",
2584                        libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
2585                 rc = -EPROTO;
2586                 goto failed;
2587         }
2588
2589         if (msg->ibm_u.connparams.ibcp_queue_depth !=
2590             IBLND_MSG_QUEUE_SIZE(ver)) {
2591                 CERROR("%s has incompatible queue depth %d(%d wanted)\n",
2592                        libcfs_nid2str(peer->ibp_nid),
2593                        msg->ibm_u.connparams.ibcp_queue_depth,
2594                        IBLND_MSG_QUEUE_SIZE(ver));
2595                 rc = -EPROTO;
2596                 goto failed;
2597         }
2598
2599         if (msg->ibm_u.connparams.ibcp_max_frags !=
2600             IBLND_RDMA_FRAGS(ver)) {
2601                 CERROR("%s has incompatible max_frags %d (%d wanted)\n",
2602                        libcfs_nid2str(peer->ibp_nid),
2603                        msg->ibm_u.connparams.ibcp_max_frags,
2604                        IBLND_RDMA_FRAGS(ver));
2605                 rc = -EPROTO;
2606                 goto failed;
2607         }
2608
2609         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2610                 CERROR("%s max message size %d too big (%d max)\n",
2611                        libcfs_nid2str(peer->ibp_nid),
2612                        msg->ibm_u.connparams.ibcp_max_msg_size,
2613                        IBLND_MSG_SIZE);
2614                 rc = -EPROTO;
2615                 goto failed;
2616         }
2617
2618         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2619         if (msg->ibm_dstnid == ni->ni_nid &&
2620             msg->ibm_dststamp == net->ibn_incarnation)
2621                 rc = 0;
2622         else
2623                 rc = -ESTALE;
2624         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2625
2626         if (rc != 0) {
2627                 CERROR("Bad connection reply from %s, rc = %d, "
2628                        "version: %x, max_frags: %d\n",
2629                        libcfs_nid2str(peer->ibp_nid), rc,
2630                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
2631                 goto failed;
2632         }
2633
2634         conn->ibc_incarnation      = msg->ibm_srcstamp;
2635         conn->ibc_credits          =
2636         conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
2637         LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
2638                  <= IBLND_RX_MSGS(ver));
2639
2640         kiblnd_connreq_done(conn, 0);
2641         return;
2642
2643  failed:
2644         /* NB My QP has already established itself, so I handle anything going
2645          * wrong here by setting ibc_comms_error.
2646          * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
2647          * immediately tears it down. */
2648
2649         LASSERT (rc != 0);
2650         conn->ibc_comms_error = rc;
2651         kiblnd_connreq_done(conn, 0);
2652 }
2653
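/* Initiate an active connect: build a CONNREQ with my connection
 * parameters as the private data and rdma_connect().  The version
 * defaults to IBLND_MSG_VERSION unless an earlier exchange pinned the
 * peer's version. */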
2654 int
2655 kiblnd_active_connect (struct rdma_cm_id *cmid)
2656 {
2657         kib_peer_t              *peer = (kib_peer_t *)cmid->context;
2658         kib_conn_t              *conn;
2659         kib_msg_t               *msg;
2660         struct rdma_conn_param   cp;
2661         int                      version;
2662         __u64                    incarnation;
2663         unsigned long            flags;
2664         int                      rc;
2665
2666         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2667
2668         incarnation = peer->ibp_incarnation;
2669         version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
2670
2671         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2672
2673         conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
2674         if (conn == NULL) {
2675                 kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
2676                 kiblnd_peer_decref(peer); /* lose cmid's ref */
2677                 return -ENOMEM;
2678         }
2679
2680         /* conn "owns" cmid now, so I return success from here on to ensure the
2681          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
2682          * on peer */
2683
2684         msg = &conn->ibc_connvars->cv_msg;
2685
2686         memset(msg, 0, sizeof(*msg));
2687         kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
2688         msg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
2689         msg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
2690         msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2691
2692         kiblnd_pack_msg(peer->ibp_ni, msg, version,
2693                         0, peer->ibp_nid, incarnation);
2694
2695         memset(&cp, 0, sizeof(cp));
2696         cp.private_data        = msg;
2697         cp.private_data_len    = msg->ibm_nob;
2698         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
2699         cp.initiator_depth     = 0;
2700         cp.flow_control        = 1;
2701         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2702         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2703
2704         LASSERT(cmid->context == (void *)conn);
2705         LASSERT(conn->ibc_cmid == cmid);
2706
2707         rc = rdma_connect(cmid, &cp);
2708         if (rc != 0) {
2709                 CERROR("Can't connect to %s: %d\n",
2710                        libcfs_nid2str(peer->ibp_nid), rc);
2711                 kiblnd_connreq_done(conn, rc);
2712                 kiblnd_conn_decref(conn);
2713         }
2714
2715         return 0;
2716 }
2717
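/* RDMA CM event dispatcher.  NB cmid->context is the listener's
 * kib_dev_t for CONNECT_REQUEST, the kib_peer_t while the address and
 * route are resolving, and the kib_conn_t thereafter; returning non-zero
 * destroys the cmid. */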
2718 int
2719 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
2720 {
2721         kib_peer_t  *peer;
2722         kib_conn_t  *conn;
2723         int          rc;
2724
2725         switch (event->event) {
2726         default:
2727                 LBUG();
2728
2729         case RDMA_CM_EVENT_CONNECT_REQUEST:
2730                 /* destroy cmid on failure */
2731                 rc = kiblnd_passive_connect(cmid,
2732                                             (void *)KIBLND_CONN_PARAM(event),
2733                                             KIBLND_CONN_PARAM_LEN(event));
2734                 CDEBUG(D_NET, "connreq: %d\n", rc);
2735                 return rc;
2736
2737         case RDMA_CM_EVENT_ADDR_ERROR:
2738                 peer = (kib_peer_t *)cmid->context;
2739                 CDEBUG(D_NETERROR, "%s: ADDR ERROR %d\n",
2740                        libcfs_nid2str(peer->ibp_nid), event->status);
2741                 kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
2742                 kiblnd_peer_decref(peer);
2743                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
2744
2745         case RDMA_CM_EVENT_ADDR_RESOLVED:
2746                 peer = (kib_peer_t *)cmid->context;
2747
2748                 CDEBUG(D_NET, "%s Addr resolved: %d\n",
2749                        libcfs_nid2str(peer->ibp_nid), event->status);
2750
2751                 if (event->status != 0) {
2752                         CDEBUG(D_NETERROR, "Can't resolve address for %s: %d\n",
2753                                libcfs_nid2str(peer->ibp_nid), event->status);
2754                         rc = event->status;
2755                 } else {
2756                         rc = rdma_resolve_route(
2757                                 cmid, *kiblnd_tunables.kib_timeout * 1000);
2758                         if (rc == 0)
2759                                 return 0;
2760                         /* Can't initiate route resolution */
2761                         CERROR("Can't resolve route for %s: %d\n",
2762                                libcfs_nid2str(peer->ibp_nid), rc);
2763                 }
2764                 kiblnd_peer_connect_failed(peer, 1, rc);
2765                 kiblnd_peer_decref(peer);
2766                 return rc;                      /* rc != 0 destroys cmid */
2767
2768         case RDMA_CM_EVENT_ROUTE_ERROR:
2769                 peer = (kib_peer_t *)cmid->context;
2770                 CDEBUG(D_NETERROR, "%s: ROUTE ERROR %d\n",
2771                        libcfs_nid2str(peer->ibp_nid), event->status);
2772                 kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
2773                 kiblnd_peer_decref(peer);
2774                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
2775
2776         case RDMA_CM_EVENT_ROUTE_RESOLVED:
2777                 peer = (kib_peer_t *)cmid->context;
2778                 CDEBUG(D_NET, "%s Route resolved: %d\n",
2779                        libcfs_nid2str(peer->ibp_nid), event->status);
2780
2781                 if (event->status == 0)
2782                         return kiblnd_active_connect(cmid);
2783
2784                 CDEBUG(D_NETERROR, "Can't resolve route for %s: %d\n",
2785                        libcfs_nid2str(peer->ibp_nid), event->status);
2786                 kiblnd_peer_connect_failed(peer, 1, event->status);
2787                 kiblnd_peer_decref(peer);
2788                 return event->status;           /* rc != 0 destroys cmid */
2789
2790         case RDMA_CM_EVENT_UNREACHABLE:
2791                 conn = (kib_conn_t *)cmid->context;
2792                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
2793                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
2794                 CDEBUG(D_NETERROR, "%s: UNREACHABLE %d\n",
2795                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
2796                 kiblnd_connreq_done(conn, -ENETDOWN);
2797                 kiblnd_conn_decref(conn);
2798                 return 0;
2799
2800         case RDMA_CM_EVENT_CONNECT_ERROR:
2801                 conn = (kib_conn_t *)cmid->context;
2802                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
2803                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
2804                 CDEBUG(D_NETERROR, "%s: CONNECT ERROR %d\n",
2805                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
2806                 kiblnd_connreq_done(conn, -ENOTCONN);
2807                 kiblnd_conn_decref(conn);
2808                 return 0;
2809
2810         case RDMA_CM_EVENT_REJECTED:
2811                 conn = (kib_conn_t *)cmid->context;
2812                 switch (conn->ibc_state) {
2813                 default:
2814                         LBUG();
2815
2816                 case IBLND_CONN_PASSIVE_WAIT:
2817                         CERROR ("%s: REJECTED %d\n",
2818                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
2819                                 event->status);
2820                         kiblnd_connreq_done(conn, -ECONNRESET);
2821                         break;
2822
2823                 case IBLND_CONN_ACTIVE_CONNECT:
2824                         kiblnd_rejected(conn, event->status,
2825                                         (void *)KIBLND_CONN_PARAM(event),
2826                                         KIBLND_CONN_PARAM_LEN(event));
2827                         break;
2828                 }
2829                 kiblnd_conn_decref(conn);
2830                 return 0;
2831
2832         case RDMA_CM_EVENT_ESTABLISHED:
2833                 conn = (kib_conn_t *)cmid->context;
2834                 switch (conn->ibc_state) {
2835                 default:
2836                         LBUG();
2837
2838                 case IBLND_CONN_PASSIVE_WAIT:
2839                         CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
2840                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
2841                         kiblnd_connreq_done(conn, 0);
2842                         break;
2843
2844                 case IBLND_CONN_ACTIVE_CONNECT:
2845                         CDEBUG(D_NET, "ESTABLISHED (active): %s\n",
2846                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
2847                         kiblnd_check_connreply(conn,
2848                                                (void *)KIBLND_CONN_PARAM(event),
2849                                                KIBLND_CONN_PARAM_LEN(event));
2850                         break;
2851                 }
2852                 /* net keeps its ref on conn! */
2853                 return 0;
2854
2855         case RDMA_CM_EVENT_DISCONNECTED:
2856                 conn = (kib_conn_t *)cmid->context;
2857                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
2858                         CERROR("%s DISCONNECTED\n",
2859                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
2860                         kiblnd_connreq_done(conn, -ECONNRESET);
2861                 } else {
2862                         kiblnd_close_conn(conn, 0);
2863                 }
2864                 kiblnd_conn_decref(conn);
2865                 return 0;
2866
2867         case RDMA_CM_EVENT_DEVICE_REMOVAL:
2868                 LCONSOLE_ERROR_MSG(0x131,
2869                                    "Received notification of device removal\n"
2870                                    "Please shut down LNET to allow this to proceed\n");
2871                 /* Can't remove network from underneath LNET for now, so I have
2872                  * to ignore this */
2873                 return 0;
2874         }
2875 }
2876
2877 int
2878 kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
2879 {
2880         kib_tx_t          *tx;
2881         struct list_head  *ttmp;
2882         int                timed_out = 0;
2883
2884         spin_lock(&conn->ibc_lock);
2885
2886         list_for_each (ttmp, txs) {
2887                 tx = list_entry (ttmp, kib_tx_t, tx_list);
2888
2889                 if (txs != &conn->ibc_active_txs) {
2890                         LASSERT (tx->tx_queued);
2891                 } else {
2892                         LASSERT (!tx->tx_queued);
2893                         LASSERT (tx->tx_waiting || tx->tx_sending != 0);
2894                 }
2895
2896                 if (time_after_eq (jiffies, tx->tx_deadline)) {
2897                         timed_out = 1;
2898                         CERROR("Timed out tx: %s, %lu seconds\n",
2899                                kiblnd_queue2str(conn, txs),
2900                                cfs_duration_sec(jiffies - tx->tx_deadline));
2901                         break;
2902                 }
2903         }
2904
2905         spin_unlock(&conn->ibc_lock);
2906         return timed_out;
2907 }
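
/* NB: tx_deadline is stamped when a tx is first queued for sending.
 * A sketch of that stamping, as done in kiblnd_queue_tx_locked()
 * earlier in this file (assuming the timeout tunable is in seconds):
 *
 *         tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
 *
 * kiblnd_check_txs() above therefore flags a timeout once kib_timeout
 * seconds elapse without the tx completing. */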
2908
2909 int
2910 kiblnd_conn_timed_out (kib_conn_t *conn)
2911 {
2912         return  kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
2913                 kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
2914                 kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
2915                 kiblnd_check_txs(conn, &conn->ibc_active_txs);
2916 }
2917
2918 void
2919 kiblnd_check_conns (int idx)
2920 {
2921         struct list_head  *peers = &kiblnd_data.kib_peers[idx];
2922         struct list_head  *ptmp;
2923         kib_peer_t        *peer;
2924         kib_conn_t        *conn;
2925         struct list_head  *ctmp;
2926         unsigned long      flags;
2927
2928  again:
2929         /* NB. We expect to have a look at all the peers and not find any
2930          * RDMAs to time out, so we just use a shared lock while we
2931          * take a look... */
2932         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2933
2934         list_for_each (ptmp, peers) {
2935                 peer = list_entry (ptmp, kib_peer_t, ibp_list);
2936
2937                 list_for_each (ctmp, &peer->ibp_conns) {
2938                         conn = list_entry (ctmp, kib_conn_t, ibc_list);
2939
2940                         LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
2941
2942                         /* In case we have enough credits to return via a
2943                          * NOOP, but there were no non-blocking tx descs
2944                          * free to do it last time... */
2945                         kiblnd_check_sends(conn);
2946
2947                         if (!kiblnd_conn_timed_out(conn))
2948                                 continue;
2949
2950                         /* Handle timeout by closing the whole connection.  We
2951                          * can only be sure RDMA activity has ceased once the
2952                          * QP has been modified. */
2953
2954                         kiblnd_conn_addref(conn); /* 1 ref for me... */
2955
2956                         read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2957                                                flags);
2958
2959                         CERROR("Timed out RDMA with %s (%lu seconds since last alive)\n",
2960                                libcfs_nid2str(peer->ibp_nid),
2961                                cfs_duration_sec(cfs_time_current() -
2962                                                 peer->ibp_last_alive));
2963
2964                         kiblnd_close_conn(conn, -ETIMEDOUT);
2965                         kiblnd_conn_decref(conn); /* ...until here */
2966
2967                         /* start again now I've dropped the lock */
2968                         goto again;
2969                 }
2970         }
2971
2972         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2973 }
2974
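
/* NB rdma_disconnect() below starts the CM disconnect handshake; the
 * remote side observes it as RDMA_CM_EVENT_DISCONNECTED, which is
 * handled in kiblnd_cm_callback() above. */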
2975 void
2976 kiblnd_disconnect_conn (kib_conn_t *conn)
2977 {
2978         LASSERT (!in_interrupt());
2979         LASSERT (current == kiblnd_data.kib_connd);
2980         LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
2981
2982         rdma_disconnect(conn->ibc_cmid);
2983         kiblnd_finalise_conn(conn);
2984
2985         kiblnd_peer_notify(conn->ibc_peer);
2986 }
2987
2988 int
2989 kiblnd_connd (void *arg)
2990 {
2991         wait_queue_t       wait;
2992         unsigned long      flags;
2993         kib_conn_t        *conn;
2994         int                timeout;
2995         int                i;
2996         int                dropped_lock;
2997         int                peer_index = 0;
2998         unsigned long      deadline = jiffies;
2999
3000         cfs_daemonize ("kiblnd_connd");
3001         cfs_block_allsigs ();
3002
3003         init_waitqueue_entry (&wait, current);
3004         kiblnd_data.kib_connd = current;
3005
3006         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
3007
3008         while (!kiblnd_data.kib_shutdown) {
3009
3010                 dropped_lock = 0;
3011
3012                 if (!list_empty (&kiblnd_data.kib_connd_zombies)) {
3013                         conn = list_entry (kiblnd_data.kib_connd_zombies.next,
3014                                            kib_conn_t, ibc_list);
3015                         list_del(&conn->ibc_list);
3016
3017                         spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
3018                         dropped_lock = 1;
3019
3020                         kiblnd_destroy_conn(conn);
3021
3022                         spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
3023                 }
3024
3025                 if (!list_empty (&kiblnd_data.kib_connd_conns)) {
3026                         conn = list_entry (kiblnd_data.kib_connd_conns.next,
3027                                            kib_conn_t, ibc_list);
3028                         list_del(&conn->ibc_list);
3029
3030                         spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
3031                         dropped_lock = 1;
3032
3033                         kiblnd_disconnect_conn(conn);
3034                         kiblnd_conn_decref(conn);
3035
3036                         spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
3037                 }
3038
3039                 /* careful with the jiffy wrap... */
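                /* (a note on the idiom: the subtraction is done in
                 * unsigned long arithmetic, so it stays correct across a
                 * jiffies overflow; casting the difference to a signed
                 * int then yields a negative value once the deadline has
                 * passed.) */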
3040                 timeout = (int)(deadline - jiffies);
3041                 if (timeout <= 0) {
3042                         const int n = 4;
3043                         const int p = 1;
3044                         int       chunk = kiblnd_data.kib_peer_hash_size;
3045
3046                         spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
3047                         dropped_lock = 1;
3048
3049                         /* Time to check for RDMA timeouts on a few more
3050                          * peers: I do checks every 'p' seconds on a
3051                          * proportion of the peer table and I need to check
3052                          * every connection 'n' times within a timeout
3053                          * interval, to ensure I detect a timeout on any
3054                          * connection within (n+1)/n times the timeout
3055                          * interval. */
3056
3057                         if (*kiblnd_tunables.kib_timeout > n * p)
3058                                 chunk = (chunk * n * p) /
3059                                         *kiblnd_tunables.kib_timeout;
3060                         if (chunk == 0)
3061                                 chunk = 1;
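
                        /* Worked example (numbers illustrative): with
                         * kib_timeout = 50, n = 4, p = 1 and a peer
                         * hash of 101 buckets, chunk = 101 * 4 * 1 / 50
                         * = 8, so 8 buckets are scanned each second and
                         * the whole table is swept in ~13 seconds, i.e.
                         * about n = 4 sweeps per 50-second timeout. */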
3062
3063                         for (i = 0; i < chunk; i++) {
3064                                 kiblnd_check_conns(peer_index);
3065                                 peer_index = (peer_index + 1) %
3066                                              kiblnd_data.kib_peer_hash_size;
3067                         }
3068
3069                         deadline += p * HZ;
3070                         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
3071                 }
3072
3073                 if (dropped_lock)
3074                         continue;
3075
3076                 /* Nothing to do: sleep for 'timeout' jiffies */
3077                 set_current_state (TASK_INTERRUPTIBLE);
3078                 add_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
3079                 spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
3080
3081                 schedule_timeout (timeout);
3082
3083                 set_current_state (TASK_RUNNING);
3084                 remove_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
3085                 spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
3086         }
3087
3088         spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
3089
3090         kiblnd_thread_fini();
3091         return (0);
3092 }
3093
3094 void
3095 kiblnd_qp_event(struct ib_event *event, void *arg)
3096 {
3097         kib_conn_t *conn = arg;
3098
3099         switch (event->event) {
3100         case IB_EVENT_COMM_EST:
3101                 CDEBUG(D_NET, "%s established\n",
3102                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
3103                 return;
3104
3105         default:
3106                 CERROR("%s: Async QP event type %d\n",
3107                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3108                 return;
3109         }
3110 }
3111
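/* kiblnd_complete() dispatches on wc->wr_id, which packs a completion
 * type into the low bits of the descriptor pointer.  A minimal sketch
 * of the encode side, assuming the helpers in o2iblnd.h follow the
 * usual pointer-tagging scheme (the mask is illustrative):
 *
 *         static inline __u64
 *         kiblnd_ptr2wreqid(void *ptr, int type)
 *         {
 *                 unsigned long lptr = (unsigned long)ptr;
 *
 *                 LASSERT((lptr & IBLND_WID_MASK) == 0);
 *                 LASSERT((type & ~IBLND_WID_MASK) == 0);
 *                 return (__u64)(lptr | type);
 *         }
 *
 * kiblnd_wreqid2type() masks out all but the tag bits and
 * kiblnd_wreqid2ptr() clears them to recover the original pointer. */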
3112 void
3113 kiblnd_complete (struct ib_wc *wc)
3114 {
3115         switch (kiblnd_wreqid2type(wc->wr_id)) {
3116         default:
3117                 LBUG();
3118
3119         case IBLND_WID_RDMA:
3120                 /* We only get RDMA completion notification if it fails.  All
3121                  * subsequent work items, including the final SEND will fail
3122                  * too.  However we can't print out any more info about the
3123                  * failing RDMA because 'tx' might be back on the idle list or
3124                  * even reused already if we didn't manage to post all our work
3125                  * items */
3126                 CDEBUG(D_NETERROR, "RDMA (tx: %p) failed: %d\n",
3127                        kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3128                 return;
3129
3130         case IBLND_WID_TX:
3131                 kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3132                 return;
3133
3134         case IBLND_WID_RX:
3135                 kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
3136                                    wc->byte_len);
3137                 return;
3138         }
3139 }
3140
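/* kiblnd_cq_completion() and kiblnd_cq_event() below are the handlers
 * registered when the connection's CQ is created (via ib_create_cq()
 * in kiblnd_create_conn()), so they may be called in interrupt
 * context; hence the irqsave locking. */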
3141 void
3142 kiblnd_cq_completion (struct ib_cq *cq, void *arg)
3143 {
3144         /* NB I'm not allowed to schedule this conn once its refcount has
3145          * reached 0.  Since fundamentally I'm racing with scheduler threads
3146          * consuming my CQ, I could be called after all completions have
3147          * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
3148          * and this CQ is about to be destroyed so I NOOP. */
3149         kib_conn_t     *conn = (kib_conn_t *)arg;
3150         unsigned long   flags;
3151
3152         LASSERT (cq == conn->ibc_cq);
3153
3154         spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
3155
3156         conn->ibc_ready = 1;
3157
3158         if (!conn->ibc_scheduled &&
3159             (conn->ibc_nrx > 0 ||
3160              conn->ibc_nsends_posted > 0)) {
3161                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
3162                 conn->ibc_scheduled = 1;
3163                 list_add_tail(&conn->ibc_sched_list,
3164                               &kiblnd_data.kib_sched_conns);
3165                 wake_up(&kiblnd_data.kib_sched_waitq);
3166         }
3167
3168         spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
3169 }
3170
3171 void
3172 kiblnd_cq_event(struct ib_event *event, void *arg)
3173 {
3174         kib_conn_t *conn = arg;
3175
3176         CERROR("%s: async CQ event type %d\n",
3177                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3178 }
3179
3180 int
3181 kiblnd_scheduler(void *arg)
3182 {
3183         long            id = (long)arg;
3184         wait_queue_t    wait;
3185         char            name[16];
3186         unsigned long   flags;
3187         kib_conn_t     *conn;
3188         struct ib_wc    wc;
3189         int             rc;
3190         int             did_something;
3191         int             busy_loops = 0;
3192
3193         snprintf(name, sizeof(name), "kiblnd_sd_%02ld", id);
3194         cfs_daemonize(name);
3195         cfs_block_allsigs();
3196
3197         init_waitqueue_entry(&wait, current);
3198
3199         spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
3200
3201         while (!kiblnd_data.kib_shutdown) {
3202                 if (busy_loops++ >= IBLND_RESCHED) {
3203                         spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
3204                                                flags);
3205
3206                         our_cond_resched();
3207                         busy_loops = 0;
3208
3209                         spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
3210                 }
3211
3212                 did_something = 0;
3213
3214                 if (!list_empty(&kiblnd_data.kib_sched_conns)) {
3215                         conn = list_entry(kiblnd_data.kib_sched_conns.next,
3216                                           kib_conn_t, ibc_sched_list);
3217                         /* take over kib_sched_conns' ref on conn... */
3218                         LASSERT(conn->ibc_scheduled);
3219                         list_del(&conn->ibc_sched_list);
3220                         conn->ibc_ready = 0;
3221
3222                         spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
3223                                                flags);
3224
3225                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3226                         if (rc == 0) {
3227                                 rc = ib_req_notify_cq(conn->ibc_cq,
3228                                                       IB_CQ_NEXT_COMP);
3229                                 if (rc < 0) {
3230                                         CWARN("%s: ib_req_notify_cq failed: %d, "
3231                                               "closing connection\n",
3232                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
3233                                         kiblnd_close_conn(conn, -EIO);
3234                                         kiblnd_conn_decref(conn);
3235                                         spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
3236                                         continue;
3237                                 }
3238
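                                /* NB a completion that arrived between
                                 * the first ib_poll_cq() and the re-arm
                                 * above would not generate a fresh CQ
                                 * event, so poll once more rather than
                                 * risk missing it. */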
3239                                 rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3240                         }
3241
3242                         if (rc < 0) {
3243                                 CWARN("%s: ib_poll_cq failed: %d, "
3244                                       "closing connection\n",
3245                                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
3246                                 kiblnd_close_conn(conn, -EIO);
3247                                 kiblnd_conn_decref(conn);
3248                                 spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
3249                                 continue;
3250                         }
3251
3252                         spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
3253                                           flags);
3254
3255                         if (rc != 0 || conn->ibc_ready) {
3256                                 /* There may be another completion waiting; get
3257                                  * another scheduler to check while I handle
3258                                  * this one... */
3259                                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
3260                                 list_add_tail(&conn->ibc_sched_list,
3261                                               &kiblnd_data.kib_sched_conns);
3262                                 wake_up(&kiblnd_data.kib_sched_waitq);
3263                         } else {
3264                                 conn->ibc_scheduled = 0;
3265                         }
3266
3267                         if (rc != 0) {
3268                                 spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
3269                                                        flags);
3270
3271                                 kiblnd_complete(&wc);
3272
3273                                 spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
3274                                                   flags);
3275                         }
3276
3277                         kiblnd_conn_decref(conn); /* ...drop my ref from above */
3278                         did_something = 1;
3279                 }
3280
3281                 if (did_something)
3282                         continue;
3283
3284                 set_current_state(TASK_INTERRUPTIBLE);
3285                 add_wait_queue_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
3286                 spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
3287
3288                 schedule();
3289                 busy_loops = 0;
3290
3291                 remove_wait_queue(&kiblnd_data.kib_sched_waitq, &wait);
3292                 set_current_state(TASK_RUNNING);
3293                 spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
3294         }
3295
3296         spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
3297
3298         kiblnd_thread_fini();
3299         return (0);
3300 }