LU-9120 lnet: handle fatal device error
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

#define MAX_CONN_RACES_BEFORE_ABORT 20

static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni);
static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
                                       int error);
static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx,
                               int type, int body_nob);
static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
                            int resid, struct kib_rdma_desc *dstrd, u64 dstcookie);
static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);

static void kiblnd_unmap_tx(struct kib_tx *tx);
static void kiblnd_check_sends_locked(struct kib_conn *conn);

void
kiblnd_tx_done(struct kib_tx *tx)
{
        struct lnet_msg *lntmsg[2];
        int         rc;
        int         i;

        LASSERT (!in_interrupt());
        LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
        LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
        LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer_ni response */
        LASSERT (tx->tx_pool != NULL);

        kiblnd_unmap_tx(tx);

        /* tx may have up to 2 lnet msgs to finalise */
        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
        rc = tx->tx_status;

        if (tx->tx_conn != NULL) {
                kiblnd_conn_decref(tx->tx_conn);
                tx->tx_conn = NULL;
        }

        tx->tx_nwrq = tx->tx_nsge = 0;
        tx->tx_status = 0;

        kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

        /* delay finalize until my descs have been freed */
        for (i = 0; i < 2; i++) {
                if (lntmsg[i] == NULL)
                        continue;

                /* propagate health status to LNet for requests */
                if (i == 0 && lntmsg[i])
                        lntmsg[i]->msg_health_status = tx->tx_hstatus;

                lnet_finalize(lntmsg[i], rc);
        }
}
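
/*
 * Note: kiblnd_tx_done() is the single retirement path for a tx.  It
 * unmaps any DMA state, drops the conn ref taken when the tx was
 * queued, returns the descriptor to its pool, and only then finalizes
 * up to two LNet messages, propagating the health status recorded in
 * tx_hstatus for the request (lntmsg[0]).
 */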

void
kiblnd_txlist_done(struct list_head *txlist, int status,
                   enum lnet_msg_hstatus hstatus)
{
        struct kib_tx *tx;

        while (!list_empty(txlist)) {
                tx = list_entry(txlist->next, struct kib_tx, tx_list);

                list_del(&tx->tx_list);
                /* complete now */
                tx->tx_waiting = 0;
                tx->tx_status = status;
                tx->tx_hstatus = hstatus;
                kiblnd_tx_done(tx);
        }
}

static struct kib_tx *
kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
{
        struct kib_net *net = ni->ni_data;
        struct list_head *node;
        struct kib_tx *tx;
        struct kib_tx_poolset *tps;

        tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
        node = kiblnd_pool_alloc_node(&tps->tps_poolset);
        if (node == NULL)
                return NULL;
        tx = container_of(node, struct kib_tx, tx_list);

        LASSERT (tx->tx_nwrq == 0);
        LASSERT (!tx->tx_queued);
        LASSERT (tx->tx_sending == 0);
        LASSERT (!tx->tx_waiting);
        LASSERT (tx->tx_status == 0);
        LASSERT (tx->tx_conn == NULL);
        LASSERT (tx->tx_lntmsg[0] == NULL);
        LASSERT (tx->tx_lntmsg[1] == NULL);
        LASSERT (tx->tx_nfrags == 0);

        tx->tx_gaps = false;
        tx->tx_hstatus = LNET_MSG_STATUS_OK;

        return tx;
}
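
/*
 * Note: idle tx descriptors come from the tx poolset of the CPT that
 * owns the target NID (lnet_cpt_of_nid()), keeping buffers local to
 * the scheduler that will post them.  A NULL return means the pool is
 * exhausted and the caller must fail or back off the send.
 */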

static void
kiblnd_drop_rx(struct kib_rx *rx)
{
        struct kib_conn *conn = rx->rx_conn;
        struct kib_sched_info *sched = conn->ibc_sched;
        unsigned long flags;

        spin_lock_irqsave(&sched->ibs_lock, flags);
        LASSERT(conn->ibc_nrx > 0);
        conn->ibc_nrx--;
        spin_unlock_irqrestore(&sched->ibs_lock, flags);

        kiblnd_conn_decref(conn);
}

int
kiblnd_post_rx(struct kib_rx *rx, int credit)
{
        struct kib_conn *conn = rx->rx_conn;
        struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr *bad_wrq = NULL;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
#endif
        int rc;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
                 credit == IBLND_POSTRX_PEER_CREDIT ||
                 credit == IBLND_POSTRX_RSRVD_CREDIT);
#ifdef HAVE_IB_GET_DMA_MR
        LASSERT(mr != NULL);

        rx->rx_sge.lkey   = mr->lkey;
#else
        rx->rx_sge.lkey   = conn->ibc_hdev->ibh_pd->local_dma_lkey;
#endif
        rx->rx_sge.addr   = rx->rx_msgaddr;
        rx->rx_sge.length = IBLND_MSG_SIZE;

        rx->rx_wrq.next = NULL;
        rx->rx_wrq.sg_list = &rx->rx_sge;
        rx->rx_wrq.num_sge = 1;
        rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

        LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
        LASSERT (rx->rx_nob >= 0);              /* not posted */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return 0;
        }

        rx->rx_nob = -1;                        /* flag posted */

        /* NB: need an extra reference after ib_post_recv because we don't
         * own this rx (and rx::rx_conn) anymore, LU-5678.
         */
        kiblnd_conn_addref(conn);
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
        if (unlikely(rc != 0)) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        }

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                goto out;

        if (unlikely(rc != 0)) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);     /* No more posts for this rx */
                goto out;
        }

        if (credit == IBLND_POSTRX_NO_CREDIT)
                goto out;

        spin_lock(&conn->ibc_lock);
        if (credit == IBLND_POSTRX_PEER_CREDIT)
                conn->ibc_outstanding_credits++;
        else
                conn->ibc_reserved_credits++;
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

out:
        kiblnd_conn_decref(conn);
        return rc;
}
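
/*
 * Note: reposting a receive buffer is also where a credit is given
 * back: IBLND_POSTRX_PEER_CREDIT queues a credit for return to the
 * peer_ni on the next outgoing message, while IBLND_POSTRX_RSRVD_CREDIT
 * feeds ibc_reserved_credits, which kiblnd_check_sends_locked() uses
 * to release queued PUT_REQ/GET_REQ transmits.  IBLND_POSTRX_NO_CREDIT
 * reposts the buffer without returning anything.
 */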

static struct kib_tx *
kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
        struct list_head *tmp;

        list_for_each(tmp, &conn->ibc_active_txs) {
                struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);

                LASSERT(!tx->tx_queued);
                LASSERT(tx->tx_sending != 0 || tx->tx_waiting);

                if (tx->tx_cookie != cookie)
                        continue;

                if (tx->tx_waiting &&
                    tx->tx_msg->ibm_type == txtype)
                        return tx;

                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
                      tx->tx_waiting ? "" : "NOT ",
                      tx->tx_msg->ibm_type, txtype);
        }
        return NULL;
}

static void
kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, u64 cookie)
{
        struct kib_tx *tx;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        int idle;

        spin_lock(&conn->ibc_lock);

        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
        if (tx == NULL) {
                spin_unlock(&conn->ibc_lock);

                CWARN("Unmatched completion type %x cookie %#llx from %s\n",
                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kiblnd_close_conn(conn, -EPROTO);
                return;
        }

        if (tx->tx_status == 0) {               /* success so far */
                if (status < 0) {               /* failed? */
                        tx->tx_status = status;
                        tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
                } else if (txtype == IBLND_MSG_GET_REQ) {
                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
                }
        }

        tx->tx_waiting = 0;

        idle = !tx->tx_queued && (tx->tx_sending == 0);
        if (idle)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(tx);
}
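
/*
 * Note: completion messages carry the cookie of the tx they answer;
 * kiblnd_find_waiting_tx_locked() matches it against ibc_active_txs,
 * and an unmatched cookie is treated as a protocol error that closes
 * the connection with -EPROTO.
 */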

static void
kiblnd_send_completion(struct kib_conn *conn, int type, int status, u64 cookie)
{
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);

        if (tx == NULL) {
                CERROR("Can't get tx for completion %x for %s\n",
                       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
        kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));

        kiblnd_queue_tx(tx, conn);
}

static void
kiblnd_handle_rx(struct kib_rx *rx)
{
        struct kib_msg *msg = rx->rx_msg;
        struct kib_conn   *conn = rx->rx_conn;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        int           credits = msg->ibm_credits;
        struct kib_tx *tx;
        int           rc = 0;
        int           rc2;
        int           post_credit;

        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        CDEBUG (D_NET, "Received %x[%d] from %s\n",
                msg->ibm_type, credits,
                libcfs_nid2str(conn->ibc_peer->ibp_nid));

        if (credits != 0) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);

                if (conn->ibc_credits + credits >
                    conn->ibc_queue_depth) {
                        rc2 = conn->ibc_credits;
                        spin_unlock(&conn->ibc_lock);

                        CERROR("Bad credits from %s: %d + %d > %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               rc2, credits,
                               conn->ibc_queue_depth);

                        kiblnd_close_conn(conn, -EPROTO);
                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
                        return;
                }

                conn->ibc_credits += credits;

                /* This ensures the credit taken by NOOP can be returned */
                if (msg->ibm_type == IBLND_MSG_NOOP &&
                    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
                        conn->ibc_outstanding_credits++;

                kiblnd_check_sends_locked(conn);
                spin_unlock(&conn->ibc_lock);
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Bad IBLND message type %x from %s\n",
                       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_NO_CREDIT;
                rc = -EPROTO;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                        break;
                }

                if (credits != 0) /* credit already posted */
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else              /* a keepalive NOOP */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_IMMEDIATE:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
                                msg->ibm_srcnid, rx, 0);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_NAK:
                CWARN ("PUT_NACK from %s\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_PUT_ACK:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;

                spin_lock(&conn->ibc_lock);
                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                        msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT (tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer_ni has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = tx->tx_nsge = 0;  /* overwrite PUT_REQ */

                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                tx->tx_waiting = 0;     /* clear waiting and queue atomically */
                kiblnd_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBLND_MSG_PUT_DONE:
                post_credit = IBLND_POSTRX_PEER_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_GET_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_GET_DONE:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kiblnd_close_conn(conn, rc);

        if (post_credit != IBLND_POSTRX_DONT_POST)
                kiblnd_post_rx(rx, post_credit);
}
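
/*
 * Note on flow control: every received message may return credits
 * (ibm_credits) to this side.  They are added to ibc_credits under
 * ibc_lock, capped by the negotiated queue depth; an overflow is a
 * protocol error.  On pre-OOB (v1) connections a NOOP consumes a
 * credit itself, so its credit is queued on ibc_outstanding_credits
 * to be handed back later.
 */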

static void
kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
        struct kib_msg *msg = rx->rx_msg;
        struct kib_conn   *conn = rx->rx_conn;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_net *net = ni->ni_data;
        int rc;
        int err = -EIO;

        LASSERT (net != NULL);
        LASSERT (rx->rx_nob < 0);               /* was posted */
        rx->rx_nob = 0;                         /* isn't now */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
                goto ignore;

        if (status != IB_WC_SUCCESS) {
                CNETERR("Rx from %s failed: %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                goto failed;
        }

        LASSERT (nob >= 0);
        rx->rx_nob = nob;

        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
        if (rc != 0) {
                CERROR ("Error %d unpacking rx from %s\n",
                        rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
            msg->ibm_dstnid != ni->ni_nid ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != net->ibn_incarnation) {
                CERROR ("Stale rx from %s\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                err = -ESTALE;
                goto failed;
        }

        /* set time last known alive */
        kiblnd_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
                unsigned long  flags;

                write_lock_irqsave(g_lock, flags);
                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(g_lock, flags);
                        return;
                }
                write_unlock_irqrestore(g_lock, flags);
        }
        kiblnd_handle_rx(rx);
        return;

 failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kiblnd_close_conn(conn, err);
 ignore:
        kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
}

static int
kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
                  struct kib_rdma_desc *rd, u32 nob)
{
        struct kib_hca_dev *hdev;
        struct kib_dev *dev;
        struct kib_fmr_poolset *fps;
        int                     cpt;
        int                     rc;
        int i;

        LASSERT(tx->tx_pool != NULL);
        LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);

        dev = net->ibn_dev;
        hdev = tx->tx_pool->tpo_hdev;
        cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;

        /*
         * If we're dealing with FastReg, but the device doesn't
         * support GAPS and the tx has GAPS, then there is no real point
         * in trying to map the memory, because it'll just fail. So
         * preemptively fail with an appropriate message
         */
        if ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED) &&
            !(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT) &&
            tx->tx_gaps) {
                CERROR("Using FastReg with no GAPS support, but tx has gaps. "
                       "Try setting use_fastreg_gaps to 1\n");
                return -EPROTONOSUPPORT;
        }

        /*
         * If the FMR pool does not support gaps but the tx has gaps,
         * then we should make sure that the number of fragments we'll
         * be sending over fits within the number of fragments
         * negotiated on the connection; otherwise we won't be able to
         * RDMA the data.  We need to honour the fragment count
         * negotiated on the connection for backwards compatibility.
         */
        if (tx->tx_gaps && (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)) {
                if (tx->tx_conn &&
                    tx->tx_conn->ibc_max_frags <= rd->rd_nfrags) {
                        CERROR("TX number of frags (%d) is >= connection"
                               " max frags (%d). Consider setting peer's"
                               " map_on_demand to 256\n", tx->tx_nfrags,
                               tx->tx_conn->ibc_max_frags);
                        return -EFBIG;
                }
        }

        fps = net->ibn_fmr_ps[cpt];
        rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
        if (rc != 0) {
                CERROR("Can't map %u bytes: %d\n", nob, rc);
                return rc;
        }

        /*
         * If rd is not tx_rd, it's going to get sent to a peer_ni, who will
         * need the rkey
         */
        rd->rd_key = tx->tx_fmr.fmr_key;
        /*
         * for FastReg or FMR with no gaps we can accumulate all
         * the fragments in one FastReg or FMR fragment.
         */
        if (((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) && !tx->tx_gaps) ||
            (dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)) {
                /* FMR requires zero based address */
                if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
                        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
                rd->rd_frags[0].rf_nob = nob;
                rd->rd_nfrags = 1;
        } else {
                /*
                 * We're transmitting with gaps using FMR.
                 * We'll need to use multiple fragments and identify the
                 * zero based address of each fragment.
                 */
                for (i = 0; i < rd->rd_nfrags; i++) {
                        rd->rd_frags[i].rf_addr &= ~hdev->ibh_page_mask;
                        rd->rd_frags[i].rf_addr += i << hdev->ibh_page_shift;
                }
        }

        return 0;
}
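
/*
 * Note: on a successful FMR/FastReg mapping the payload collapses into
 * a single virtually contiguous fragment keyed by fmr_key; only the
 * FMR-with-gaps case keeps multiple fragments, each rebased to its
 * zero-based address within the mapped region.
 */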

static void
kiblnd_unmap_tx(struct kib_tx *tx)
{
        if (tx->tx_fmr.fmr_pfmr || tx->tx_fmr.fmr_frd)
                kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);

        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                tx->tx_nfrags = 0;
        }
}

#ifdef HAVE_IB_GET_DMA_MR
static struct ib_mr *
kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
{
        struct kib_net *net = ni->ni_data;
        struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;

        tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

        /*
         * if map-on-demand is turned on and the device supports
         * either FMR or FastReg then use that. Otherwise use global
         * memory regions. If that's not available either, then you're
         * dead in the water and fail the operation.
         */
        if (tunables->lnd_map_on_demand &&
            (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED ||
             net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED))
                return NULL;

        /*
         * hdev->ibh_mrs can be NULL. This case is dealt with gracefully
         * in the call chain. The mapping will fail with appropriate error
         * message.
         */
        return hdev->ibh_mrs;
}
#endif

static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
                         struct kib_rdma_desc *rd, int nfrags)
{
        struct kib_net *net = ni->ni_data;
        struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = NULL;
#endif
        __u32 nob;
        int i;

        /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the
         * RDMA sink */
        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        tx->tx_nfrags = nfrags;

        rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
                                          tx->tx_nfrags, tx->tx_dmadir);

        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                nob += rd->rd_frags[i].rf_nob;
        }

#ifdef HAVE_IB_GET_DMA_MR
        mr = kiblnd_find_rd_dma_mr(ni, rd);
        if (mr != NULL) {
                /* found pre-mapping MR */
                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
                return 0;
        }
#endif

        if (net->ibn_fmr_ps != NULL)
                return kiblnd_fmr_map_tx(net, tx, rd, nob);

        return -EINVAL;
}

static int kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx,
                               struct kib_rdma_desc *rd, unsigned int niov,
                               struct kvec *iov, int offset, int nob)
{
        struct kib_net *net = ni->ni_data;
        struct page *page;
        struct scatterlist *sg;
        unsigned long       vaddr;
        int                 fragnob;
        int                 page_offset;
        unsigned int        max_niov;

        LASSERT (nob > 0);
        LASSERT (niov > 0);
        LASSERT (net != NULL);

        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                niov--;
                iov++;
                LASSERT (niov > 0);
        }

        max_niov = niov;

        sg = tx->tx_frags;
        do {
                LASSERT(niov > 0);

                vaddr = ((unsigned long)iov->iov_base) + offset;
                page_offset = vaddr & (PAGE_SIZE - 1);
                page = lnet_kvaddr_to_page(vaddr);
                if (page == NULL) {
                        CERROR("Can't find page\n");
                        return -EFAULT;
                }

                fragnob = min((int)(iov->iov_len - offset), nob);
                fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);

                /*
                 * We're allowed to start at a non-aligned page offset in
                 * the first fragment and end at a non-aligned page offset
                 * in the last fragment.
                 */
                if ((fragnob < (int)PAGE_SIZE - page_offset) &&
                    (niov < max_niov) && nob > fragnob) {
                        CDEBUG(D_NET, "fragnob %d < available page %d: with"
                                      " remaining %d iovs with %d nob left\n",
                               fragnob, (int)PAGE_SIZE - page_offset, niov,
                               nob);
                        tx->tx_gaps = true;
                }

                sg_set_page(sg, page, fragnob, page_offset);
                sg = sg_next(sg);
                if (!sg) {
                        CERROR("lacking enough sg entries to map tx\n");
                        return -EFAULT;
                }

                if (offset + fragnob < iov->iov_len) {
                        offset += fragnob;
                } else {
                        offset = 0;
                        iov++;
                        niov--;
                }
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
                                struct kib_rdma_desc *rd, int nkiov,
                                lnet_kiov_t *kiov, int offset, int nob)
{
        struct kib_net *net = ni->ni_data;
        struct scatterlist *sg;
        int                 fragnob;
        int                 max_nkiov;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT(nob > 0);
        LASSERT(nkiov > 0);
        LASSERT(net != NULL);

        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                nkiov--;
                kiov++;
                LASSERT(nkiov > 0);
        }

        max_nkiov = nkiov;

        sg = tx->tx_frags;
        do {
                LASSERT(nkiov > 0);

                fragnob = min((int)(kiov->kiov_len - offset), nob);

                /*
                 * We're allowed to start at a non-aligned page offset in
                 * the first fragment and end at a non-aligned page offset
                 * in the last fragment.
                 */
                if ((fragnob < (int)(kiov->kiov_len - offset)) &&
                    nkiov < max_nkiov && nob > fragnob) {
                        CDEBUG(D_NET, "fragnob %d < available page %d: with"
                                      " remaining %d kiovs with %d nob left\n",
                               fragnob, (int)(kiov->kiov_len - offset),
                               nkiov, nob);
                        tx->tx_gaps = true;
                }

                sg_set_page(sg, kiov->kiov_page, fragnob,
                            kiov->kiov_offset + offset);
                sg = sg_next(sg);
                if (!sg) {
                        CERROR("lacking enough sg entries to map tx\n");
                        return -EFAULT;
                }

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}
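
/*
 * Note: both setup_rd_iov() and setup_rd_kiov() walk the source
 * buffers into tx->tx_frags scatterlist entries.  A fragment that ends
 * short of a page boundary while more data remains marks the tx as
 * having gaps (tx->tx_gaps), which steers kiblnd_fmr_map_tx() toward
 * the gap-capable mapping paths above.
 */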

static int
kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
__must_hold(&conn->ibc_lock)
{
        struct kib_msg *msg = tx->tx_msg;
        struct kib_peer_ni *peer_ni = conn->ibc_peer;
        int ver = conn->ibc_version;
        int rc;
        int done;

        LASSERT(tx->tx_queued);
        /* We rely on this for QP sizing */
        LASSERT(tx->tx_nwrq > 0 && tx->tx_nsge >= 0);
        LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);

        LASSERT(credit == 0 || credit == 1);
        LASSERT(conn->ibc_outstanding_credits >= 0);
        LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
        LASSERT(conn->ibc_credits >= 0);
        LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);

        if (conn->ibc_nsends_posted ==
            conn->ibc_queue_depth) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&   /* last credit reserved */
            msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                CDEBUG(D_NET, "%s: not using last credit\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        /* NB don't drop ibc_lock before bumping tx_sending */
        list_del(&tx->tx_list);
        tx->tx_queued = 0;

        if (msg->ibm_type == IBLND_MSG_NOOP &&
            (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
             (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                /* OK to drop when posted enough NOOPs, since
                 * kiblnd_check_sends_locked will queue NOOP again when
                 * posted NOOPs complete */
                spin_unlock(&conn->ibc_lock);
                tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
                kiblnd_tx_done(tx);
                spin_lock(&conn->ibc_lock);
                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                       libcfs_nid2str(peer_ni->ibp_nid),
                       conn->ibc_noops_posted);
                return 0;
        }

        kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
                        peer_ni->ibp_nid, conn->ibc_incarnation);

        conn->ibc_credits -= credit;
        conn->ibc_outstanding_credits = 0;
        conn->ibc_nsends_posted++;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted++;

        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
         * and then re-queued here.  It's (just) possible that
         * tx_sending is non-zero if we've not done the tx_complete()
         * from the first send; hence the ++ rather than = below. */
        tx->tx_sending++;
        list_add(&tx->tx_list, &conn->ibc_active_txs);

        /* I'm still holding ibc_lock! */
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
                rc = -ECONNABORTED;
        } else if (tx->tx_pool->tpo_pool.po_failed ||
                 conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
                /* close_conn will launch failover */
                rc = -ENETDOWN;
        } else {
                struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
                struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
                struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;

                if (frd != NULL) {
                        if (!frd->frd_valid) {
                                wr = &frd->frd_inv_wr.wr;
                                wr->next = &frd->frd_fastreg_wr.wr;
                        } else {
                                wr = &frd->frd_fastreg_wr.wr;
                        }
                        frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
                }

                LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
                         "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n",
                         bad->wr_id, bad->opcode, bad->send_flags,
                         libcfs_nid2str(conn->ibc_peer->ibp_nid));

                bad = NULL;
                rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
        }

        conn->ibc_last_send = ktime_get();

        if (rc == 0)
                return 0;

        /* NB credits are transferred in the actual
         * message, which can only be the last work item */
        conn->ibc_credits += credit;
        conn->ibc_outstanding_credits += msg->ibm_credits;
        conn->ibc_nsends_posted--;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        tx->tx_status = rc;
        tx->tx_waiting = 0;
        tx->tx_sending--;

        done = (tx->tx_sending == 0);
        if (done)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                CERROR("Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));
        else
                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));

        kiblnd_close_conn(conn, rc);

        if (done)
                kiblnd_tx_done(tx);

        spin_lock(&conn->ibc_lock);

        return -EIO;
}
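
/*
 * Note: when a FastReg descriptor is attached, the chain handed to
 * ib_post_send() becomes [invalidate ->] fastreg -> payload WRs: a
 * stale rkey is invalidated first (only if frd_valid is clear), the
 * region is re-registered, and then the message itself goes out, all
 * in one post.
 */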

static void
kiblnd_check_sends_locked(struct kib_conn *conn)
{
        int ver = conn->ibc_version;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_tx *tx;

        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                CDEBUG(D_NET, "%s too soon\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        LASSERT(conn->ibc_nsends_posted <=
                conn->ibc_queue_depth);
        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT (conn->ibc_reserved_credits >= 0);

        while (conn->ibc_reserved_credits > 0 &&
               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
                                struct kib_tx, tx_list);
                list_del(&tx->tx_list);
                list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
        }

        if (kiblnd_need_noop(conn)) {
                spin_unlock(&conn->ibc_lock);

                tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
                if (tx != NULL)
                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

                spin_lock(&conn->ibc_lock);
                if (tx != NULL)
                        kiblnd_queue_tx_locked(tx, conn);
        }

        for (;;) {
                int credit;

                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
                                        struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_noops)) {
                        LASSERT (!IBLND_OOB_CAPABLE(ver));
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_noops.next,
                                        struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_queue.next,
                                        struct kib_tx, tx_list);
                } else
                        break;

                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                        break;
        }
}
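
/*
 * Note on scheduling order: reserved credits first promote queued
 * PUT_REQ/GET_REQ txs, a NOOP is generated if credits must be
 * returned, and then txs drain by priority: no-credit completions,
 * NOOPs (v1 only), then the regular credit-consuming queue, stopping
 * as soon as kiblnd_post_tx_locked() reports the pipe is full.
 */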

static void
kiblnd_tx_complete(struct kib_tx *tx, int status)
{
        int           failed = (status != IB_WC_SUCCESS);
        struct kib_conn   *conn = tx->tx_conn;
        int           idle;

        LASSERT (tx->tx_sending > 0);

        if (failed) {
                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CNETERR("Tx -> %s cookie %#llx"
                                " sending %d waiting %d: failed %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
                                status);

                kiblnd_close_conn(conn, -EIO);
        } else {
                kiblnd_peer_alive(conn->ibc_peer);
        }

        spin_lock(&conn->ibc_lock);

        /* I could be racing with rdma completion.  Whoever makes 'tx' idle
         * gets to free it, which also drops its ref on 'conn'. */

        tx->tx_sending--;
        conn->ibc_nsends_posted--;
        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        if (failed) {
                tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
                tx->tx_waiting = 0;             /* don't wait for peer_ni */
                tx->tx_status = -EIO;
        }

        idle = (tx->tx_sending == 0) &&         /* This is the final callback */
               !tx->tx_waiting &&               /* Not waiting for peer_ni */
               !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
        if (idle)
                list_del(&tx->tx_list);

        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(tx);
}

static void
kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type,
                   int body_nob)
{
        struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
        struct ib_sge *sge = &tx->tx_msgsge;
        struct ib_rdma_wr *wrq;
        int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = hdev->ibh_mrs;
#endif

        LASSERT(tx->tx_nwrq >= 0);
        LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
        LASSERT(nob <= IBLND_MSG_SIZE);
#ifdef HAVE_IB_GET_DMA_MR
        LASSERT(mr != NULL);
#endif

        kiblnd_init_msg(tx->tx_msg, type, body_nob);

#ifdef HAVE_IB_GET_DMA_MR
        sge->lkey   = mr->lkey;
#else
        sge->lkey   = hdev->ibh_pd->local_dma_lkey;
#endif
        sge->addr   = tx->tx_msgaddr;
        sge->length = nob;

        wrq = &tx->tx_wrq[tx->tx_nwrq];
        memset(wrq, 0, sizeof(*wrq));

        wrq->wr.next            = NULL;
        wrq->wr.wr_id           = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
        wrq->wr.sg_list         = sge;
        wrq->wr.num_sge         = 1;
        wrq->wr.opcode          = IB_WR_SEND;
        wrq->wr.send_flags      = IB_SEND_SIGNALED;

        tx->tx_nwrq++;
}

static int
kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
                 int resid, struct kib_rdma_desc *dstrd, u64 dstcookie)
{
        struct kib_msg *ibmsg = tx->tx_msg;
        struct kib_rdma_desc *srcrd = tx->tx_rd;
        struct ib_rdma_wr *wrq = NULL;
        struct ib_sge     *sge;
        int                rc  = resid;
        int                srcidx;
        int                dstidx;
        int                sge_nob;
        int                wrq_sge;

        LASSERT(!in_interrupt());
        LASSERT(tx->tx_nwrq == 0 && tx->tx_nsge == 0);
        LASSERT(type == IBLND_MSG_GET_DONE || type == IBLND_MSG_PUT_DONE);

        for (srcidx = dstidx = wrq_sge = sge_nob = 0;
             resid > 0; resid -= sge_nob) {
                int     prev = dstidx;

                if (srcidx >= srcrd->rd_nfrags) {
                        CERROR("Src buffer exhausted: %d frags\n", srcidx);
                        rc = -EPROTO;
                        break;
                }

                if (dstidx >= dstrd->rd_nfrags) {
                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                        rc = -EPROTO;
                        break;
                }

                if (tx->tx_nwrq >= conn->ibc_max_frags) {
                        CERROR("RDMA has too many fragments for peer_ni %s (%d), "
                               "src idx/frags: %d/%d dst idx/frags: %d/%d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               conn->ibc_max_frags,
                               srcidx, srcrd->rd_nfrags,
                               dstidx, dstrd->rd_nfrags);
                        rc = -EMSGSIZE;
                        break;
                }

                sge_nob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
                                  kiblnd_rd_frag_size(dstrd, dstidx)), resid);

                sge = &tx->tx_sge[tx->tx_nsge];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                sge->length = sge_nob;

                if (wrq_sge == 0) {
                        wrq = &tx->tx_wrq[tx->tx_nwrq];

                        wrq->wr.next    = &(wrq + 1)->wr;
                        wrq->wr.wr_id   = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
                        wrq->wr.sg_list = sge;
                        wrq->wr.opcode  = IB_WR_RDMA_WRITE;
                        wrq->wr.send_flags = 0;

#ifdef HAVE_IB_RDMA_WR
                        wrq->remote_addr        = kiblnd_rd_frag_addr(dstrd,
                                                                      dstidx);
                        wrq->rkey               = kiblnd_rd_frag_key(dstrd,
                                                                     dstidx);
#else
                        wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd,
                                                                        dstidx);
                        wrq->wr.wr.rdma.rkey    = kiblnd_rd_frag_key(dstrd,
                                                                     dstidx);
#endif
                }

                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, sge_nob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, sge_nob);

                wrq_sge++;
                if (wrq_sge == *kiblnd_tunables.kib_wrq_sge || dstidx != prev) {
                        tx->tx_nwrq++;
                        wrq->wr.num_sge = wrq_sge;
                        wrq_sge = 0;
                }
                tx->tx_nsge++;
        }

        if (rc < 0)     /* no RDMA if completing with failure */
                tx->tx_nwrq = tx->tx_nsge = 0;

        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
                           type, sizeof(struct kib_completion_msg));

        return rc;
}
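
/*
 * Note: kiblnd_init_rdma() zips the source and destination fragment
 * lists together, emitting one SGE per overlap and starting a new
 * RDMA_WRITE work request whenever the destination fragment changes or
 * the per-WR SGE budget (kib_wrq_sge) is used up.  The completion
 * message (PUT_DONE/GET_DONE) is appended as the final work request so
 * the peer_ni learns the transfer status.
 */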

static void
kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
{
        struct list_head *q;
        s64 timeout_ns;

        LASSERT(tx->tx_nwrq > 0);       /* work items set up */
        LASSERT(!tx->tx_queued);        /* not queued for sending already */
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        timeout_ns = lnet_get_lnd_timeout() * NSEC_PER_SEC;
        tx->tx_queued = 1;
        tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);

        if (tx->tx_conn == NULL) {
                kiblnd_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
        } else {
                /* PUT_DONE first attached to conn as a PUT_REQ */
                LASSERT (tx->tx_conn == conn);
                LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
        }

        switch (tx->tx_msg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_PUT_REQ:
        case IBLND_MSG_GET_REQ:
                q = &conn->ibc_tx_queue_rsrvd;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                q = &conn->ibc_tx_queue_nocred;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        q = &conn->ibc_tx_queue_nocred;
                else
                        q = &conn->ibc_tx_noops;
                break;

        case IBLND_MSG_IMMEDIATE:
                q = &conn->ibc_tx_queue;
                break;
        }

        list_add_tail(&tx->tx_list, q);
}
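
/*
 * Note: queue selection mirrors the credit rules: PUT_REQ/GET_REQ wait
 * on the reserved-credit queue, completion messages ride the no-credit
 * queue, NOOPs get a private queue on v1 connections, and IMMEDIATE
 * payloads take the ordinary credit-consuming queue.
 */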

static void
kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
{
        spin_lock(&conn->ibc_lock);
        kiblnd_queue_tx_locked(tx, conn);
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);
}

static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
                               struct sockaddr_in *srcaddr,
                               struct sockaddr_in *dstaddr,
                               int timeout_ms)
{
        unsigned short port;
        int rc;

        /* allow the port to be reused */
        rc = rdma_set_reuseaddr(cmid, 1);
        if (rc != 0) {
                CERROR("Unable to set reuse on cmid: %d\n", rc);
                return rc;
        }

        /* look for a free privileged port */
        for (port = PROT_SOCK-1; port > 0; port--) {
                srcaddr->sin_port = htons(port);
                rc = rdma_resolve_addr(cmid,
                                       (struct sockaddr *)srcaddr,
                                       (struct sockaddr *)dstaddr,
                                       timeout_ms);
                if (rc == 0) {
                        CDEBUG(D_NET, "bound to port %hu\n", port);
                        return 0;
                } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
                        CDEBUG(D_NET, "bind to port %hu failed: %d\n",
                               port, rc);
                } else {
                        return rc;
                }
        }

        CERROR("Failed to bind to a free privileged port\n");
        return rc;
}
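
/*
 * Note: when *kiblnd_tunables.kib_use_priv_port is set, the connection
 * binds to the first free port below PROT_SOCK before resolving the
 * destination; -EADDRINUSE/-EADDRNOTAVAIL just means "try the next
 * port down".
 */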

static void
kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
{
        struct rdma_cm_id *cmid;
        struct kib_dev *dev;
        struct kib_net *net = peer_ni->ibp_ni->ni_data;
        struct sockaddr_in srcaddr;
        struct sockaddr_in dstaddr;
        int rc;

        LASSERT (net != NULL);
        LASSERT (peer_ni->ibp_connecting > 0);

        cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP,
                                     IB_QPT_RC);

        if (IS_ERR(cmid)) {
                CERROR("Can't create CMID for %s: %ld\n",
                       libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid));
                rc = PTR_ERR(cmid);
                goto failed;
        }

        dev = net->ibn_dev;
        memset(&srcaddr, 0, sizeof(srcaddr));
        srcaddr.sin_family = AF_INET;
        srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);

        memset(&dstaddr, 0, sizeof(dstaddr));
        dstaddr.sin_family = AF_INET;
        dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
        dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid));

        kiblnd_peer_addref(peer_ni);               /* cmid's ref */

        if (*kiblnd_tunables.kib_use_priv_port) {
                rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
                                         lnet_get_lnd_timeout() * 1000);
        } else {
                rc = rdma_resolve_addr(cmid,
                                       (struct sockaddr *)&srcaddr,
                                       (struct sockaddr *)&dstaddr,
                                       lnet_get_lnd_timeout() * 1000);
        }
        if (rc != 0) {
                /* Can't initiate address resolution:  */
                CERROR("Can't resolve addr for %s: %d\n",
                       libcfs_nid2str(peer_ni->ibp_nid), rc);
                goto failed2;
        }

        return;

 failed2:
        kiblnd_peer_connect_failed(peer_ni, 1, rc);
        kiblnd_peer_decref(peer_ni);               /* cmid's ref */
        rdma_destroy_id(cmid);
        return;
 failed:
        kiblnd_peer_connect_failed(peer_ni, 1, rc);
}

bool
kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
{
        rwlock_t         *glock = &kiblnd_data.kib_global_lock;
        char             *reason = NULL;
        struct list_head  txs;
        unsigned long     flags;

        INIT_LIST_HEAD(&txs);

        write_lock_irqsave(glock, flags);
        if (peer_ni->ibp_reconnecting == 0) {
                if (peer_ni->ibp_accepting)
                        reason = "accepting";
                else if (peer_ni->ibp_connecting)
                        reason = "connecting";
                else if (!list_empty(&peer_ni->ibp_conns))
                        reason = "connected";
                else /* connected then closed */
                        reason = "closed";

                goto no_reconnect;
        }

        if (peer_ni->ibp_accepting)
                CNETERR("Detecting race between accepting and reconnecting\n");
        peer_ni->ibp_reconnecting--;

        if (!kiblnd_peer_active(peer_ni)) {
                list_splice_init(&peer_ni->ibp_tx_queue, &txs);
                reason = "unlinked";
                goto no_reconnect;
        }

        peer_ni->ibp_connecting++;
        peer_ni->ibp_reconnected++;

        write_unlock_irqrestore(glock, flags);

        kiblnd_connect_peer(peer_ni);
        return true;

 no_reconnect:
        write_unlock_irqrestore(glock, flags);

        CWARN("Abort reconnection of %s: %s\n",
              libcfs_nid2str(peer_ni->ibp_nid), reason);
        kiblnd_txlist_done(&txs, -ECONNABORTED,
                           LNET_MSG_STATUS_LOCAL_ABORTED);
        return false;
}
1485
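/* Queue @tx (if any) for @nid, creating the peer_ni and initiating
 * connections as required.  Fast path: find an established conn under the
 * read lock.  Slow path: recheck under the write lock, then allocate a new
 * peer_ni, recheck again for a racing insertion, and finally start
 * lnd_conns_per_peer connection attempts. */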
1486 void
1487 kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
1488 {
1489         struct kib_peer_ni *peer_ni;
1490         struct kib_peer_ni *peer2;
1491         struct kib_conn *conn;
1492         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
1493         unsigned long      flags;
1494         int                rc;
1495         int                i;
1496         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
1497
1498         /* If I get here, I've committed to send, so I complete the tx with
1499          * failure on any problems */
1500
1501         LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
1502         LASSERT (tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */
1503
1504         /* First time, just use a read lock since I expect to find my peer_ni
1505          * connected */
1506         read_lock_irqsave(g_lock, flags);
1507
1508         peer_ni = kiblnd_find_peer_locked(ni, nid);
1509         if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) {
1510                 /* Found a peer_ni with an established connection */
1511                 conn = kiblnd_get_conn_locked(peer_ni);
1512                 kiblnd_conn_addref(conn); /* 1 ref for me... */
1513
1514                 read_unlock_irqrestore(g_lock, flags);
1515
1516                 if (tx != NULL)
1517                         kiblnd_queue_tx(tx, conn);
1518                 kiblnd_conn_decref(conn); /* ...to here */
1519                 return;
1520         }
1521
1522         read_unlock(g_lock);
1523         /* Re-try with a write lock.  Interrupts stay disabled across the
              * unlock/lock pair, so the 'flags' saved by read_lock_irqsave()
              * above is still valid for the final irqrestore */
1524         write_lock(g_lock);
1525
1526         peer_ni = kiblnd_find_peer_locked(ni, nid);
1527         if (peer_ni != NULL) {
1528                 if (list_empty(&peer_ni->ibp_conns)) {
1529                         /* found a peer_ni, but it's still connecting... */
1530                         LASSERT(kiblnd_peer_connecting(peer_ni));
1531                         if (tx != NULL)
1532                                 list_add_tail(&tx->tx_list,
1533                                               &peer_ni->ibp_tx_queue);
1534                         write_unlock_irqrestore(g_lock, flags);
1535                 } else {
1536                         conn = kiblnd_get_conn_locked(peer_ni);
1537                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1538
1539                         write_unlock_irqrestore(g_lock, flags);
1540
1541                         if (tx != NULL)
1542                                 kiblnd_queue_tx(tx, conn);
1543                         kiblnd_conn_decref(conn); /* ...to here */
1544                 }
1545                 return;
1546         }
1547
1548         write_unlock_irqrestore(g_lock, flags);
1549
1550         /* Allocate a peer_ni ready to add to the peer_ni table and retry */
1551         rc = kiblnd_create_peer(ni, &peer_ni, nid);
1552         if (rc != 0) {
1553                 CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid));
1554                 if (tx != NULL) {
1555                         tx->tx_status = -EHOSTUNREACH;
1556                         tx->tx_waiting = 0;
1557                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1558                         kiblnd_tx_done(tx);
1559                 }
1560                 return;
1561         }
1562
1563         write_lock_irqsave(g_lock, flags);
1564
1565         peer2 = kiblnd_find_peer_locked(ni, nid);
1566         if (peer2 != NULL) {
1567                 if (list_empty(&peer2->ibp_conns)) {
1568                         /* found a peer_ni, but it's still connecting... */
1569                         LASSERT(kiblnd_peer_connecting(peer2));
1570                         if (tx != NULL)
1571                                 list_add_tail(&tx->tx_list,
1572                                               &peer2->ibp_tx_queue);
1573                         write_unlock_irqrestore(g_lock, flags);
1574                 } else {
1575                         conn = kiblnd_get_conn_locked(peer2);
1576                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1577
1578                         write_unlock_irqrestore(g_lock, flags);
1579
1580                         if (tx != NULL)
1581                                 kiblnd_queue_tx(tx, conn);
1582                         kiblnd_conn_decref(conn); /* ...to here */
1583                 }
1584
1585                 kiblnd_peer_decref(peer_ni);
1586                 return;
1587         }
1588
1589         /* Brand new peer_ni */
1590         LASSERT(peer_ni->ibp_connecting == 0);
1591         tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
1592         peer_ni->ibp_connecting = tunables->lnd_conns_per_peer;
1593
1594         /* always called with a ref on ni, which prevents ni being shutdown */
1595         LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0);
1596
1597         if (tx != NULL)
1598                 list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
1599
1600         kiblnd_peer_addref(peer_ni);
1601         list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
1602
1603         write_unlock_irqrestore(g_lock, flags);
1604
1605         for (i = 0; i < tunables->lnd_conns_per_peer; i++)
1606                 kiblnd_connect_peer(peer_ni);
1607         kiblnd_peer_decref(peer_ni);
1608 }
1609
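/* LND send entry point.  A GET whose REPLY is too big for an immediate
 * message posts an RDMA sink and sends GET_REQ; a PUT or REPLY whose payload
 * is too big posts an RDMA source and sends PUT_REQ; everything that fits in
 * IBLND_MSG_SIZE is copied into an IMMEDIATE message. */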
1610 int
1611 kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
1612 {
1613         struct lnet_hdr *hdr = &lntmsg->msg_hdr;
1614         int               type = lntmsg->msg_type;
1615         struct lnet_process_id target = lntmsg->msg_target;
1616         int               target_is_router = lntmsg->msg_target_is_router;
1617         int               routing = lntmsg->msg_routing;
1618         unsigned int      payload_niov = lntmsg->msg_niov;
1619         struct kvec      *payload_iov = lntmsg->msg_iov;
1620         lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
1621         unsigned int      payload_offset = lntmsg->msg_offset;
1622         unsigned int      payload_nob = lntmsg->msg_len;
1623         struct kib_msg *ibmsg;
1624         struct kib_rdma_desc *rd;
1625         struct kib_tx *tx;
1626         int               nob;
1627         int               rc;
1628
1629         /* NB 'private' is different depending on what we're sending.... */
1630
1631         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
1632                payload_nob, payload_niov, libcfs_id2str(target));
1633
1634         LASSERT (payload_nob == 0 || payload_niov > 0);
1635         LASSERT (payload_niov <= LNET_MAX_IOV);
1636
1637         /* Thread context */
1638         LASSERT (!in_interrupt());
1639         /* payload is either all vaddrs or all pages */
1640         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
1641
1642         switch (type) {
1643         default:
1644                 LBUG();
1645                 return (-EIO);
1646
1647         case LNET_MSG_ACK:
1648                 LASSERT (payload_nob == 0);
1649                 break;
1650
1651         case LNET_MSG_GET:
1652                 if (routing || target_is_router)
1653                         break;                  /* send IMMEDIATE */
1654
1655                 /* is the REPLY message too small for RDMA? */
1656                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
1657                 if (nob <= IBLND_MSG_SIZE)
1658                         break;                  /* send IMMEDIATE */
1659
1660                 tx = kiblnd_get_idle_tx(ni, target.nid);
1661                 if (tx == NULL) {
1662                         CERROR("Can't allocate txd for GET to %s\n",
1663                                libcfs_nid2str(target.nid));
1664                         return -ENOMEM;
1665                 }
1666
1667                 ibmsg = tx->tx_msg;
1668                 rd = &ibmsg->ibm_u.get.ibgm_rd;
1669                 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
1670                         rc = kiblnd_setup_rd_iov(ni, tx, rd,
1671                                                  lntmsg->msg_md->md_niov,
1672                                                  lntmsg->msg_md->md_iov.iov,
1673                                                  0, lntmsg->msg_md->md_length);
1674                 else
1675                         rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1676                                                   lntmsg->msg_md->md_niov,
1677                                                   lntmsg->msg_md->md_iov.kiov,
1678                                                   0, lntmsg->msg_md->md_length);
1679                 if (rc != 0) {
1680                         CERROR("Can't setup GET sink for %s: %d\n",
1681                                libcfs_nid2str(target.nid), rc);
1682                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1683                         kiblnd_tx_done(tx);
1684                         return -EIO;
1685                 }
1686
1687                 nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
1688                 ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
1689                 ibmsg->ibm_u.get.ibgm_hdr = *hdr;
1690
1691                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
1692
1693                 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
1694                 if (tx->tx_lntmsg[1] == NULL) {
1695                         CERROR("Can't create reply for GET -> %s\n",
1696                                libcfs_nid2str(target.nid));
1697                         kiblnd_tx_done(tx);
1698                         return -EIO;
1699                 }
1700
1701                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
1702                 tx->tx_waiting = 1;             /* waiting for GET_DONE */
1703                 kiblnd_launch_tx(ni, tx, target.nid);
1704                 return 0;
1705
1706         case LNET_MSG_REPLY:
1707         case LNET_MSG_PUT:
1708                 /* Is the payload small enough not to need RDMA? */
1709                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
1710                 if (nob <= IBLND_MSG_SIZE)
1711                         break;                  /* send IMMEDIATE */
1712
1713                 tx = kiblnd_get_idle_tx(ni, target.nid);
1714                 if (tx == NULL) {
1715                         CERROR("Can't allocate %s txd for %s\n",
1716                                type == LNET_MSG_PUT ? "PUT" : "REPLY",
1717                                libcfs_nid2str(target.nid));
1718                         return -ENOMEM;
1719                 }
1720
1721                 if (payload_kiov == NULL)
1722                         rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1723                                                  payload_niov, payload_iov,
1724                                                  payload_offset, payload_nob);
1725                 else
1726                         rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1727                                                   payload_niov, payload_kiov,
1728                                                   payload_offset, payload_nob);
1729                 if (rc != 0) {
1730                         CERROR("Can't setup PUT src for %s: %d\n",
1731                                libcfs_nid2str(target.nid), rc);
1732                         kiblnd_tx_done(tx);
1733                         return -EIO;
1734                 }
1735
1736                 ibmsg = tx->tx_msg;
1737                 ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
1738                 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
1739                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
1740                                    sizeof(struct kib_putreq_msg));
1741
1742                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1743                 tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
1744                 kiblnd_launch_tx(ni, tx, target.nid);
1745                 return 0;
1746         }
1747
1748         /* send IMMEDIATE */
1749         LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
1750                 <= IBLND_MSG_SIZE);
1751
1752         tx = kiblnd_get_idle_tx(ni, target.nid);
1753         if (tx == NULL) {
1754                 CERROR("Can't send %d to %s: tx descs exhausted\n",
1755                        type, libcfs_nid2str(target.nid));
1756                 return -ENOMEM;
1757         }
1758
1759         ibmsg = tx->tx_msg;
1760         ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
1761
1762         if (payload_kiov != NULL)
1763                 lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
1764                                     offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
1765                                     payload_niov, payload_kiov,
1766                                     payload_offset, payload_nob);
1767         else
1768                 lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
1769                                    offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
1770                                    payload_niov, payload_iov,
1771                                    payload_offset, payload_nob);
1772
1773         nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
1774         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1775
1776         tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
1777         kiblnd_launch_tx(ni, tx, target.nid);
1778         return 0;
1779 }
1780
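/* Reply to an optimized GET: set up the local RDMA source and push the
 * payload directly into the sink described by the peer_ni's GET_REQ,
 * completing with GET_DONE. */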
1781 static void
1782 kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
1783 {
1784         struct lnet_process_id target = lntmsg->msg_target;
1785         unsigned int      niov = lntmsg->msg_niov;
1786         struct kvec      *iov = lntmsg->msg_iov;
1787         lnet_kiov_t      *kiov = lntmsg->msg_kiov;
1788         unsigned int      offset = lntmsg->msg_offset;
1789         unsigned int      nob = lntmsg->msg_len;
1790         struct kib_tx *tx;
1791         int               rc;
1792
1793         tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
1794         if (tx == NULL) {
1795                 CERROR("Can't get tx for REPLY to %s\n",
1796                        libcfs_nid2str(target.nid));
1797                 goto failed_0;
1798         }
1799
1800         if (nob == 0)
1801                 rc = 0;
1802         else if (kiov == NULL)
1803                 rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1804                                          niov, iov, offset, nob);
1805         else
1806                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1807                                           niov, kiov, offset, nob);
1808
1809         if (rc != 0) {
1810                 CERROR("Can't setup GET src for %s: %d\n",
1811                        libcfs_nid2str(target.nid), rc);
1812                 goto failed_1;
1813         }
1814
1815         rc = kiblnd_init_rdma(rx->rx_conn, tx,
1816                               IBLND_MSG_GET_DONE, nob,
1817                               &rx->rx_msg->ibm_u.get.ibgm_rd,
1818                               rx->rx_msg->ibm_u.get.ibgm_cookie);
1819         if (rc < 0) {
1820                 CERROR("Can't setup rdma for GET from %s: %d\n",
1821                        libcfs_nid2str(target.nid), rc);
1822                 goto failed_1;
1823         }
1824
1825         if (nob == 0) {
1826                 /* No RDMA: local completion may happen now! */
1827                 lnet_finalize(lntmsg, 0);
1828         } else {
1829                 /* RDMA: lnet_finalize(lntmsg) when it
1830                  * completes */
1831                 tx->tx_lntmsg[0] = lntmsg;
1832         }
1833
1834         kiblnd_queue_tx(tx, rx->rx_conn);
1835         return;
1836
1838 failed_1:
1839         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1840         kiblnd_tx_done(tx);
1841 failed_0:
1842         lnet_finalize(lntmsg, -EIO);
1843 }
1844
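/* LND receive entry point, dispatched on the received message type:
 * IMMEDIATE payloads are copied straight out; PUT_REQ gets an RDMA sink set
 * up and a PUT_ACK (or PUT_NAK on failure); GET_REQ either triggers
 * kiblnd_reply() or, if nothing matched, a GET_DONE carrying -ENODATA. */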
1845 int
1846 kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
1847             int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
1848             unsigned int offset, unsigned int mlen, unsigned int rlen)
1849 {
1850         struct kib_rx *rx = private;
1851         struct kib_msg *rxmsg = rx->rx_msg;
1852         struct kib_conn *conn = rx->rx_conn;
1853         struct kib_tx *tx;
1854         __u64        ibprm_cookie;
1855         int          nob;
1856         int          post_credit = IBLND_POSTRX_PEER_CREDIT;
1857         int          rc = 0;
1858
1859         LASSERT (mlen <= rlen);
1860         LASSERT (!in_interrupt());
1861         /* Either all pages or all vaddrs */
1862         LASSERT (!(kiov != NULL && iov != NULL));
1863
1864         switch (rxmsg->ibm_type) {
1865         default:
1866                 LBUG();
1867
1868         case IBLND_MSG_IMMEDIATE:
1869                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
1870                 if (nob > rx->rx_nob) {
1871                         CERROR ("Immediate message from %s too big: %d(%d)\n",
1872                                 libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
1873                                 nob, rx->rx_nob);
1874                         rc = -EPROTO;
1875                         break;
1876                 }
1877
1878                 if (kiov != NULL)
1879                         lnet_copy_flat2kiov(niov, kiov, offset,
1880                                             IBLND_MSG_SIZE, rxmsg,
1881                                             offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
1882                                             mlen);
1883                 else
1884                         lnet_copy_flat2iov(niov, iov, offset,
1885                                            IBLND_MSG_SIZE, rxmsg,
1886                                            offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
1887                                            mlen);
1888                 lnet_finalize(lntmsg, 0);
1889                 break;
1890
1891         case IBLND_MSG_PUT_REQ: {
1892                 struct kib_msg  *txmsg;
1893                 struct kib_rdma_desc *rd;
1894                 ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
1895
1896                 if (mlen == 0) {
1897                         lnet_finalize(lntmsg, 0);
1898                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1899                                                0, ibprm_cookie);
1900                         break;
1901                 }
1902
1903                 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
1904                 if (tx == NULL) {
1905                         CERROR("Can't allocate tx for %s\n",
1906                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
1907                         /* Not replying will break the connection */
1908                         rc = -ENOMEM;
1909                         break;
1910                 }
1911
1912                 txmsg = tx->tx_msg;
1913                 rd = &txmsg->ibm_u.putack.ibpam_rd;
1914                 if (kiov == NULL)
1915                         rc = kiblnd_setup_rd_iov(ni, tx, rd,
1916                                                  niov, iov, offset, mlen);
1917                 else
1918                         rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1919                                                   niov, kiov, offset, mlen);
1920                 if (rc != 0) {
1921                         CERROR("Can't setup PUT sink for %s: %d\n",
1922                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
1923                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1924                         kiblnd_tx_done(tx);
1925                         /* tell peer_ni it's over */
1926                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1927                                                rc, ibprm_cookie);
1928                         break;
1929                 }
1930
1931                 nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
1932                 txmsg->ibm_u.putack.ibpam_src_cookie = ibprm_cookie;
1933                 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
1934
1935                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
1936
1937                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1938                 tx->tx_waiting = 1;             /* waiting for PUT_DONE */
1939                 kiblnd_queue_tx(tx, conn);
1940
1941                 /* reposted buffer reserved for PUT_DONE */
1942                 post_credit = IBLND_POSTRX_NO_CREDIT;
1943                 break;
1944                 }
1945
1946         case IBLND_MSG_GET_REQ:
1947                 if (lntmsg != NULL) {
1948                         /* Optimized GET; RDMA lntmsg's payload */
1949                         kiblnd_reply(ni, rx, lntmsg);
1950                 } else {
1951                         /* GET didn't match anything */
1952                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
1953                                                -ENODATA,
1954                                                rxmsg->ibm_u.get.ibgm_cookie);
1955                 }
1956                 break;
1957         }
1958
1959         kiblnd_post_rx(rx, post_credit);
1960         return rc;
1961 }
1962
1963 int
1964 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
1965 {
1966         struct task_struct *task = kthread_run(fn, arg, name);
1967
1968         if (IS_ERR(task))
1969                 return PTR_ERR(task);
1970
1971         atomic_inc(&kiblnd_data.kib_nthreads);
1972         return 0;
1973 }
1974
1975 static void
1976 kiblnd_thread_fini (void)
1977 {
1978         atomic_dec (&kiblnd_data.kib_nthreads);
1979 }
1980
1981 static void
1982 kiblnd_peer_alive(struct kib_peer_ni *peer_ni)
1983 {
1984         /* This is racy, but everyone's only writing ktime_get_seconds() */
1985         peer_ni->ibp_last_alive = ktime_get_seconds();
1986         smp_mb();
1987 }
1988
1989 static void
1990 kiblnd_peer_notify(struct kib_peer_ni *peer_ni)
1991 {
1992         int           error = 0;
1993         time64_t last_alive = 0;
1994         unsigned long flags;
1995
1996         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1997
1998         if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
1999                 error = peer_ni->ibp_error;
2000                 peer_ni->ibp_error = 0;
2001
2002                 last_alive = peer_ni->ibp_last_alive;
2003         }
2004
2005         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2006
2007         if (error != 0)
2008                 lnet_notify(peer_ni->ibp_ni,
2009                             peer_ni->ibp_nid, 0, last_alive);
2010 }
2011
2012 void
2013 kiblnd_close_conn_locked(struct kib_conn *conn, int error)
2014 {
2015         /* This just does the immediate housekeeping.  'error' is zero for a
2016          * normal shutdown which can happen only after the connection has been
2017          * established.  If the connection is established, schedule the
2018          * connection to be finished off by the connd.  Otherwise the connd is
2019          * already dealing with it (either to set it up or tear it down).
2020          * Caller holds kib_global_lock exclusively in irq context */
2021         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2022         struct kib_dev *dev;
2023         unsigned long flags;
2024
2025         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
2026
2027         if (error != 0 && conn->ibc_comms_error == 0)
2028                 conn->ibc_comms_error = error;
2029
2030         if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
2031                 return; /* already being handled */
2032
2033         if (error == 0 &&
2034             list_empty(&conn->ibc_tx_noops) &&
2035             list_empty(&conn->ibc_tx_queue) &&
2036             list_empty(&conn->ibc_tx_queue_rsrvd) &&
2037             list_empty(&conn->ibc_tx_queue_nocred) &&
2038             list_empty(&conn->ibc_active_txs)) {
2039                 CDEBUG(D_NET, "closing conn to %s\n",
2040                        libcfs_nid2str(peer_ni->ibp_nid));
2041         } else {
2042                 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
2043                        libcfs_nid2str(peer_ni->ibp_nid), error,
2044                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
2045                        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
2046                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
2047                                                 "" : "(sending_rsrvd)",
2048                        list_empty(&conn->ibc_tx_queue_nocred) ?
2049                                                  "" : "(sending_nocred)",
2050                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
2051         }
2052
2053         dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev;
2054         if (peer_ni->ibp_next_conn == conn)
2055                 /* clear next_conn so it won't be used */
2056                 peer_ni->ibp_next_conn = NULL;
2057         list_del(&conn->ibc_list);
2058         /* connd (see below) takes over ibc_list's ref */
2059
2060         if (list_empty(&peer_ni->ibp_conns) &&    /* no more conns */
2061             kiblnd_peer_active(peer_ni)) {         /* still in peer_ni table */
2062                 kiblnd_unlink_peer_locked(peer_ni);
2063
2064                 /* set/clear error on last conn */
2065                 peer_ni->ibp_error = conn->ibc_comms_error;
2066         }
2067
2068         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
2069
2070         if (error != 0 &&
2071             kiblnd_dev_can_failover(dev)) {
2072                 list_add_tail(&dev->ibd_fail_list,
2073                               &kiblnd_data.kib_failed_devs);
2074                 wake_up(&kiblnd_data.kib_failover_waitq);
2075         }
2076
2077         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
2078
2079         list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
2080         wake_up(&kiblnd_data.kib_connd_waitq);
2081
2082         spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
2083 }
2084
2085 void
2086 kiblnd_close_conn(struct kib_conn *conn, int error)
2087 {
2088         unsigned long flags;
2089
2090         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2091
2092         kiblnd_close_conn_locked(conn, error);
2093
2094         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2095 }
2096
2097 static void
2098 kiblnd_handle_early_rxs(struct kib_conn *conn)
2099 {
2100         unsigned long flags;
2101         struct kib_rx *rx;
2102
2103         LASSERT(!in_interrupt());
2104         LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
2105
2106         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2107         while (!list_empty(&conn->ibc_early_rxs)) {
2108                 rx = list_entry(conn->ibc_early_rxs.next,
2109                                 struct kib_rx, rx_list);
2110                 list_del(&rx->rx_list);
2111                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2112
2113                 kiblnd_handle_rx(rx);
2114
2115                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2116         }
2117         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2118 }
2119
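/* Complete every tx on @txs that isn't mid-send with -ECONNABORTED, setting
 * a health status that distinguishes local, remote and network timeouts when
 * the connection failed with -ETIMEDOUT.  Transmits still on the wire are
 * left for kiblnd_tx_complete() to finish. */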
2120 static void
2121 kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
2122 {
2123         struct list_head         zombies = LIST_HEAD_INIT(zombies);
2124         struct list_head        *tmp;
2125         struct list_head        *nxt;
2126         struct kib_tx *tx;
2127
2128         spin_lock(&conn->ibc_lock);
2129
2130         list_for_each_safe(tmp, nxt, txs) {
2131                 tx = list_entry(tmp, struct kib_tx, tx_list);
2132
2133                 if (txs == &conn->ibc_active_txs) {
2134                         LASSERT(!tx->tx_queued);
2135                         LASSERT(tx->tx_waiting ||
2136                                 tx->tx_sending != 0);
2137                         if (conn->ibc_comms_error == -ETIMEDOUT) {
2138                                 if (tx->tx_waiting && !tx->tx_sending)
2139                                         tx->tx_hstatus =
2140                                           LNET_MSG_STATUS_REMOTE_TIMEOUT;
2141                                 else if (tx->tx_sending)
2142                                         tx->tx_hstatus =
2143                                           LNET_MSG_STATUS_NETWORK_TIMEOUT;
2144                         }
2145                 } else {
2146                         LASSERT(tx->tx_queued);
2147                         if (conn->ibc_comms_error == -ETIMEDOUT)
2148                                 tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
2149                         else
2150                                 tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
2151                 }
2152
2153                 tx->tx_status = -ECONNABORTED;
2154                 tx->tx_waiting = 0;
2155
2156                 /*
2157                  * TODO: This makes an assumption that
2158                  * kiblnd_tx_complete() will be called for each tx. If
2159                  * that event is dropped we could end up with stale
2160                  * connections floating around. We'd like to deal with
2161                  * that in a better way.
2162                  *
2163                  * Also that means we can exceed the timeout by many
2164                  * seconds.
2165                  */
2166                 if (tx->tx_sending == 0) {
2167                         tx->tx_queued = 0;
2168                         list_del(&tx->tx_list);
2169                         list_add(&tx->tx_list, &zombies);
2170                 }
2171         }
2172
2173         spin_unlock(&conn->ibc_lock);
2174
2175         /*
2176          * aborting transmits occurs when finalizing the connection.
2177          * The connection is finalized on error
2178          */
2179         kiblnd_txlist_done(&zombies, -ECONNABORTED, -1);
2180 }
2181
2182 static void
2183 kiblnd_finalise_conn(struct kib_conn *conn)
2184 {
2185         LASSERT (!in_interrupt());
2186         LASSERT (conn->ibc_state > IBLND_CONN_INIT);
2187
2188         kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
2189
2190         /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
2191          * for connections that didn't get as far as being connected, because
2192          * rdma_disconnect() does this for free. */
2193         kiblnd_abort_receives(conn);
2194
2195         /* Complete all tx descs not waiting for sends to complete.
2196          * NB we should be safe from RDMA now that the QP has changed state */
2197
2198         kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
2199         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
2200         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
2201         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
2202         kiblnd_abort_txs(conn, &conn->ibc_active_txs);
2203
2204         kiblnd_handle_early_rxs(conn);
2205 }
2206
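/* A connection attempt to @peer_ni (active or passive, per @active) has
 * failed with @error.  If other attempts are still in flight, nothing more
 * is done; otherwise the peer_ni is unlinked if it has no connections left
 * and its blocked transmits are completed with @error. */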
2207 static void
2208 kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
2209                            int error)
2210 {
2211         struct list_head zombies = LIST_HEAD_INIT(zombies);
2212         unsigned long   flags;
2213
2214         LASSERT (error != 0);
2215         LASSERT (!in_interrupt());
2216
2217         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2218
2219         if (active) {
2220                 LASSERT(peer_ni->ibp_connecting > 0);
2221                 peer_ni->ibp_connecting--;
2222         } else {
2223                 LASSERT (peer_ni->ibp_accepting > 0);
2224                 peer_ni->ibp_accepting--;
2225         }
2226
2227         if (kiblnd_peer_connecting(peer_ni)) {
2228                 /* another connection attempt under way... */
2229                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2230                                         flags);
2231                 return;
2232         }
2233
2234         peer_ni->ibp_reconnected = 0;
2235         if (list_empty(&peer_ni->ibp_conns)) {
2236                 /* Take peer_ni's blocked transmits to complete with error */
2237                 list_splice_init(&peer_ni->ibp_tx_queue, &zombies);
2238
2239                 if (kiblnd_peer_active(peer_ni))
2240                         kiblnd_unlink_peer_locked(peer_ni);
2241
2242                 peer_ni->ibp_error = error;
2243         } else {
2244                 /* Can't have blocked transmits if there are connections */
2245                 LASSERT(list_empty(&peer_ni->ibp_tx_queue));
2246         }
2247
2248         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2249
2250         kiblnd_peer_notify(peer_ni);
2251
2252         if (list_empty(&zombies))
2253                 return;
2254
2255         CNETERR("Deleting messages for %s: connection failed\n",
2256                 libcfs_nid2str(peer_ni->ibp_nid));
2257
2258         kiblnd_txlist_done(&zombies, error,
2259                            LNET_MSG_STATUS_LOCAL_DROPPED);
2260 }
2261
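/* Connection establishment has completed with @status.  On failure the conn
 * is torn down; on success it is marked ESTABLISHED, added to the peer_ni's
 * conn list (closing stale conns from a different incarnation), and the
 * transmits that queued up while connecting are scheduled on it. */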
2262 static void
2263 kiblnd_connreq_done(struct kib_conn *conn, int status)
2264 {
2265         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2266         struct kib_tx *tx;
2267         struct list_head txs;
2268         unsigned long    flags;
2269         int              active;
2270
2271         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2272
2273         CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
2274                libcfs_nid2str(peer_ni->ibp_nid), active,
2275                conn->ibc_version, status);
2276
2277         LASSERT (!in_interrupt());
2278         LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
2279                   peer_ni->ibp_connecting > 0) ||
2280                  (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
2281                   peer_ni->ibp_accepting > 0));
2282
2283         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
2284         conn->ibc_connvars = NULL;
2285
2286         if (status != 0) {
2287                 /* failed to establish connection */
2288                 kiblnd_peer_connect_failed(peer_ni, active, status);
2289                 kiblnd_finalise_conn(conn);
2290                 return;
2291         }
2292
2293         /* connection established */
2294         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2295
2296         conn->ibc_last_send = ktime_get();
2297         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
2298         kiblnd_peer_alive(peer_ni);
2299
2300         /* Add conn to peer_ni's list and nuke any dangling conns from a different
2301          * peer_ni instance... */
2302         kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
2303         list_add(&conn->ibc_list, &peer_ni->ibp_conns);
2304         peer_ni->ibp_reconnected = 0;
2305         if (active)
2306                 peer_ni->ibp_connecting--;
2307         else
2308                 peer_ni->ibp_accepting--;
2309
2310         if (peer_ni->ibp_version == 0) {
2311                 peer_ni->ibp_version     = conn->ibc_version;
2312                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2313         }
2314
2315         if (peer_ni->ibp_version     != conn->ibc_version ||
2316             peer_ni->ibp_incarnation != conn->ibc_incarnation) {
2317                 kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version,
2318                                                 conn->ibc_incarnation);
2319                 peer_ni->ibp_version     = conn->ibc_version;
2320                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2321         }
2322
2323         /* grab pending txs while I have the lock */
2324         INIT_LIST_HEAD(&txs);
2325         list_splice_init(&peer_ni->ibp_tx_queue, &txs);
2326
2327         if (!kiblnd_peer_active(peer_ni) ||        /* peer_ni has been deleted */
2328             conn->ibc_comms_error != 0) {       /* error has happened already */
2329
2330                 /* start to shut down connection */
2331                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
2332                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2333
2334                 kiblnd_txlist_done(&txs, -ECONNABORTED,
2335                                    LNET_MSG_STATUS_LOCAL_ERROR);
2336
2337                 return;
2338         }
2339
2340         /* +1 ref for myself, this connection is visible to other threads
2341          * now, refcount of peer:ibp_conns can be released by connection
2342          * close from either a different thread, or the calling of
2343          * kiblnd_check_sends_locked() below. See bz21911 for details.
2344          */
2345         kiblnd_conn_addref(conn);
2346         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2347
2348         /* Schedule blocked txs
2349          * Note: if we are running with conns_per_peer > 1, these blocked
2350          * txs will all get scheduled to the first connection which gets
2351          * scheduled.  We won't be using round robin on this first batch.
2352          */
2353         spin_lock(&conn->ibc_lock);
2354         while (!list_empty(&txs)) {
2355                 tx = list_entry(txs.next, struct kib_tx, tx_list);
2356                 list_del(&tx->tx_list);
2357
2358                 kiblnd_queue_tx_locked(tx, conn);
2359         }
2360         kiblnd_check_sends_locked(conn);
2361         spin_unlock(&conn->ibc_lock);
2362
2363         /* schedule blocked rxs */
2364         kiblnd_handle_early_rxs(conn);
2365         kiblnd_conn_decref(conn);
2366 }
2367
2368 static void
2369 kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
2370 {
2371         int          rc;
2372
2373         rc = rdma_reject(cmid, rej, sizeof(*rej));
2374
2375         if (rc != 0)
2376                 CWARN("Error %d sending reject\n", rc);
2377 }
2378
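/* Handle an incoming connection request: validate the request's connparams
 * against our own limits, create or find the peer_ni, resolve connection
 * races (normally in favour of the higher NID), then create the conn and
 * rdma_accept() with our CONNACK as private data.  Any failure sends a
 * reject carrying the reason back to the peer_ni. */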
2379 static int
2380 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2381 {
2382         rwlock_t                *g_lock = &kiblnd_data.kib_global_lock;
2383         struct kib_msg *reqmsg = priv;
2384         struct kib_msg *ackmsg;
2385         struct kib_dev *ibdev;
2386         struct kib_peer_ni *peer_ni;
2387         struct kib_peer_ni *peer2;
2388         struct kib_conn *conn;
2389         struct lnet_ni *ni = NULL;
2390         struct kib_net *net = NULL;
2391         lnet_nid_t             nid;
2392         struct rdma_conn_param cp;
2393         struct kib_rej rej;
2394         int                    version = IBLND_MSG_VERSION;
2395         unsigned long          flags;
2396         int                    rc;
2397         struct sockaddr_in    *peer_addr;
2398         LASSERT (!in_interrupt());
2399
2400         /* cmid inherits 'context' from the corresponding listener id */
2401         ibdev = cmid->context;
2402         LASSERT(ibdev);
2403
2404         memset(&rej, 0, sizeof(rej));
2405         rej.ibr_magic                = IBLND_MSG_MAGIC;
2406         rej.ibr_why                  = IBLND_REJECT_FATAL;
2407         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
2408
2409         peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
2410         if (*kiblnd_tunables.kib_require_priv_port &&
2411             ntohs(peer_addr->sin_port) >= PROT_SOCK) {
2412                 __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
2413                 CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
2414                        &ip, ntohs(peer_addr->sin_port));
2415                 goto failed;
2416         }
2417
2418         if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
2419                 CERROR("Short connection request\n");
2420                 goto failed;
2421         }
2422
2423         /* Future protocol version compatibility support!  If the
2424          * o2iblnd-specific protocol changes, or when LNET unifies
2425          * protocols over all LNDs, the initial connection will
2426          * negotiate a protocol version.  I trap this here to avoid
2427          * console errors; the reject tells the peer_ni which protocol I
2428          * speak. */
2429         if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
2430             reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
2431                 goto failed;
2432         if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
2433             reqmsg->ibm_version != IBLND_MSG_VERSION &&
2434             reqmsg->ibm_version != IBLND_MSG_VERSION_1)
2435                 goto failed;
2436         if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
2437             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
2438             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
2439                 goto failed;
2440
2441         rc = kiblnd_unpack_msg(reqmsg, priv_nob);
2442         if (rc != 0) {
2443                 CERROR("Can't parse connection request: %d\n", rc);
2444                 goto failed;
2445         }
2446
2447         nid = reqmsg->ibm_srcnid;
2448         ni  = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
2449
2450         if (ni != NULL) {
2451                 net = (struct kib_net *)ni->ni_data;
2452                 rej.ibr_incarnation = net->ibn_incarnation;
2453         }
2454
2455         if (ni == NULL ||                         /* no matching net */
2456             ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
2457             net->ibn_dev != ibdev) {              /* wrong device */
2458                 CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): "
2459                        "bad dst nid %s\n", libcfs_nid2str(nid),
2460                        ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
2461                        ibdev->ibd_ifname, ibdev->ibd_nnets,
2462                        &ibdev->ibd_ifip,
2463                        libcfs_nid2str(reqmsg->ibm_dstnid));
2464
2465                 goto failed;
2466         }
2467
2468         /* check time stamp as soon as possible */
2469         if (reqmsg->ibm_dststamp != 0 &&
2470             reqmsg->ibm_dststamp != net->ibn_incarnation) {
2471                 CWARN("Stale connection request\n");
2472                 rej.ibr_why = IBLND_REJECT_CONN_STALE;
2473                 goto failed;
2474         }
2475
2476         /* I can accept peer_ni's version */
2477         version = reqmsg->ibm_version;
2478
2479         if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
2480                 CERROR("Unexpected connreq msg type: %x from %s\n",
2481                        reqmsg->ibm_type, libcfs_nid2str(nid));
2482                 goto failed;
2483         }
2484
2485         if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
2486             kiblnd_msg_queue_size(version, ni)) {
2487                 CERROR("Can't accept conn from %s, queue depth too large: "
2488                        " %d (<=%d wanted)\n",
2489                        libcfs_nid2str(nid),
2490                        reqmsg->ibm_u.connparams.ibcp_queue_depth,
2491                        kiblnd_msg_queue_size(version, ni));
2492
2493                 if (version == IBLND_MSG_VERSION)
2494                         rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
2495
2496                 goto failed;
2497         }
2498
2499         if (reqmsg->ibm_u.connparams.ibcp_max_frags >
2500             IBLND_MAX_RDMA_FRAGS) {
2501                 CWARN("Can't accept conn from %s (version %x): "
2502                       "max_frags %d too large (%d wanted)\n",
2503                       libcfs_nid2str(nid), version,
2504                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2505                       IBLND_MAX_RDMA_FRAGS);
2506
2507                 if (version >= IBLND_MSG_VERSION)
2508                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2509
2510                 goto failed;
2511         } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
2512                    IBLND_MAX_RDMA_FRAGS &&
2513                    net->ibn_fmr_ps == NULL) {
2514                 CWARN("Can't accept conn from %s (version %x): "
2515                       "max_frags %d incompatible without FMR pool "
2516                       "(%d wanted)\n",
2517                       libcfs_nid2str(nid), version,
2518                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2519                       IBLND_MAX_RDMA_FRAGS);
2520
2521                 if (version == IBLND_MSG_VERSION)
2522                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2523
2524                 goto failed;
2525         }
2526
2527         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2528                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
2529                        libcfs_nid2str(nid),
2530                        reqmsg->ibm_u.connparams.ibcp_max_msg_size,
2531                        IBLND_MSG_SIZE);
2532                 goto failed;
2533         }
2534
2535         /* assume 'nid' is a new peer_ni; create  */
2536         rc = kiblnd_create_peer(ni, &peer_ni, nid);
2537         if (rc != 0) {
2538                 CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid));
2539                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2540                 goto failed;
2541         }
2542
2543         /* We have validated the peer's parameters so use those */
2544         peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
2545         peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
2546
2547         write_lock_irqsave(g_lock, flags);
2548
2549         peer2 = kiblnd_find_peer_locked(ni, nid);
2550         if (peer2 != NULL) {
2551                 if (peer2->ibp_version == 0) {
2552                         peer2->ibp_version     = version;
2553                         peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2554                 }
2555
2556                 /* not the guy I've talked with */
2557                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
2558                     peer2->ibp_version     != version) {
2559                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
2560
2561                         if (kiblnd_peer_active(peer2)) {
2562                                 peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2563                                 peer2->ibp_version = version;
2564                         }
2565                         write_unlock_irqrestore(g_lock, flags);
2566
2567                         CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
2568                               libcfs_nid2str(nid), peer2->ibp_version, version,
2569                               peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
2570
2571                         kiblnd_peer_decref(peer_ni);
2572                         rej.ibr_why = IBLND_REJECT_CONN_STALE;
2573                         goto failed;
2574                 }
2575
2576                 /* Tie-break connection race in favour of the higher NID.
2577                  * If we keep running into a race condition multiple times,
2578                  * we have to assume that the connection attempt with the
2579                  * higher NID is stuck in a connecting state and will never
2580                  * recover.  As such, we pass through this if-block and let
2581                  * the lower NID connection win so we can move forward.
2582                  */
2583                 if (peer2->ibp_connecting != 0 &&
2584                     nid < ni->ni_nid &&
2585                     peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) {
2586                         peer2->ibp_races++;
2587                         write_unlock_irqrestore(g_lock, flags);
2588
2589                         CDEBUG(D_NET, "Conn race %s\n",
2590                                libcfs_nid2str(peer2->ibp_nid));
2591
2592                         kiblnd_peer_decref(peer_ni);
2593                         rej.ibr_why = IBLND_REJECT_CONN_RACE;
2594                         goto failed;
2595                 }
2596                 if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
2597                         CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
2598                                 libcfs_nid2str(peer2->ibp_nid),
2599                                 MAX_CONN_RACES_BEFORE_ABORT);
2600                 /*
2601                  * A passive connection is allowed even when this peer_ni
2602                  * is waiting for reconnection.
2603                  */
2604                 peer2->ibp_reconnecting = 0;
2605                 peer2->ibp_races = 0;
2606                 peer2->ibp_accepting++;
2607                 kiblnd_peer_addref(peer2);
2608
2609                 /* Race with kiblnd_launch_tx (active connect) to create peer_ni
2610                  * so copy validated parameters since we now know what the
2611                  * peer_ni's limits are */
2612                 peer2->ibp_max_frags = peer_ni->ibp_max_frags;
2613                 peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
2614
2615                 write_unlock_irqrestore(g_lock, flags);
2616                 kiblnd_peer_decref(peer_ni);
2617                 peer_ni = peer2;
2618         } else {
2619                 /* Brand new peer_ni */
2620                 LASSERT (peer_ni->ibp_accepting == 0);
2621                 LASSERT (peer_ni->ibp_version == 0 &&
2622                          peer_ni->ibp_incarnation == 0);
2623
2624                 peer_ni->ibp_accepting   = 1;
2625                 peer_ni->ibp_version     = version;
2626                 peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
2627
2628                 /* I have a ref on ni that prevents it being shutdown */
2629                 LASSERT (net->ibn_shutdown == 0);
2630
2631                 kiblnd_peer_addref(peer_ni);
2632                 list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
2633
2634                 write_unlock_irqrestore(g_lock, flags);
2635         }
2636
2637         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, version);
2638         if (conn == NULL) {
2639                 kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
2640                 kiblnd_peer_decref(peer_ni);
2641                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2642                 goto failed;
2643         }
2644
2645         /* conn now "owns" cmid, so I return success from here on to ensure the
2646          * CM callback doesn't destroy cmid. */
2647         conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
2648         conn->ibc_credits          = conn->ibc_queue_depth;
2649         conn->ibc_reserved_credits = conn->ibc_queue_depth;
2650         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
2651                 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
2652
2653         ackmsg = &conn->ibc_connvars->cv_msg;
2654         memset(ackmsg, 0, sizeof(*ackmsg));
2655
2656         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
2657                         sizeof(ackmsg->ibm_u.connparams));
2658         ackmsg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
2659         ackmsg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
2660         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2661
2662         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
2663
2664         memset(&cp, 0, sizeof(cp));
2665         cp.private_data        = ackmsg;
2666         cp.private_data_len    = ackmsg->ibm_nob;
2667         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
2668         cp.initiator_depth     = 0;
2669         cp.flow_control        = 1;
2670         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2671         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2672
2673         CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
2674
2675         rc = rdma_accept(cmid, &cp);
2676         if (rc != 0) {
2677                 CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
2678                 rej.ibr_version = version;
2679                 rej.ibr_why     = IBLND_REJECT_FATAL;
2680
2681                 kiblnd_reject(cmid, &rej);
2682                 kiblnd_connreq_done(conn, rc);
2683                 kiblnd_conn_decref(conn);
2684         }
2685
2686         lnet_ni_decref(ni);
2687         return 0;
2688
2689  failed:
2690         if (ni != NULL) {
2691                 rej.ibr_cp.ibcp_queue_depth =
2692                         kiblnd_msg_queue_size(version, ni);
2693                 rej.ibr_cp.ibcp_max_frags   = IBLND_MAX_RDMA_FRAGS;
2694                 lnet_ni_decref(ni);
2695         }
2696
2697         rej.ibr_version = version;
2698         kiblnd_reject(cmid, &rej);
2699
2700         return -ECONNREFUSED;
2701 }
2702
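/* Decide whether a rejected active connect should be retried: reconnect only
 * if the connection is still needed (pending txs, or a version mismatch) and
 * no other attempt is in flight, adopting the max_frags/queue_depth the
 * peer_ni advertised when the reject was a parameter negotiation. */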
2703 static void
2704 kiblnd_check_reconnect(struct kib_conn *conn, int version,
2705                        u64 incarnation, int why, struct kib_connparams *cp)
2706 {
2707         rwlock_t        *glock = &kiblnd_data.kib_global_lock;
2708         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2709         char            *reason;
2710         int              msg_size = IBLND_MSG_SIZE;
2711         int              frag_num = -1;
2712         int              queue_dep = -1;
2713         bool             reconnect;
2714         unsigned long    flags;
2715
2716         LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2717         LASSERT(peer_ni->ibp_connecting > 0);   /* 'conn' at least */
2718
2719         if (cp) {
2720                 msg_size        = cp->ibcp_max_msg_size;
2721                 frag_num        = cp->ibcp_max_frags;
2722                 queue_dep       = cp->ibcp_queue_depth;
2723         }
2724
2725         write_lock_irqsave(glock, flags);
2726         /* Retry the connection if it's still needed and no other connection
2727          * attempt (active or passive) is in progress.
2728          * NB: reconnect is still needed even when ibp_tx_queue is
2729          * empty if ibp_version != version, because the reconnect may be
2730          * initiated by kiblnd_query() */
2731         reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
2732                      peer_ni->ibp_version != version) &&
2733                     peer_ni->ibp_connecting &&
2734                     peer_ni->ibp_accepting == 0;
2735         if (!reconnect) {
2736                 reason = "no need";
2737                 goto out;
2738         }
2739
2740         switch (why) {
2741         default:
2742                 reason = "Unknown";
2743                 break;
2744
2745         case IBLND_REJECT_RDMA_FRAGS: {
2746                 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2747
2748                 if (!cp) {
2749                         reason = "can't negotiate max frags";
2750                         goto out;
2751                 }
2752                 tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2753 #ifdef HAVE_IB_GET_DMA_MR
2754                 /*
2755                  * This check only makes sense if the kernel supports global
2756                  * memory registration; otherwise map_on_demand can never be 0.
2757                  */
2758                 if (!tunables->lnd_map_on_demand) {
2759                         reason = "map_on_demand must be enabled";
2760                         goto out;
2761                 }
2762 #endif
2763                 if (conn->ibc_max_frags <= frag_num) {
2764                         reason = "unsupported max frags";
2765                         goto out;
2766                 }
2767
2768                 peer_ni->ibp_max_frags = frag_num;
2769                 reason = "rdma fragments";
2770                 break;
2771         }
2772         case IBLND_REJECT_MSG_QUEUE_SIZE:
2773                 if (!cp) {
2774                         reason = "can't negotiate queue depth";
2775                         goto out;
2776                 }
2777                 if (conn->ibc_queue_depth <= queue_dep) {
2778                         reason = "unsupported queue depth";
2779                         goto out;
2780                 }
2781
2782                 peer_ni->ibp_queue_depth = queue_dep;
2783                 reason = "queue depth";
2784                 break;
2785
2786         case IBLND_REJECT_CONN_STALE:
2787                 reason = "stale";
2788                 break;
2789
2790         case IBLND_REJECT_CONN_RACE:
2791                 reason = "conn race";
2792                 break;
2793
2794         case IBLND_REJECT_CONN_UNCOMPAT:
2795                 reason = "version negotiation";
2796                 break;
2797
2798         case IBLND_REJECT_INVALID_SRV_ID:
2799                 reason = "invalid service id";
2800                 break;
2801         }
2802
2803         conn->ibc_reconnect = 1;
2804         peer_ni->ibp_reconnecting++;
2805         peer_ni->ibp_version = version;
2806         if (incarnation != 0)
2807                 peer_ni->ibp_incarnation = incarnation;
2808  out:
2809         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2810
2811         CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
2812                 libcfs_nid2str(peer_ni->ibp_nid),
2813                 reconnect ? "reconnect" : "don't reconnect",
2814                 reason, IBLND_MSG_VERSION, version, msg_size,
2815                 conn->ibc_queue_depth, queue_dep,
2816                 conn->ibc_max_frags, frag_num);
2817         /*
2818          * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer_ni
2819          * while destroying the zombie
2820          */
2821 }
2822
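/*
 * Handle an RDMA_CM_EVENT_REJECTED on an active connect: decode the
 * (possibly byte-swapped) kib_rej private data if the peer_ni supplied
 * any, schedule a reconnect for negotiable rejections, and finalise the
 * attempt with -ECONNREFUSED.
 */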
2823 static void
2824 kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
2825 {
2826         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2827
2828         LASSERT (!in_interrupt());
2829         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2830
2831         switch (reason) {
2832         case IB_CM_REJ_STALE_CONN:
2833                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2834                                        IBLND_REJECT_CONN_STALE, NULL);
2835                 break;
2836
2837         case IB_CM_REJ_INVALID_SERVICE_ID:
2838                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2839                                        IBLND_REJECT_INVALID_SRV_ID, NULL);
2840                 CNETERR("%s rejected: no listener at %d\n",
2841                         libcfs_nid2str(peer_ni->ibp_nid),
2842                         *kiblnd_tunables.kib_service);
2843                 break;
2844
2845         case IB_CM_REJ_CONSUMER_DEFINED:
2846                 if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
2847                         struct kib_rej *rej = priv;
2848                         struct kib_connparams *cp = NULL;
2849                         int               flip        = 0;
2850                         __u64             incarnation = -1;
2851
2852                         /* NB. default incarnation is -1 because:
2853                          * a) V1 will ignore dst incarnation in connreq.
2854                          * b) V2 will provide incarnation while rejecting me,
2855                          *    so the -1 will be overwritten.
2856                          *
2857                          * If I try to connect to a V1 peer_ni with the V2
2858                          * protocol and it rejects me, then upgrades to V2,
2859                          * I know nothing about the upgrade and try to
2860                          * reconnect with V1; the upgraded V2 peer can tell
2861                          * I'm talking to the old version and rejects me
2862                          * (incarnation is -1). */
2863
2864                         if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
2865                             rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
2866                                 __swab32s(&rej->ibr_magic);
2867                                 __swab16s(&rej->ibr_version);
2868                                 flip = 1;
2869                         }
2870
2871                         if (priv_nob >= sizeof(struct kib_rej) &&
2872                             rej->ibr_version > IBLND_MSG_VERSION_1) {
2873                                 /* priv_nob is always 148 in the current version
2874                                  * of OFED (IB_CM_REJ_PRIVATE_DATA_SIZE), so we
2875                                  * still need to check the version here. */
2876                                 cp = &rej->ibr_cp;
2877
2878                                 if (flip) {
2879                                         __swab64s(&rej->ibr_incarnation);
2880                                         __swab16s(&cp->ibcp_queue_depth);
2881                                         __swab16s(&cp->ibcp_max_frags);
2882                                         __swab32s(&cp->ibcp_max_msg_size);
2883                                 }
2884
2885                                 incarnation = rej->ibr_incarnation;
2886                         }
2887
2888                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
2889                             rej->ibr_magic != LNET_PROTO_MAGIC) {
2890                                 CERROR("%s rejected: consumer defined fatal error\n",
2891                                        libcfs_nid2str(peer_ni->ibp_nid));
2892                                 break;
2893                         }
2894
2895                         if (rej->ibr_version != IBLND_MSG_VERSION &&
2896                             rej->ibr_version != IBLND_MSG_VERSION_1) {
2897                                 CERROR("%s rejected: o2iblnd version %x error\n",
2898                                        libcfs_nid2str(peer_ni->ibp_nid),
2899                                        rej->ibr_version);
2900                                 break;
2901                         }
2902
2903                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
2904                             rej->ibr_version == IBLND_MSG_VERSION_1) {
2905                                 CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
2906                                        libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version);
2907
2908                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
2909                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
2910                         }
2911
2912                         switch (rej->ibr_why) {
2913                         case IBLND_REJECT_CONN_RACE:
2914                         case IBLND_REJECT_CONN_STALE:
2915                         case IBLND_REJECT_CONN_UNCOMPAT:
2916                         case IBLND_REJECT_MSG_QUEUE_SIZE:
2917                         case IBLND_REJECT_RDMA_FRAGS:
2918                                 kiblnd_check_reconnect(conn, rej->ibr_version,
2919                                                 incarnation, rej->ibr_why, cp);
2920                                 break;
2921
2922                         case IBLND_REJECT_NO_RESOURCES:
2923                                 CERROR("%s rejected: o2iblnd no resources\n",
2924                                        libcfs_nid2str(peer_ni->ibp_nid));
2925                                 break;
2926
2927                         case IBLND_REJECT_FATAL:
2928                                 CERROR("%s rejected: o2iblnd fatal error\n",
2929                                        libcfs_nid2str(peer_ni->ibp_nid));
2930                                 break;
2931
2932                         default:
2933                                 CERROR("%s rejected: o2iblnd reason %d\n",
2934                                        libcfs_nid2str(peer_ni->ibp_nid),
2935                                        rej->ibr_why);
2936                                 break;
2937                         }
2938                         break;
2939                 }
2940                 /* fall through */
2941         default:
2942                 CNETERR("%s rejected: reason %d, size %d\n",
2943                         libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
2944                 break;
2945         }
2946
2947         kiblnd_connreq_done(conn, -ECONNREFUSED);
2948 }
2949
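/*
 * Validate the IBLND_MSG_CONNACK the passive side returned in the
 * RDMA_CM_EVENT_ESTABLISHED private data and adopt the negotiated
 * parameters; on any error record ibc_comms_error so the freshly
 * established connection is torn down immediately.
 */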
2950 static void
2951 kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
2952 {
2953         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2954         struct lnet_ni *ni = peer_ni->ibp_ni;
2955         struct kib_net *net = ni->ni_data;
2956         struct kib_msg *msg = priv;
2957         int            ver  = conn->ibc_version;
2958         int            rc   = kiblnd_unpack_msg(msg, priv_nob);
2959         unsigned long  flags;
2960
2961         LASSERT (net != NULL);
2962
2963         if (rc != 0) {
2964                 CERROR("Can't unpack connack from %s: %d\n",
2965                        libcfs_nid2str(peer_ni->ibp_nid), rc);
2966                 goto failed;
2967         }
2968
2969         if (msg->ibm_type != IBLND_MSG_CONNACK) {
2970                 CERROR("Unexpected message %d from %s\n",
2971                        msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid));
2972                 rc = -EPROTO;
2973                 goto failed;
2974         }
2975
2976         if (ver != msg->ibm_version) {
2977                 CERROR("%s replied version %x, which differs from "
2978                        "requested version %x\n",
2979                        libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver);
2980                 rc = -EPROTO;
2981                 goto failed;
2982         }
2983
2984         if (msg->ibm_u.connparams.ibcp_queue_depth >
2985             conn->ibc_queue_depth) {
2986                 CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
2987                        libcfs_nid2str(peer_ni->ibp_nid),
2988                        msg->ibm_u.connparams.ibcp_queue_depth,
2989                        conn->ibc_queue_depth);
2990                 rc = -EPROTO;
2991                 goto failed;
2992         }
2993
2994         if (msg->ibm_u.connparams.ibcp_max_frags >
2995             conn->ibc_max_frags) {
2996                 CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
2997                        libcfs_nid2str(peer_ni->ibp_nid),
2998                        msg->ibm_u.connparams.ibcp_max_frags,
2999                        conn->ibc_max_frags);
3000                 rc = -EPROTO;
3001                 goto failed;
3002         }
3003
3004         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
3005                 CERROR("%s max message size %d too big (%d max)\n",
3006                        libcfs_nid2str(peer_ni->ibp_nid),
3007                        msg->ibm_u.connparams.ibcp_max_msg_size,
3008                        IBLND_MSG_SIZE);
3009                 rc = -EPROTO;
3010                 goto failed;
3011         }
3012
3013         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3014         if (msg->ibm_dstnid == ni->ni_nid &&
3015             msg->ibm_dststamp == net->ibn_incarnation)
3016                 rc = 0;
3017         else
3018                 rc = -ESTALE;
3019         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3020
3021         if (rc != 0) {
3022                 CERROR("Bad connection reply from %s, rc = %d, "
3023                        "version: %x max_frags: %d\n",
3024                        libcfs_nid2str(peer_ni->ibp_nid), rc,
3025                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
3026                 goto failed;
3027         }
3028
3029         conn->ibc_incarnation      = msg->ibm_srcstamp;
3030         conn->ibc_credits          = msg->ibm_u.connparams.ibcp_queue_depth;
3031         conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
3032         conn->ibc_queue_depth      = msg->ibm_u.connparams.ibcp_queue_depth;
3033         conn->ibc_max_frags        = msg->ibm_u.connparams.ibcp_max_frags;
3034         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
3035                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
3036
3037         kiblnd_connreq_done(conn, 0);
3038         return;
3039
3040  failed:
3041         /* NB My QP has already established itself, so I handle anything going
3042          * wrong here by setting ibc_comms_error.
3043          * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
3044          * immediately tears it down. */
3045
3046         LASSERT (rc != 0);
3047         conn->ibc_comms_error = rc;
3048         kiblnd_connreq_done(conn, 0);
3049 }
3050
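/*
 * The route to the peer_ni is resolved: create a conn and send an
 * IBLND_MSG_CONNREQ in the rdma_connect() private data.  Once the conn
 * owns the cmid this returns 0 even on failure so the CM callback
 * doesn't destroy the cmid.
 */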
3051 static int
3052 kiblnd_active_connect(struct rdma_cm_id *cmid)
3053 {
3054         struct kib_peer_ni *peer_ni = cmid->context;
3055         struct kib_conn *conn;
3056         struct kib_msg *msg;
3057         struct rdma_conn_param cp;
3058         int                      version;
3059         __u64                    incarnation;
3060         unsigned long            flags;
3061         int                      rc;
3062
3063         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3064
3065         incarnation = peer_ni->ibp_incarnation;
3066         version     = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION :
3067                                                  peer_ni->ibp_version;
3068
3069         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3070
3071         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT,
3072                                   version);
3073         if (conn == NULL) {
3074                 kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM);
3075                 kiblnd_peer_decref(peer_ni); /* lose cmid's ref */
3076                 return -ENOMEM;
3077         }
3078
3079         /* conn "owns" cmid now, so I return success from here on to ensure the
3080          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
3081          * on peer_ni */
3082
3083         msg = &conn->ibc_connvars->cv_msg;
3084
3085         memset(msg, 0, sizeof(*msg));
3086         kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
3087         msg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
3088         msg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
3089         msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
3090
3091         kiblnd_pack_msg(peer_ni->ibp_ni, msg, version,
3092                         0, peer_ni->ibp_nid, incarnation);
3093
3094         memset(&cp, 0, sizeof(cp));
3095         cp.private_data        = msg;
3096         cp.private_data_len    = msg->ibm_nob;
3097         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
3098         cp.initiator_depth     = 0;
3099         cp.flow_control        = 1;
3100         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
3101         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
3102
3103         LASSERT(cmid->context == (void *)conn);
3104         LASSERT(conn->ibc_cmid == cmid);
3105
3106         rc = rdma_connect(cmid, &cp);
3107         if (rc != 0) {
3108                 CERROR("Can't connect to %s: %d\n",
3109                        libcfs_nid2str(peer_ni->ibp_nid), rc);
3110                 kiblnd_connreq_done(conn, rc);
3111                 kiblnd_conn_decref(conn);
3112         }
3113
3114         return 0;
3115 }
3116
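/*
 * RDMA CM event dispatcher for both active and passive connections.
 * cmid->context holds a peer_ni while addresses and routes are being
 * resolved and a conn once one has been created; a non-zero return
 * tells the CM to destroy the cmid.
 */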
3117 int
3118 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
3119 {
3120         struct kib_peer_ni *peer_ni;
3121         struct kib_conn *conn;
3122         int rc;
3123
3124         switch (event->event) {
3125         default:
3126                 CERROR("Unexpected event: %d, status: %d\n",
3127                        event->event, event->status);
3128                 LBUG();
3129
3130         case RDMA_CM_EVENT_CONNECT_REQUEST:
3131                 /* destroy cmid on failure */
3132                 rc = kiblnd_passive_connect(cmid,
3133                                             (void *)KIBLND_CONN_PARAM(event),
3134                                             KIBLND_CONN_PARAM_LEN(event));
3135                 CDEBUG(D_NET, "connreq: %d\n", rc);
3136                 return rc;
3137
3138         case RDMA_CM_EVENT_ADDR_ERROR:
3139                 peer_ni = cmid->context;
3140                 CNETERR("%s: ADDR ERROR %d\n",
3141                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3142                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3143                 kiblnd_peer_decref(peer_ni);
3144                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
3145
3146         case RDMA_CM_EVENT_ADDR_RESOLVED:
3147                 peer_ni = cmid->context;
3148
3149                 CDEBUG(D_NET, "%s Addr resolved: %d\n",
3150                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3151
3152                 if (event->status != 0) {
3153                         CNETERR("Can't resolve address for %s: %d\n",
3154                                 libcfs_nid2str(peer_ni->ibp_nid), event->status);
3155                         rc = event->status;
3156                 } else {
3157                         rc = rdma_resolve_route(
3158                                 cmid, lnet_get_lnd_timeout() * 1000);
3159                         if (rc == 0) {
3160                                 struct kib_net *net = peer_ni->ibp_ni->ni_data;
3161                                 struct kib_dev *dev = net->ibn_dev;
3162
3163                                 CDEBUG(D_NET, "%s: connection bound to "
3164                                        "%s:%pI4h:%s\n",
3165                                        libcfs_nid2str(peer_ni->ibp_nid),
3166                                        dev->ibd_ifname,
3167                                        &dev->ibd_ifip, cmid->device->name);
3168
3169                                 return 0;
3170                         }
3171
3172                         /* Can't initiate route resolution */
3173                         CERROR("Can't resolve route for %s: %d\n",
3174                                libcfs_nid2str(peer_ni->ibp_nid), rc);
3175                 }
3176                 kiblnd_peer_connect_failed(peer_ni, 1, rc);
3177                 kiblnd_peer_decref(peer_ni);
3178                 return rc;                      /* rc != 0 destroys cmid */
3179
3180         case RDMA_CM_EVENT_ROUTE_ERROR:
3181                 peer_ni = cmid->context;
3182                 CNETERR("%s: ROUTE ERROR %d\n",
3183                         libcfs_nid2str(peer_ni->ibp_nid), event->status);
3184                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3185                 kiblnd_peer_decref(peer_ni);
3186                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
3187
3188         case RDMA_CM_EVENT_ROUTE_RESOLVED:
3189                 peer_ni = cmid->context;
3190         CDEBUG(D_NET, "%s Route resolved: %d\n",
3191                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3192
3193                 if (event->status == 0)
3194                         return kiblnd_active_connect(cmid);
3195
3196                 CNETERR("Can't resolve route for %s: %d\n",
3197                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3198                 kiblnd_peer_connect_failed(peer_ni, 1, event->status);
3199                 kiblnd_peer_decref(peer_ni);
3200                 return event->status;           /* rc != 0 destroys cmid */
3201
3202         case RDMA_CM_EVENT_UNREACHABLE:
3203                 conn = cmid->context;
3204                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3205                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3206                 CNETERR("%s: UNREACHABLE %d\n",
3207                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3208                 kiblnd_connreq_done(conn, -ENETDOWN);
3209                 kiblnd_conn_decref(conn);
3210                 return 0;
3211
3212         case RDMA_CM_EVENT_CONNECT_ERROR:
3213                 conn = cmid->context;
3214                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3215                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3216                 CNETERR("%s: CONNECT ERROR %d\n",
3217                         libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3218                 kiblnd_connreq_done(conn, -ENOTCONN);
3219                 kiblnd_conn_decref(conn);
3220                 return 0;
3221
3222         case RDMA_CM_EVENT_REJECTED:
3223                 conn = cmid->context;
3224                 switch (conn->ibc_state) {
3225                 default:
3226                         LBUG();
3227
3228                 case IBLND_CONN_PASSIVE_WAIT:
3229                         CERROR ("%s: REJECTED %d\n",
3230                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
3231                                 event->status);
3232                         kiblnd_connreq_done(conn, -ECONNRESET);
3233                         break;
3234
3235                 case IBLND_CONN_ACTIVE_CONNECT:
3236                         kiblnd_rejected(conn, event->status,
3237                                         (void *)KIBLND_CONN_PARAM(event),
3238                                         KIBLND_CONN_PARAM_LEN(event));
3239                         break;
3240                 }
3241                 kiblnd_conn_decref(conn);
3242                 return 0;
3243
3244         case RDMA_CM_EVENT_ESTABLISHED:
3245                 conn = cmid->context;
3246                 switch (conn->ibc_state) {
3247                 default:
3248                         LBUG();
3249
3250                 case IBLND_CONN_PASSIVE_WAIT:
3251                         CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
3252                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3253                         kiblnd_connreq_done(conn, 0);
3254                         break;
3255
3256                 case IBLND_CONN_ACTIVE_CONNECT:
3257                         CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
3258                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3259                         kiblnd_check_connreply(conn,
3260                                                (void *)KIBLND_CONN_PARAM(event),
3261                                                KIBLND_CONN_PARAM_LEN(event));
3262                         break;
3263                 }
3264                 /* net keeps its ref on conn! */
3265                 return 0;
3266
3267         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
3268                 CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
3269                 return 0;
3270
3271         case RDMA_CM_EVENT_DISCONNECTED:
3272                 conn = cmid->context;
3273                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
3274                         CERROR("%s DISCONNECTED\n",
3275                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3276                         kiblnd_connreq_done(conn, -ECONNRESET);
3277                 } else {
3278                         kiblnd_close_conn(conn, 0);
3279                 }
3280                 kiblnd_conn_decref(conn);
3281                 cmid->context = NULL;
3282                 return 0;
3283
3284         case RDMA_CM_EVENT_DEVICE_REMOVAL:
3285                 LCONSOLE_ERROR_MSG(0x131,
3286                                    "Received notification of device removal\n"
3287                                    "Please shut down LNET to allow this to proceed\n");
3288                 /* Can't remove network from underneath LNET for now, so I have
3289                  * to ignore this */
3290                 return 0;
3291
3292         case RDMA_CM_EVENT_ADDR_CHANGE:
3293                 LCONSOLE_INFO("Physical link changed (e.g. HCA/port)\n");
3294                 return 0;
3295         }
3296 }
3297
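/* Return 1 if any tx on 'txs' has passed its deadline, else 0;
 * called with conn::ibc_lock held */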
3298 static int
3299 kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
3300 {
3301         struct kib_tx *tx;
3302         struct list_head *ttmp;
3303
3304         list_for_each(ttmp, txs) {
3305                 tx = list_entry(ttmp, struct kib_tx, tx_list);
3306
3307                 if (txs != &conn->ibc_active_txs) {
3308                         LASSERT(tx->tx_queued);
3309                 } else {
3310                         LASSERT(!tx->tx_queued);
3311                         LASSERT(tx->tx_waiting || tx->tx_sending != 0);
3312                 }
3313
3314                 if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
3315                         CERROR("Timed out tx: %s, %lld seconds\n",
3316                                kiblnd_queue2str(conn, txs),
3317                                ktime_ms_delta(ktime_get(),
3318                                               tx->tx_deadline) / MSEC_PER_SEC);
3319                         return 1;
3320                 }
3321         }
3322
3323         return 0;
3324 }
3325
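/* Return 1 if any queued or active tx on 'conn' has timed out */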
3326 static int
3327 kiblnd_conn_timed_out_locked(struct kib_conn *conn)
3328 {
3329         return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
3330                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
3331                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
3332                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
3333                 kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
3334 }
3335
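/*
 * Scan one bucket of the peer_ni hash table for timed-out txs and for
 * connections that have either timed out (close them) or need a NOOP
 * to return credits (kick their sends).
 */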
3336 static void
3337 kiblnd_check_conns (int idx)
3338 {
3339         struct list_head  closes = LIST_HEAD_INIT(closes);
3340         struct list_head  checksends = LIST_HEAD_INIT(checksends);
3341         struct list_head  timedout_txs = LIST_HEAD_INIT(timedout_txs);
3342         struct list_head *peers = &kiblnd_data.kib_peers[idx];
3343         struct list_head *ptmp;
3344         struct kib_peer_ni *peer_ni;
3345         struct kib_conn *conn;
3346         struct kib_tx *tx, *tx_tmp;
3347         struct list_head *ctmp;
3348         unsigned long     flags;
3349
3350         /* NB. We expect to look at all the peers and not find any
3351          * RDMAs to time out.  The write lock is needed because timed-out
3352          * txs are moved off the peer_ni tx queues below. */
3353         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3354
3355         list_for_each(ptmp, peers) {
3356                 peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
3357
3358                 /* Check tx_deadline */
3359                 list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
3360                         if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
3361                                 CWARN("Timed out tx for %s: %lld seconds\n",
3362                                       libcfs_nid2str(peer_ni->ibp_nid),
3363                                       ktime_ms_delta(ktime_get(),
3364                                                      tx->tx_deadline) / MSEC_PER_SEC);
3365                                 list_move(&tx->tx_list, &timedout_txs);
3366                         }
3367                 }
3368
3369                 list_for_each(ctmp, &peer_ni->ibp_conns) {
3370                         int timedout;
3371                         int sendnoop;
3372
3373                         conn = list_entry(ctmp, struct kib_conn, ibc_list);
3374
3375                         LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
3376
3377                         spin_lock(&conn->ibc_lock);
3378
3379                         sendnoop = kiblnd_need_noop(conn);
3380                         timedout = kiblnd_conn_timed_out_locked(conn);
3381                         if (!sendnoop && !timedout) {
3382                                 spin_unlock(&conn->ibc_lock);
3383                                 continue;
3384                         }
3385
3386                         if (timedout) {
3387                                 CERROR("Timed out RDMA with %s (%lld): "
3388                                        "c: %u, oc: %u, rc: %u\n",
3389                                        libcfs_nid2str(peer_ni->ibp_nid),
3390                                        ktime_get_seconds() - peer_ni->ibp_last_alive,
3391                                        conn->ibc_credits,
3392                                        conn->ibc_outstanding_credits,
3393                                        conn->ibc_reserved_credits);
3394                                 list_add(&conn->ibc_connd_list, &closes);
3395                         } else {
3396                                 list_add(&conn->ibc_connd_list, &checksends);
3397                         }
3398                         /* +ref for 'closes' or 'checksends' */
3399                         kiblnd_conn_addref(conn);
3400
3401                         spin_unlock(&conn->ibc_lock);
3402                 }
3403         }
3404
3405         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3406
3407         if (!list_empty(&timedout_txs))
3408                 kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT,
3409                                    LNET_MSG_STATUS_LOCAL_TIMEOUT);
3410
3411         /* Handle timeout by closing the whole
3412          * connection. We can only be sure RDMA activity
3413          * has ceased once the QP has been modified. */
3414         while (!list_empty(&closes)) {
3415                 conn = list_entry(closes.next,
3416                                   struct kib_conn, ibc_connd_list);
3417                 list_del(&conn->ibc_connd_list);
3418                 kiblnd_close_conn(conn, -ETIMEDOUT);
3419                 kiblnd_conn_decref(conn);
3420         }
3421
3422         /* In case we have enough credits to return via a
3423          * NOOP, but there were no non-blocking tx descs
3424          * free to do it last time... */
3425         while (!list_empty(&checksends)) {
3426                 conn = list_entry(checksends.next,
3427                                   struct kib_conn, ibc_connd_list);
3428                 list_del(&conn->ibc_connd_list);
3429
3430                 spin_lock(&conn->ibc_lock);
3431                 kiblnd_check_sends_locked(conn);
3432                 spin_unlock(&conn->ibc_lock);
3433
3434                 kiblnd_conn_decref(conn);
3435         }
3436 }
3437
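/* Disconnect and finalise a closing conn; runs only in connd context */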
3438 static void
3439 kiblnd_disconnect_conn(struct kib_conn *conn)
3440 {
3441         LASSERT (!in_interrupt());
3442         LASSERT (current == kiblnd_data.kib_connd);
3443         LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
3444
3445         rdma_disconnect(conn->ibc_cmid);
3446         kiblnd_finalise_conn(conn);
3447
3448         kiblnd_peer_notify(conn->ibc_peer);
3449 }
3450
3451 /*
3452  * High-water mark for reconnection to the same peer_ni; further attempts
3453  * should be delayed after more than KIB_RECONN_HIGH_RACE consecutive tries.
3454  */
3455 #define KIB_RECONN_HIGH_RACE    10
3456 /*
3457  * Allow connd to take a break and handle other things after consecutive
3458  * reconnection attempts.
3459  */
3460 #define KIB_RECONN_BREAK        100
3461
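/*
 * Connection daemon: reaps zombie conns (re-queueing those flagged for
 * reconnect), runs graceful disconnects, retries reconnects in batches
 * of at most KIB_RECONN_BREAK, and sweeps a proportion of the peer_ni
 * table each second for timeouts via kiblnd_check_conns().
 */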
3462 int
3463 kiblnd_connd (void *arg)
3464 {
3465         spinlock_t        *lock = &kiblnd_data.kib_connd_lock;
3466         wait_queue_entry_t wait;
3467         unsigned long      flags;
3468         struct kib_conn *conn;
3469         int                timeout;
3470         int                i;
3471         int                dropped_lock;
3472         int                peer_index = 0;
3473         unsigned long      deadline = jiffies;
3474
3475         cfs_block_allsigs();
3476
3477         init_waitqueue_entry(&wait, current);
3478         kiblnd_data.kib_connd = current;
3479
3480         spin_lock_irqsave(lock, flags);
3481
3482         while (!kiblnd_data.kib_shutdown) {
3483                 int reconn = 0;
3484
3485                 dropped_lock = 0;
3486
3487                 if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
3488                         struct kib_peer_ni *peer_ni = NULL;
3489
3490                         conn = list_entry(kiblnd_data.kib_connd_zombies.next,
3491                                           struct kib_conn, ibc_list);
3492                         list_del(&conn->ibc_list);
3493                         if (conn->ibc_reconnect) {
3494                                 peer_ni = conn->ibc_peer;
3495                                 kiblnd_peer_addref(peer_ni);
3496                         }
3497
3498                         spin_unlock_irqrestore(lock, flags);
3499                         dropped_lock = 1;
3500
3501                         kiblnd_destroy_conn(conn);
3502
3503                         spin_lock_irqsave(lock, flags);
3504                         if (!peer_ni) {
3505                                 LIBCFS_FREE(conn, sizeof(*conn));
3506                                 continue;
3507                         }
3508
3509                         conn->ibc_peer = peer_ni;
3510                         if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
3511                                 list_add_tail(&conn->ibc_list,
3512                                               &kiblnd_data.kib_reconn_list);
3513                         else
3514                                 list_add_tail(&conn->ibc_list,
3515                                               &kiblnd_data.kib_reconn_wait);
3516                 }
3517
3518                 if (!list_empty(&kiblnd_data.kib_connd_conns)) {
3519                         conn = list_entry(kiblnd_data.kib_connd_conns.next,
3520                                           struct kib_conn, ibc_list);
3521                         list_del(&conn->ibc_list);
3522
3523                         spin_unlock_irqrestore(lock, flags);
3524                         dropped_lock = 1;
3525
3526                         kiblnd_disconnect_conn(conn);
3527                         kiblnd_conn_decref(conn);
3528
3529                         spin_lock_irqsave(lock, flags);
3530                 }
3531
3532                 while (reconn < KIB_RECONN_BREAK) {
3533                         if (kiblnd_data.kib_reconn_sec !=
3534                             ktime_get_real_seconds()) {
3535                                 kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
3536                                 list_splice_init(&kiblnd_data.kib_reconn_wait,
3537                                                  &kiblnd_data.kib_reconn_list);
3538                         }
3539
3540                         if (list_empty(&kiblnd_data.kib_reconn_list))
3541                                 break;
3542
3543                         conn = list_entry(kiblnd_data.kib_reconn_list.next,
3544                                           struct kib_conn, ibc_list);
3545                         list_del(&conn->ibc_list);
3546
3547                         spin_unlock_irqrestore(lock, flags);
3548                         dropped_lock = 1;
3549
3550                         reconn += kiblnd_reconnect_peer(conn->ibc_peer);
3551                         kiblnd_peer_decref(conn->ibc_peer);
3552                         LIBCFS_FREE(conn, sizeof(*conn));
3553
3554                         spin_lock_irqsave(lock, flags);
3555                 }
3556
3557                 /* careful with the jiffy wrap... */
3558                 timeout = (int)(deadline - jiffies);
3559                 if (timeout <= 0) {
3560                         const int n = 4;
3561                         const int p = 1;
3562                         int       chunk = kiblnd_data.kib_peer_hash_size;
3563                         unsigned int lnd_timeout;
3564
3565                         spin_unlock_irqrestore(lock, flags);
3566                         dropped_lock = 1;
3567
3568                         /* Time to check for RDMA timeouts on a few more
3569                          * peers: I do checks every 'p' seconds on a
3570                          * proportion of the peer_ni table and I need to check
3571                          * every connection 'n' times within a timeout
3572                          * interval, to ensure I detect a timeout on any
3573                          * connection within (n+1)/n times the timeout
3574                          * interval. */
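                        /* A worked example with illustrative numbers:
                         * with lnd_timeout = 50, n = 4, p = 1 and a
                         * 101-bucket peer_ni table, chunk becomes
                         * 101 * 4 / 50 = 8, so every bucket is checked
                         * about every 13 seconds, i.e. ~4 times per
                         * timeout interval. */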
3575
3576                         lnd_timeout = lnet_get_lnd_timeout();
3577                         if (lnd_timeout > n * p)
3578                                 chunk = (chunk * n * p) / lnd_timeout;
3579                         if (chunk == 0)
3580                                 chunk = 1;
3581
3582                         for (i = 0; i < chunk; i++) {
3583                                 kiblnd_check_conns(peer_index);
3584                                 peer_index = (peer_index + 1) %
3585                                              kiblnd_data.kib_peer_hash_size;
3586                         }
3587
3588                         deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
3589                         spin_lock_irqsave(lock, flags);
3590                 }
3591
3592                 if (dropped_lock)
3593                         continue;
3594
3595                 /* Nothing to do: sleep for 'timeout' or until woken */
3596                 set_current_state(TASK_INTERRUPTIBLE);
3597                 add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3598                 spin_unlock_irqrestore(lock, flags);
3599
3600                 schedule_timeout(timeout);
3601
3602                 set_current_state(TASK_RUNNING);
3603                 remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3604                 spin_lock_irqsave(lock, flags);
3605         }
3606
3607         spin_unlock_irqrestore(lock, flags);
3608
3609         kiblnd_thread_fini();
3610         return 0;
3611 }
3612
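/*
 * Async QP event handler: flag the NI as fatally errored when the port
 * or device fails and clear the flag when the port becomes active again.
 */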
3613 void
3614 kiblnd_qp_event(struct ib_event *event, void *arg)
3615 {
3616         struct kib_conn *conn = arg;
3617
3618         switch (event->event) {
3619         case IB_EVENT_COMM_EST:
3620                 CDEBUG(D_NET, "%s established\n",
3621                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
3622                 /* We received a packet but the connection isn't established;
3623                  * the handshake packet was probably lost, so it's safe to
3624                  * force the connection into the established state */
3625                 rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
3626                 return;
3627
3628         case IB_EVENT_PORT_ERR:
3629         case IB_EVENT_DEVICE_FATAL:
3630                 CERROR("Fatal device error for NI %s\n",
3631                        libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
3632                 atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
3633                 return;
3634
3635         case IB_EVENT_PORT_ACTIVE:
3636                 CERROR("Port reactivated for NI %s\n",
3637                        libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
3638                 atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
3639                 return;
3640
3641         default:
3642                 CERROR("%s: Async QP event type %d\n",
3643                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3644                 return;
3645         }
3646 }
3647
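/* Dispatch a completion according to the work-request type encoded in
 * wc->wr_id */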
3648 static void
3649 kiblnd_complete (struct ib_wc *wc)
3650 {
3651         switch (kiblnd_wreqid2type(wc->wr_id)) {
3652         default:
3653                 LBUG();
3654
3655         case IBLND_WID_MR:
3656                 if (wc->status != IB_WC_SUCCESS &&
3657                     wc->status != IB_WC_WR_FLUSH_ERR)
3658                         CNETERR("FastReg failed: %d\n", wc->status);
3659                 return;
3660
3661         case IBLND_WID_RDMA:
3662                 /* We only get RDMA completion notification if it fails.  All
3663                  * subsequent work items, including the final SEND will fail
3664                  * too.  However we can't print out any more info about the
3665                  * failing RDMA because 'tx' might be back on the idle list or
3666                  * even reused already if we didn't manage to post all our work
3667                  * items */
3668                 CNETERR("RDMA (tx: %p) failed: %d\n",
3669                         kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3670                 return;
3671
3672         case IBLND_WID_TX:
3673                 kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3674                 return;
3675
3676         case IBLND_WID_RX:
3677                 kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
3678                                    wc->byte_len);
3679                 return;
3680         }
3681 }
3682
3683 void
3684 kiblnd_cq_completion(struct ib_cq *cq, void *arg)
3685 {
3686         /* NB I'm not allowed to schedule this conn once its refcount has
3687          * reached 0.  Since fundamentally I'm racing with scheduler threads
3688          * consuming my CQ I could be called after all completions have
3689          * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
3690          * and this CQ is about to be destroyed so I NOOP. */
3691         struct kib_conn *conn = arg;
3692         struct kib_sched_info *sched = conn->ibc_sched;
3693         unsigned long flags;
3694
3695         LASSERT(cq == conn->ibc_cq);
3696
3697         spin_lock_irqsave(&sched->ibs_lock, flags);
3698
3699         conn->ibc_ready = 1;
3700
3701         if (!conn->ibc_scheduled &&
3702             (conn->ibc_nrx > 0 ||
3703              conn->ibc_nsends_posted > 0)) {
3704                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
3705                 conn->ibc_scheduled = 1;
3706                 list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
3707
3708                 if (waitqueue_active(&sched->ibs_waitq))
3709                         wake_up(&sched->ibs_waitq);
3710         }
3711
3712         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3713 }
3714
3715 void
3716 kiblnd_cq_event(struct ib_event *event, void *arg)
3717 {
3718         struct kib_conn *conn = arg;
3719
3720         CERROR("%s: async CQ event type %d\n",
3721                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3722 }
3723
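/*
 * Per-CPT scheduler: polls the CQ of each conn queued on its
 * kib_sched_info one completion at a time, re-arming the CQ when it
 * drains and re-queueing the conn while completions remain.
 */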
3724 int
3725 kiblnd_scheduler(void *arg)
3726 {
3727         long                    id = (long)arg;
3728         struct kib_sched_info   *sched;
3729         struct kib_conn *conn;
3730         wait_queue_entry_t      wait;
3731         unsigned long           flags;
3732         struct ib_wc            wc;
3733         int                     did_something;
3734         int                     busy_loops = 0;
3735         int                     rc;
3736
3737         cfs_block_allsigs();
3738
3739         init_waitqueue_entry(&wait, current);
3740
3741         sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
3742
3743         rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
3744         if (rc != 0) {
3745                 CWARN("Unable to bind on CPU partition %d, please verify "
3746                       "whether all CPUs are healthy and reload modules if "
3747                       "necessary, otherwise your system might be at risk of "
3748                       "low performance\n", sched->ibs_cpt);
3749         }
3750
3751         spin_lock_irqsave(&sched->ibs_lock, flags);
3752
3753         while (!kiblnd_data.kib_shutdown) {
3754                 if (busy_loops++ >= IBLND_RESCHED) {
3755                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3756
3757                         cond_resched();
3758                         busy_loops = 0;
3759
3760                         spin_lock_irqsave(&sched->ibs_lock, flags);
3761                 }
3762
3763                 did_something = 0;
3764
3765                 if (!list_empty(&sched->ibs_conns)) {
3766                         conn = list_entry(sched->ibs_conns.next,
3767                                           struct kib_conn, ibc_sched_list);
3768                         /* take over kib_sched_conns' ref on conn... */
3769                         LASSERT(conn->ibc_scheduled);
3770                         list_del(&conn->ibc_sched_list);
3771                         conn->ibc_ready = 0;
3772
3773                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3774
3775                         wc.wr_id = IBLND_WID_INVAL;
3776
3777                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3778                         if (rc == 0) {
3779                                 rc = ib_req_notify_cq(conn->ibc_cq,
3780                                                       IB_CQ_NEXT_COMP);
3781                                 if (rc < 0) {
3782                                         CWARN("%s: ib_req_notify_cq failed: %d, "
3783                                               "closing connection\n",
3784                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
3785                                         kiblnd_close_conn(conn, -EIO);
3786                                         kiblnd_conn_decref(conn);
3787                                         spin_lock_irqsave(&sched->ibs_lock,
3788                                                               flags);
3789                                         continue;
3790                                 }
3791
3792                                 rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3793                         }
3794
3795                         if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
3796                                 LCONSOLE_ERROR(
3797                                         "ib_poll_cq (rc: %d) returned invalid "
3798                                         "wr_id, opcode %d, status: %d, "
3799                                         "vendor_err: %d, conn: %s status: %d\n"
3800                                         "Please upgrade firmware and OFED or "
3801                                         "contact vendor.\n", rc,
3802                                         wc.opcode, wc.status, wc.vendor_err,
3803                                         libcfs_nid2str(conn->ibc_peer->ibp_nid),
3804                                         conn->ibc_state);
3805                                 rc = -EINVAL;
3806                         }
3807
3808                         if (rc < 0) {
3809                                 CWARN("%s: ib_poll_cq failed: %d, "
3810                                       "closing connection\n",
3811                                       libcfs_nid2str(conn->ibc_peer->ibp_nid),
3812                                       rc);
3813                                 kiblnd_close_conn(conn, -EIO);
3814                                 kiblnd_conn_decref(conn);
3815                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3816                                 continue;
3817                         }
3818
3819                         spin_lock_irqsave(&sched->ibs_lock, flags);
3820
3821                         if (rc != 0 || conn->ibc_ready) {
3822                                 /* There may be another completion waiting; get
3823                                  * another scheduler to check while I handle
3824                                  * this one... */
3825                                 /* +1 ref for sched_conns */
3826                                 kiblnd_conn_addref(conn);
3827                                 list_add_tail(&conn->ibc_sched_list,
3828                                                   &sched->ibs_conns);
3829                                 if (waitqueue_active(&sched->ibs_waitq))
3830                                         wake_up(&sched->ibs_waitq);
3831                         } else {
3832                                 conn->ibc_scheduled = 0;
3833                         }
3834
3835                         if (rc != 0) {
3836                                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3837                                 kiblnd_complete(&wc);
3838
3839                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3840                         }
3841
3842                         kiblnd_conn_decref(conn); /* ...drop my ref from above */
3843                         did_something = 1;
3844                 }
3845
3846                 if (did_something)
3847                         continue;
3848
3849                 set_current_state(TASK_INTERRUPTIBLE);
3850                 add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
3851                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3852
3853                 schedule();
3854                 busy_loops = 0;
3855
3856                 remove_wait_queue(&sched->ibs_waitq, &wait);
3857                 set_current_state(TASK_RUNNING);
3858                 spin_lock_irqsave(&sched->ibs_lock, flags);
3859         }
3860
3861         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3862
3863         kiblnd_thread_fini();
3864         return 0;
3865 }
3866
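/*
 * Device failover thread (only started when the dev_failover tunable is
 * set): retries kiblnd_dev_failover() on devices in kib_failed_devs and,
 * when otherwise idle, periodically checks every device in case a
 * bonding failover left us listening on the wrong HCA.
 */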
3867 int
3868 kiblnd_failover_thread(void *arg)
3869 {
3870         rwlock_t        *glock = &kiblnd_data.kib_global_lock;
3871         struct kib_dev *dev;
3872         wait_queue_entry_t wait;
3873         unsigned long    flags;
3874         int              rc;
3875
3876         LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
3877
3878         cfs_block_allsigs();
3879
3880         init_waitqueue_entry(&wait, current);
3881         write_lock_irqsave(glock, flags);
3882
3883         while (!kiblnd_data.kib_shutdown) {
3884                 int     do_failover = 0;
3885                 int     long_sleep;
3886
3887                 list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
3888                                     ibd_fail_list) {
3889                         if (ktime_get_seconds() < dev->ibd_next_failover)
3890                                 continue;
3891                         do_failover = 1;
3892                         break;
3893                 }
3894
3895                 if (do_failover) {
3896                         list_del_init(&dev->ibd_fail_list);
3897                         dev->ibd_failover = 1;
3898                         write_unlock_irqrestore(glock, flags);
3899
3900                         rc = kiblnd_dev_failover(dev);
3901
3902                         write_lock_irqsave(glock, flags);
3903
3904                         LASSERT (dev->ibd_failover);
3905                         dev->ibd_failover = 0;
3906                         if (rc >= 0) { /* Device is OK or failover succeeded */
3907                                 dev->ibd_next_failover = ktime_get_seconds() + 3;
3908                                 continue;
3909                         }
3910
3911                         /* failed to failover, retry later */
3912                         dev->ibd_next_failover = ktime_get_seconds() +
3913                                                  min(dev->ibd_failed_failover, 10);
3914                         if (kiblnd_dev_can_failover(dev)) {
3915                                 list_add_tail(&dev->ibd_fail_list,
3916                                               &kiblnd_data.kib_failed_devs);
3917                         }
3918
3919                         continue;
3920                 }
3921
3922                 /* long sleep if no more pending failover */
3923                 long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
3924
3925                 set_current_state(TASK_INTERRUPTIBLE);
3926                 add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3927                 write_unlock_irqrestore(glock, flags);
3928
3929                 rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
3930                                                    cfs_time_seconds(1));
3931                 set_current_state(TASK_RUNNING);
3932                 remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3933                 write_lock_irqsave(glock, flags);
3934
3935                 if (!long_sleep || rc != 0)
3936                         continue;
3937
3938                 /* After a long sleep, routinely check all active devices:
3939                  * without this, if a dev has no active connection and no
3940                  * local SEND, we could listen on the wrong HCA forever
3941                  * after a bonding failover */
3942                 list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
3943                         if (kiblnd_dev_can_failover(dev)) {
3944                                 list_add_tail(&dev->ibd_fail_list,
3945                                               &kiblnd_data.kib_failed_devs);
3946                         }
3947                 }
3948         }
3949
3950         write_unlock_irqrestore(glock, flags);
3951
3952         kiblnd_thread_fini();
3953         return 0;
3954 }