/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

#define MAX_CONN_RACES_BEFORE_ABORT 20

static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni);
static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
                                       int error);
static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx,
                               int type, int body_nob);
static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
                            int resid, struct kib_rdma_desc *dstrd, u64 dstcookie);
static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);

static void kiblnd_unmap_tx(struct kib_tx *tx);
static void kiblnd_check_sends_locked(struct kib_conn *conn);

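/* Finalise a completed tx: unmap its buffers, drop its connection ref,
 * return it to its pool and only then finalise any attached LNet
 * messages. NB: the caller must already have cleared tx_queued,
 * tx_sending and tx_waiting; the LASSERTs below enforce that.
 */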
void
kiblnd_tx_done(struct kib_tx *tx)
{
        struct lnet_msg *lntmsg[2];
        enum lnet_msg_hstatus hstatus = tx->tx_hstatus;
        int         rc;
        int         i;

        LASSERT (!in_interrupt());
        LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
        LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
        LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer_ni response */
        LASSERT (tx->tx_pool != NULL);

        kiblnd_unmap_tx(tx);

        /* tx may have up to 2 lnet msgs to finalise */
        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
        rc = tx->tx_status;

        if (tx->tx_conn != NULL) {
                kiblnd_conn_decref(tx->tx_conn);
                tx->tx_conn = NULL;
        }

        tx->tx_nwrq = tx->tx_nsge = 0;
        tx->tx_status = 0;

        kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

        /* delay finalize until my descs have been freed; NB the tx has
         * been returned to its pool above, so use the health status
         * saved earlier rather than touching the tx again */
        for (i = 0; i < 2; i++) {
                if (lntmsg[i] == NULL)
                        continue;

                /* propagate health status to LNet for requests */
                if (i == 0)
                        lntmsg[i]->msg_health_status = hstatus;

                lnet_finalize(lntmsg[i], rc);
        }
}

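/* Complete every tx on @txlist with completion status @status and,
 * unless @hstatus is LNET_MSG_STATUS_OK, health status @hstatus.
 * Used to abort a whole queue of transmits, e.g. when a reconnection
 * attempt is abandoned.
 */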
void
kiblnd_txlist_done(struct list_head *txlist, int status,
                   enum lnet_msg_hstatus hstatus)
{
        struct kib_tx *tx;

        while (!list_empty(txlist)) {
                tx = list_entry(txlist->next, struct kib_tx, tx_list);

                list_del(&tx->tx_list);
                /* complete now */
                tx->tx_waiting = 0;
                tx->tx_status = status;
                if (hstatus != LNET_MSG_STATUS_OK)
                        tx->tx_hstatus = hstatus;
                kiblnd_tx_done(tx);
        }
}

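/* Grab an idle tx from the tx poolset of the CPT that @target hashes
 * to; returns NULL if the pool is exhausted. The LASSERTs document the
 * invariants a freshly allocated tx must satisfy.
 */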
static struct kib_tx *
kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
{
        struct kib_net *net = ni->ni_data;
        struct list_head *node;
        struct kib_tx *tx;
        struct kib_tx_poolset *tps;

        tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
        node = kiblnd_pool_alloc_node(&tps->tps_poolset);
        if (node == NULL)
                return NULL;
        tx = container_of(node, struct kib_tx, tx_list);

        LASSERT (tx->tx_nwrq == 0);
        LASSERT (!tx->tx_queued);
        LASSERT (tx->tx_sending == 0);
        LASSERT (!tx->tx_waiting);
        LASSERT (tx->tx_status == 0);
        LASSERT (tx->tx_conn == NULL);
        LASSERT (tx->tx_lntmsg[0] == NULL);
        LASSERT (tx->tx_lntmsg[1] == NULL);
        LASSERT (tx->tx_nfrags == 0);

        tx->tx_gaps = false;
        tx->tx_hstatus = LNET_MSG_STATUS_OK;

        return tx;
}

static void
kiblnd_drop_rx(struct kib_rx *rx)
{
        struct kib_conn *conn = rx->rx_conn;
        struct kib_sched_info *sched = conn->ibc_sched;
        unsigned long flags;

        spin_lock_irqsave(&sched->ibs_lock, flags);
        LASSERT(conn->ibc_nrx > 0);
        conn->ibc_nrx--;
        spin_unlock_irqrestore(&sched->ibs_lock, flags);

        kiblnd_conn_decref(conn);
}

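/* Re-post a receive buffer on @rx's connection. @credit selects what
 * the repost gives back: a credit owed to the peer_ni
 * (IBLND_POSTRX_PEER_CREDIT), a reserved credit
 * (IBLND_POSTRX_RSRVD_CREDIT), or nothing (IBLND_POSTRX_NO_CREDIT).
 */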
int
kiblnd_post_rx(struct kib_rx *rx, int credit)
{
        struct kib_conn *conn = rx->rx_conn;
        struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr *bad_wrq = NULL;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
#endif
        int rc;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
                 credit == IBLND_POSTRX_PEER_CREDIT ||
                 credit == IBLND_POSTRX_RSRVD_CREDIT);
#ifdef HAVE_IB_GET_DMA_MR
        LASSERT(mr != NULL);

        rx->rx_sge.lkey   = mr->lkey;
#else
        rx->rx_sge.lkey   = conn->ibc_hdev->ibh_pd->local_dma_lkey;
#endif
        rx->rx_sge.addr   = rx->rx_msgaddr;
        rx->rx_sge.length = IBLND_MSG_SIZE;

        rx->rx_wrq.next = NULL;
        rx->rx_wrq.sg_list = &rx->rx_sge;
        rx->rx_wrq.num_sge = 1;
        rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

        LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
        LASSERT (rx->rx_nob >= 0);              /* not posted */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return 0;
        }

        rx->rx_nob = -1;                        /* flag posted */

        /* NB: need an extra reference after ib_post_recv because we don't
         * own this rx (and rx::rx_conn) anymore, LU-5678.
         */
        kiblnd_conn_addref(conn);
#ifdef HAVE_IB_POST_SEND_RECV_CONST
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
                          (const struct ib_recv_wr **)&bad_wrq);
#else
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
#endif
        if (unlikely(rc != 0)) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        }

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                goto out;

        if (unlikely(rc != 0)) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);     /* No more posts for this rx */
                goto out;
        }

        if (credit == IBLND_POSTRX_NO_CREDIT)
                goto out;

        spin_lock(&conn->ibc_lock);
        if (credit == IBLND_POSTRX_PEER_CREDIT)
                conn->ibc_outstanding_credits++;
        else
                conn->ibc_reserved_credits++;
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

out:
        kiblnd_conn_decref(conn);
        return rc;
}

static struct kib_tx *
kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
        struct list_head *tmp;

        list_for_each(tmp, &conn->ibc_active_txs) {
                struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);

                LASSERT(!tx->tx_queued);
                LASSERT(tx->tx_sending != 0 || tx->tx_waiting);

                if (tx->tx_cookie != cookie)
                        continue;

                if (tx->tx_waiting &&
                    tx->tx_msg->ibm_type == txtype)
                        return tx;

                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
                      tx->tx_waiting ? "" : "NOT ",
                      tx->tx_msg->ibm_type, txtype);
        }
        return NULL;
}

static void
kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, u64 cookie)
{
        struct kib_tx *tx;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        int idle;

        spin_lock(&conn->ibc_lock);

        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
        if (tx == NULL) {
                spin_unlock(&conn->ibc_lock);

                CWARN("Unmatched completion type %x cookie %#llx from %s\n",
                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kiblnd_close_conn(conn, -EPROTO);
                return;
        }

        if (tx->tx_status == 0) {               /* success so far */
                if (status < 0) {               /* failed? */
                        tx->tx_status = status;
                        tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
                } else if (txtype == IBLND_MSG_GET_REQ) {
                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
                }
        }

        tx->tx_waiting = 0;

        idle = !tx->tx_queued && (tx->tx_sending == 0);
        if (idle)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(tx);
}

static void
kiblnd_send_completion(struct kib_conn *conn, int type, int status, u64 cookie)
{
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);

        if (tx == NULL) {
                CERROR("Can't get tx for completion %x for %s\n",
                       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
        kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));

        kiblnd_queue_tx(tx, conn);
}

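/* Dispatch a received message. Credits carried by the message are
 * banked first (closing the connection on a credit overflow), then the
 * message is handled by type; finally the rx buffer is re-posted
 * unless a handler chose IBLND_POSTRX_DONT_POST, e.g. because
 * lnet_parse() has taken ownership of the buffer.
 */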
static void
kiblnd_handle_rx(struct kib_rx *rx)
{
        struct kib_msg *msg = rx->rx_msg;
        struct kib_conn   *conn = rx->rx_conn;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        int           credits = msg->ibm_credits;
        struct kib_tx *tx;
        int           rc = 0;
        int           rc2;
        int           post_credit;

        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        CDEBUG (D_NET, "Received %x[%d] from %s\n",
                msg->ibm_type, credits,
                libcfs_nid2str(conn->ibc_peer->ibp_nid));

        if (credits != 0) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);

                if (conn->ibc_credits + credits >
                    conn->ibc_queue_depth) {
                        rc2 = conn->ibc_credits;
                        spin_unlock(&conn->ibc_lock);

                        CERROR("Bad credits from %s: %d + %d > %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               rc2, credits,
                               conn->ibc_queue_depth);

                        kiblnd_close_conn(conn, -EPROTO);
                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
                        return;
                }

                conn->ibc_credits += credits;

                /* This ensures the credit taken by NOOP can be returned */
                if (msg->ibm_type == IBLND_MSG_NOOP &&
                    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
                        conn->ibc_outstanding_credits++;

                kiblnd_check_sends_locked(conn);
                spin_unlock(&conn->ibc_lock);
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Bad IBLND message type %x from %s\n",
                       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_NO_CREDIT;
                rc = -EPROTO;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                        break;
                }

                if (credits != 0) /* credit already posted */
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else              /* a keepalive NOOP */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_IMMEDIATE:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
                                msg->ibm_srcnid, rx, 0);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_NAK:
                CWARN ("PUT_NACK from %s\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_PUT_ACK:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;

                spin_lock(&conn->ibc_lock);
                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                        msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT (tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer_ni has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = tx->tx_nsge = 0;  /* overwrite PUT_REQ */

                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                tx->tx_waiting = 0;     /* clear waiting and queue atomically */
                kiblnd_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBLND_MSG_PUT_DONE:
                post_credit = IBLND_POSTRX_PEER_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_GET_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_GET_DONE:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kiblnd_close_conn(conn, rc);

        if (post_credit != IBLND_POSTRX_DONT_POST)
                kiblnd_post_rx(rx, post_credit);
}

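/* Completion handler for a posted receive: validate the work
 * completion, unpack and sanity-check the message, then either handle
 * it, park it on ibc_early_rxs if the connection isn't established
 * yet, or tear the connection down on error.
 */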
static void
kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
        struct kib_msg *msg = rx->rx_msg;
        struct kib_conn   *conn = rx->rx_conn;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_net *net = ni->ni_data;
        int rc;
        int err = -EIO;

        LASSERT (net != NULL);
        LASSERT (rx->rx_nob < 0);               /* was posted */
        rx->rx_nob = 0;                         /* isn't now */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
                goto ignore;

        if (status != IB_WC_SUCCESS) {
                CNETERR("Rx from %s failed: %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                goto failed;
        }

        LASSERT (nob >= 0);
        rx->rx_nob = nob;

        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
        if (rc != 0) {
                CERROR ("Error %d unpacking rx from %s\n",
                        rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
            msg->ibm_dstnid != ni->ni_nid ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != net->ibn_incarnation) {
                CERROR ("Stale rx from %s\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                err = -ESTALE;
                goto failed;
        }

        /* set time last known alive */
        kiblnd_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
                unsigned long  flags;

                write_lock_irqsave(g_lock, flags);
                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(g_lock, flags);
                        return;
                }
                write_unlock_irqrestore(g_lock, flags);
        }
        kiblnd_handle_rx(rx);
        return;

 failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kiblnd_close_conn(conn, err);
 ignore:
        kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
}

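/* Map the fragments of @rd for RDMA using the FMR/FastReg poolset of
 * this tx's CPT. On success rd_key is set to the key the peer_ni will
 * use, and for gapless mappings the fragments are collapsed into a
 * single virtually-contiguous one.
 */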
static int
kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
                  struct kib_rdma_desc *rd, u32 nob)
{
        struct kib_hca_dev *hdev;
        struct kib_dev *dev;
        struct kib_fmr_poolset *fps;
        int                     cpt;
        int                     rc;
        int i;

        LASSERT(tx->tx_pool != NULL);
        LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);

        dev = net->ibn_dev;
        hdev = tx->tx_pool->tpo_hdev;
        cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;

        /*
         * If we're dealing with FastReg, but the device doesn't
         * support GAPS and the tx has GAPS, then there is no real point
         * in trying to map the memory, because it'll just fail. So
         * preemptively fail with an appropriate message.
         */
        if ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED) &&
            !(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT) &&
            tx->tx_gaps) {
                CERROR("Using FastReg with no GAPS support, but tx has gaps. "
                       "Try setting use_fastreg_gaps to 1\n");
                return -EPROTONOSUPPORT;
        }

#ifdef HAVE_FMR_POOL_API
        /*
         * If FMR is in use (FMR does not support gaps) and the tx has
         * gaps, then we should make sure that the number of fragments
         * we'll be sending over fits within the number of fragments
         * negotiated on the connection, otherwise we won't be able to
         * RDMA the data.
         * We need to maintain the number-of-fragments negotiation on
         * the connection for backwards compatibility.
         */
        if (tx->tx_gaps && (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)) {
                if (tx->tx_conn &&
                    tx->tx_conn->ibc_max_frags <= rd->rd_nfrags) {
                        CERROR("TX number of frags (%d) is >= connection"
                               " max frags (%d). Consider setting peer's"
                               " map_on_demand to 256\n", tx->tx_nfrags,
                               tx->tx_conn->ibc_max_frags);
                        return -EFBIG;
                }
        }
#endif

        fps = net->ibn_fmr_ps[cpt];
        rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
        if (rc != 0) {
                CERROR("Can't map %u bytes (%u/%u frags): %d\n", nob,
                       tx->tx_nfrags, rd->rd_nfrags, rc);
                return rc;
        }

        /*
         * If rd is not tx_rd, it's going to get sent to a peer_ni, who will
         * need the rkey
         */
        rd->rd_key = tx->tx_fmr.fmr_key;
        /*
         * for FastReg or FMR with no gaps we can accumulate all
         * the fragments in one FastReg or FMR fragment.
         */
        if (
#ifdef HAVE_FMR_POOL_API
            ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
             && !tx->tx_gaps) ||
#endif
            (dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)) {
                /* FMR requires zero based address */
#ifdef HAVE_FMR_POOL_API
                if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
                        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
#endif
                rd->rd_frags[0].rf_nob = nob;
                rd->rd_nfrags = 1;
        } else {
                /*
                 * We're transmitting with gaps using FMR.
                 * We'll need to use multiple fragments and identify the
                 * zero based address of each fragment.
                 */
                for (i = 0; i < rd->rd_nfrags; i++) {
                        rd->rd_frags[i].rf_addr &= ~hdev->ibh_page_mask;
                        rd->rd_frags[i].rf_addr += i << hdev->ibh_page_shift;
                }
        }

        return 0;
}

static void
kiblnd_unmap_tx(struct kib_tx *tx)
{
        if (
#ifdef HAVE_FMR_POOL_API
                tx->tx_fmr.fmr_pfmr ||
#endif
                tx->tx_fmr.fmr_frd)
                kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);

        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                tx->tx_nfrags = 0;
        }
}

#ifdef HAVE_IB_GET_DMA_MR
static struct ib_mr *
kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
{
        struct kib_net *net = ni->ni_data;
        struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;

        tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

        /*
         * if map-on-demand is turned on and the device supports
         * either FMR or FastReg then use that. Otherwise use global
         * memory regions. If that's not available either, then you're
         * dead in the water and fail the operation.
         */
        if (tunables->lnd_map_on_demand &&
            (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED
#ifdef HAVE_FMR_POOL_API
             || net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED
#endif
        ))
                return NULL;

        /*
         * hdev->ibh_mrs can be NULL. This case is dealt with gracefully
         * in the call chain. The mapping will fail with appropriate error
         * message.
         */
        return hdev->ibh_mrs;
}
#endif

static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
                         struct kib_rdma_desc *rd, int nfrags)
{
        struct kib_net *net = ni->ni_data;
        struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = NULL;
#endif
        __u32 nob;
        int i;

        /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the
         * RDMA sink */
        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        tx->tx_nfrags = nfrags;

        rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
                                          tx->tx_nfrags, tx->tx_dmadir);

        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                nob += rd->rd_frags[i].rf_nob;
        }

#ifdef HAVE_IB_GET_DMA_MR
        mr = kiblnd_find_rd_dma_mr(ni, rd);
        if (mr != NULL) {
                /* found pre-mapping MR */
                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
                return 0;
        }
#endif

        if (net->ibn_fmr_ps != NULL)
                return kiblnd_fmr_map_tx(net, tx, rd, nob);

        return -EINVAL;
}

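/* Build the scatterlist for @nkiov page fragments starting @offset
 * bytes in, then map it for RDMA. A fragment that ends short of a
 * page boundary mid-buffer marks the tx as having gaps, which
 * restricts how it can be mapped (see kiblnd_fmr_map_tx()).
 */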
static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
                                struct kib_rdma_desc *rd, int nkiov,
                                struct bio_vec *kiov, int offset, int nob)
{
        struct kib_net *net = ni->ni_data;
        struct scatterlist *sg;
        int                 fragnob;
        int                 max_nkiov;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT(nob > 0);
        LASSERT(nkiov > 0);
        LASSERT(net != NULL);

        while (offset >= kiov->bv_len) {
                offset -= kiov->bv_len;
                nkiov--;
                kiov++;
                LASSERT(nkiov > 0);
        }

        max_nkiov = nkiov;

        sg = tx->tx_frags;
        do {
                LASSERT(nkiov > 0);

                fragnob = min((int)(kiov->bv_len - offset), nob);

                /*
                 * We're allowed to start at a non-aligned page offset in
                 * the first fragment and end at a non-aligned page offset
                 * in the last fragment.
                 */
                if ((fragnob < (int)(kiov->bv_len - offset)) &&
                    nkiov < max_nkiov && nob > fragnob) {
                        CDEBUG(D_NET, "fragnob %d < available page %d: "
                                      "%d remaining kiovs, %d nob left\n",
                               fragnob, (int)(kiov->bv_len - offset),
                               nkiov, nob);
                        tx->tx_gaps = true;
                }

                sg_set_page(sg, kiov->bv_page, fragnob,
                            kiov->bv_offset + offset);
                sg = sg_next(sg);
                if (!sg) {
                        CERROR("lacking enough sg entries to map tx\n");
                        return -EFAULT;
                }

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

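/* Try to post @tx on @conn's queue pair. Returns -EAGAIN (leaving the
 * tx queued) if the connection has already posted its limit of sends,
 * has no send credit, or would use its last credit for anything other
 * than a NOOP; returns 0 on success and -EIO if posting fails, in
 * which case the connection is closed.
 */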
static int
kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
__must_hold(&conn->ibc_lock)
{
        struct kib_msg *msg = tx->tx_msg;
        struct kib_peer_ni *peer_ni = conn->ibc_peer;
        struct lnet_ni *ni = peer_ni->ibp_ni;
        int ver = conn->ibc_version;
        int rc;
        int done;

        LASSERT(tx->tx_queued);
        /* We rely on this for QP sizing */
        LASSERT(tx->tx_nwrq > 0 && tx->tx_nsge >= 0);
        LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);

        LASSERT(credit == 0 || credit == 1);
        LASSERT(conn->ibc_outstanding_credits >= 0);
        LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
        LASSERT(conn->ibc_credits >= 0);
        LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);

        if (conn->ibc_nsends_posted ==
            kiblnd_concurrent_sends(ver, ni)) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&   /* last credit reserved */
            msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                CDEBUG(D_NET, "%s: not using last credit\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        /* NB don't drop ibc_lock before bumping tx_sending */
        list_del(&tx->tx_list);
        tx->tx_queued = 0;

        if (msg->ibm_type == IBLND_MSG_NOOP &&
            (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
             (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                /* OK to drop when posted enough NOOPs, since
                 * kiblnd_check_sends_locked will queue NOOP again when
                 * posted NOOPs complete */
                spin_unlock(&conn->ibc_lock);
                tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
                kiblnd_tx_done(tx);
                spin_lock(&conn->ibc_lock);
                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                       libcfs_nid2str(peer_ni->ibp_nid),
                       conn->ibc_noops_posted);
                return 0;
        }

        kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
                        peer_ni->ibp_nid, conn->ibc_incarnation);

        conn->ibc_credits -= credit;
        conn->ibc_outstanding_credits = 0;
        conn->ibc_nsends_posted++;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted++;

        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
         * and then re-queued here.  It's (just) possible that
         * tx_sending is non-zero if we've not done the tx_complete()
         * from the first send; hence the ++ rather than = below. */
        tx->tx_sending++;
        list_add(&tx->tx_list, &conn->ibc_active_txs);

        /* I'm still holding ibc_lock! */
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
                rc = -ECONNABORTED;
        } else if (tx->tx_pool->tpo_pool.po_failed ||
                 conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
                /* close_conn will launch failover */
                rc = -ENETDOWN;
        } else {
                struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
                struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
                struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;

                if (frd != NULL) {
                        if (!frd->frd_valid) {
                                wr = &frd->frd_inv_wr.wr;
                                wr->next = &frd->frd_fastreg_wr.wr;
                        } else {
                                wr = &frd->frd_fastreg_wr.wr;
                        }
                        frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
                }

                LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
                         "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n",
                         bad->wr_id, bad->opcode, bad->send_flags,
                         libcfs_nid2str(conn->ibc_peer->ibp_nid));

                bad = NULL;
                if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus))
                        rc = -EINVAL;
                else
#ifdef HAVE_IB_POST_SEND_RECV_CONST
                        rc = ib_post_send(conn->ibc_cmid->qp, wr,
                                          (const struct ib_send_wr **)&bad);
#else
                        rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
#endif
        }

        conn->ibc_last_send = ktime_get();

        if (rc == 0)
                return 0;

        /* NB credits are transferred in the actual
         * message, which can only be the last work item */
        conn->ibc_credits += credit;
        conn->ibc_outstanding_credits += msg->ibm_credits;
        conn->ibc_nsends_posted--;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        tx->tx_status = rc;
        tx->tx_waiting = 0;
        tx->tx_sending--;

        done = (tx->tx_sending == 0);
        if (done)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                CERROR("Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));
        else
                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));

        kiblnd_close_conn(conn, rc);

        if (done)
                kiblnd_tx_done(tx);

        spin_lock(&conn->ibc_lock);

        return -EIO;
}

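/* Push queued transmits at the connection until it runs out of work,
 * credits or send slots. Reserved credits move txs from
 * ibc_tx_queue_rsrvd onto the normal queue first, and a NOOP is
 * queued when kiblnd_need_noop() says one is required.
 */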
static void
kiblnd_check_sends_locked(struct kib_conn *conn)
{
        int ver = conn->ibc_version;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_tx *tx;

        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                CDEBUG(D_NET, "%s too soon\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        LASSERT(conn->ibc_nsends_posted <=
                kiblnd_concurrent_sends(ver, ni));
        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT (conn->ibc_reserved_credits >= 0);

        while (conn->ibc_reserved_credits > 0 &&
               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
                                struct kib_tx, tx_list);
                list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
        }

        if (kiblnd_need_noop(conn)) {
                spin_unlock(&conn->ibc_lock);

                tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
                if (tx != NULL)
                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

                spin_lock(&conn->ibc_lock);
                if (tx != NULL)
                        kiblnd_queue_tx_locked(tx, conn);
        }

        for (;;) {
                int credit;

                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
                                        struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_noops)) {
                        LASSERT (!IBLND_OOB_CAPABLE(ver));
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_noops.next,
                                        struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_queue.next,
                                        struct kib_tx, tx_list);
                } else
                        break;

                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                        break;
        }
}

static void
kiblnd_tx_complete(struct kib_tx *tx, int status)
{
        int           failed = (status != IB_WC_SUCCESS);
        struct kib_conn   *conn = tx->tx_conn;
        int           idle;

        if (tx->tx_sending <= 0) {
                CERROR("Received an event on a freed tx: %p status %d\n",
                       tx, tx->tx_status);
                return;
        }

        if (failed) {
                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CNETERR("Tx -> %s cookie %#llx"
                                " sending %d waiting %d: failed %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
                                status);

                kiblnd_close_conn(conn, -EIO);
        } else {
                kiblnd_peer_alive(conn->ibc_peer);
        }

        spin_lock(&conn->ibc_lock);

        /* I could be racing with rdma completion.  Whoever makes 'tx' idle
         * gets to free it, which also drops its ref on 'conn'. */

        tx->tx_sending--;
        conn->ibc_nsends_posted--;
        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        if (failed) {
                tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
                tx->tx_waiting = 0;             /* don't wait for peer_ni */
                tx->tx_status = -EIO;
        }

        idle = (tx->tx_sending == 0) &&         /* This is the final callback */
               !tx->tx_waiting &&               /* Not waiting for peer_ni */
               !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
        if (idle)
                list_del(&tx->tx_list);

        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(tx);
}

static void
kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type,
                   int body_nob)
{
        struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
        struct ib_sge *sge = &tx->tx_msgsge;
        struct ib_rdma_wr *wrq;
        int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = hdev->ibh_mrs;
#endif

        LASSERT(tx->tx_nwrq >= 0);
        LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
        LASSERT(nob <= IBLND_MSG_SIZE);
#ifdef HAVE_IB_GET_DMA_MR
        LASSERT(mr != NULL);
#endif

        kiblnd_init_msg(tx->tx_msg, type, body_nob);

#ifdef HAVE_IB_GET_DMA_MR
        sge->lkey   = mr->lkey;
#else
        sge->lkey   = hdev->ibh_pd->local_dma_lkey;
#endif
        sge->addr   = tx->tx_msgaddr;
        sge->length = nob;

        wrq = &tx->tx_wrq[tx->tx_nwrq];
        memset(wrq, 0, sizeof(*wrq));

        wrq->wr.next            = NULL;
        wrq->wr.wr_id           = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
        wrq->wr.sg_list         = sge;
        wrq->wr.num_sge         = 1;
        wrq->wr.opcode          = IB_WR_SEND;
        wrq->wr.send_flags      = IB_SEND_SIGNALED;

        tx->tx_nwrq++;
}

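/* Set up the RDMA-write work requests that move @resid bytes from this
 * tx's source descriptor to @dstrd, coalescing SGEs up to the
 * kib_wrq_sge tunable per work request, then append the
 * GET_DONE/PUT_DONE completion message. Returns the number of bytes
 * to transfer, or a negative errno if either descriptor is exhausted
 * or the fragment limit is hit.
 */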
static int
kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
                 int resid, struct kib_rdma_desc *dstrd, u64 dstcookie)
{
        struct kib_msg *ibmsg = tx->tx_msg;
        struct kib_rdma_desc *srcrd = tx->tx_rd;
        struct ib_rdma_wr *wrq = NULL;
        struct ib_sge     *sge;
        int                rc  = resid;
        int                srcidx;
        int                dstidx;
        int                sge_nob;
        int                wrq_sge;

        LASSERT(!in_interrupt());
        LASSERT(tx->tx_nwrq == 0 && tx->tx_nsge == 0);
        LASSERT(type == IBLND_MSG_GET_DONE || type == IBLND_MSG_PUT_DONE);

        for (srcidx = dstidx = wrq_sge = sge_nob = 0;
             resid > 0; resid -= sge_nob) {
                int     prev = dstidx;

                if (srcidx >= srcrd->rd_nfrags) {
                        CERROR("Src buffer exhausted: %d frags\n", srcidx);
                        rc = -EPROTO;
                        break;
                }

                if (dstidx >= dstrd->rd_nfrags) {
                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                        rc = -EPROTO;
                        break;
                }

                if (tx->tx_nwrq >= conn->ibc_max_frags) {
                        CERROR("RDMA has too many fragments for peer_ni %s (%d), "
                               "src idx/frags: %d/%d dst idx/frags: %d/%d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               conn->ibc_max_frags,
                               srcidx, srcrd->rd_nfrags,
                               dstidx, dstrd->rd_nfrags);
                        rc = -EMSGSIZE;
                        break;
                }

                sge_nob = min3(kiblnd_rd_frag_size(srcrd, srcidx),
                               kiblnd_rd_frag_size(dstrd, dstidx),
                               resid);

                sge = &tx->tx_sge[tx->tx_nsge];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                sge->length = sge_nob;

                if (wrq_sge == 0) {
                        wrq = &tx->tx_wrq[tx->tx_nwrq];

                        wrq->wr.next    = &(wrq + 1)->wr;
                        wrq->wr.wr_id   = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
                        wrq->wr.sg_list = sge;
                        wrq->wr.opcode  = IB_WR_RDMA_WRITE;
                        wrq->wr.send_flags = 0;

#ifdef HAVE_IB_RDMA_WR
                        wrq->remote_addr        = kiblnd_rd_frag_addr(dstrd,
                                                                      dstidx);
                        wrq->rkey               = kiblnd_rd_frag_key(dstrd,
                                                                     dstidx);
#else
                        wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd,
                                                                        dstidx);
                        wrq->wr.wr.rdma.rkey    = kiblnd_rd_frag_key(dstrd,
                                                                     dstidx);
#endif
                }

                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, sge_nob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, sge_nob);

                wrq_sge++;
                if (wrq_sge == *kiblnd_tunables.kib_wrq_sge || dstidx != prev) {
                        tx->tx_nwrq++;
                        wrq->wr.num_sge = wrq_sge;
                        wrq_sge = 0;
                }
                tx->tx_nsge++;
        }

        if (rc < 0)     /* no RDMA if completing with failure */
                tx->tx_nwrq = tx->tx_nsge = 0;

        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
                           type, sizeof(struct kib_completion_msg));

        return rc;
}

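/* Queue @tx on the list appropriate to its message type: requests wait
 * for reserved credits, completions ride on the no-credit queue, and
 * NOOPs depend on whether the protocol version can send them
 * out-of-band. A tx queued on a dead connection is parked on
 * ibc_zombie_txs instead.
 */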
static void
kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
{
        struct list_head *q;
        s64 timeout_ns;

        LASSERT(tx->tx_nwrq > 0);       /* work items set up */
        LASSERT(!tx->tx_queued);        /* not queued for sending already */
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) {
                tx->tx_status = -ECONNABORTED;
                tx->tx_waiting = 0;
                if (tx->tx_conn != NULL) {
                        /* PUT_DONE first attached to conn as a PUT_REQ */
                        LASSERT(tx->tx_conn == conn);
                        LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
                        tx->tx_conn = NULL;
                        kiblnd_conn_decref(conn);
                }
                list_add(&tx->tx_list, &conn->ibc_zombie_txs);

                return;
        }

        timeout_ns = kiblnd_timeout() * NSEC_PER_SEC;
        tx->tx_queued = 1;
        tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);

        if (tx->tx_conn == NULL) {
                kiblnd_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
        } else {
                /* PUT_DONE first attached to conn as a PUT_REQ */
                LASSERT (tx->tx_conn == conn);
                LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
        }

        switch (tx->tx_msg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_PUT_REQ:
        case IBLND_MSG_GET_REQ:
                q = &conn->ibc_tx_queue_rsrvd;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                q = &conn->ibc_tx_queue_nocred;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        q = &conn->ibc_tx_queue_nocred;
                else
                        q = &conn->ibc_tx_noops;
                break;

        case IBLND_MSG_IMMEDIATE:
                q = &conn->ibc_tx_queue;
                break;
        }

        list_add_tail(&tx->tx_list, q);
}

static void
kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
{
        spin_lock(&conn->ibc_lock);
        kiblnd_queue_tx_locked(tx, conn);
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);
}

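/* Resolve @dstaddr while binding the cmid to a privileged source port,
 * scanning down from PROT_SOCK-1 and skipping ports that are already
 * in use. The caller must hold CAP_NET_BIND_SERVICE; see
 * kiblnd_resolve_addr() below.
 */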
1310 static int
1311 kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid,
1312                         struct sockaddr_in *srcaddr,
1313                         struct sockaddr_in *dstaddr,
1314                         int timeout_ms)
1315 {
1316         unsigned short port;
1317         int rc;
1318
1319         /* allow the port to be reused */
1320         rc = rdma_set_reuseaddr(cmid, 1);
1321         if (rc != 0) {
1322                 CERROR("Unable to set reuse on cmid: %d\n", rc);
1323                 return rc;
1324         }
1325
1326         /* look for a free privileged port */
1327         for (port = PROT_SOCK-1; port > 0; port--) {
1328                 srcaddr->sin_port = htons(port);
1329                 rc = rdma_resolve_addr(cmid,
1330                                        (struct sockaddr *)srcaddr,
1331                                        (struct sockaddr *)dstaddr,
1332                                        timeout_ms);
1333                 if (rc == 0) {
1334                         CDEBUG(D_NET, "bound to port %hu\n", port);
1335                         return 0;
1336                 } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
1337                         CDEBUG(D_NET, "bind to port %hu failed: %d\n",
1338                                port, rc);
1339                 } else {
1340                         return rc;
1341                 }
1342         }
1343
1344         CERROR("cannot bind to a free privileged port: rc = %d\n", rc);
1345
1346         return rc;
1347 }
1348
1349 static int
1350 kiblnd_resolve_addr(struct rdma_cm_id *cmid,
1351                     struct sockaddr_in *srcaddr,
1352                     struct sockaddr_in *dstaddr,
1353                     int timeout_ms)
1354 {
1355         const struct cred *old_creds = NULL;
1356         struct cred *new_creds;
1357         int rc;
1358
1359         if (!capable(CAP_NET_BIND_SERVICE)) {
1360                 new_creds = prepare_kernel_cred(NULL);
1361                 if (!new_creds)
1362                         return -ENOMEM;
1363
1364                 cap_raise(new_creds->cap_effective, CAP_NET_BIND_SERVICE);
1365                 old_creds = override_creds(new_creds);
1366         }
1367
1368         rc = kiblnd_resolve_addr_cap(cmid, srcaddr, dstaddr, timeout_ms);
1369
1370         if (old_creds)
1371                 revert_creds(old_creds);
1372
1373         return rc;
1374 }
1375
1376 static void
1377 kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
1378 {
1379         struct rdma_cm_id *cmid;
1380         struct kib_dev *dev;
1381         struct kib_net *net = peer_ni->ibp_ni->ni_data;
1382         struct sockaddr_in srcaddr;
1383         struct sockaddr_in dstaddr;
1384         int rc;
1385
1386         LASSERT (net != NULL);
1387         LASSERT (peer_ni->ibp_connecting > 0);
1388
1389         cmid = kiblnd_rdma_create_id(peer_ni->ibp_ni->ni_net_ns,
1390                                      kiblnd_cm_callback, peer_ni,
1391                                      RDMA_PS_TCP, IB_QPT_RC);
1392
1393         if (IS_ERR(cmid)) {
1394                 CERROR("Can't create CMID for %s: %ld\n",
1395                        libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid));
1396                 rc = PTR_ERR(cmid);
1397                 goto failed;
1398         }
1399
1400         dev = net->ibn_dev;
1401         memset(&srcaddr, 0, sizeof(srcaddr));
1402         srcaddr.sin_family = AF_INET;
1403         srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
1404
1405         memset(&dstaddr, 0, sizeof(dstaddr));
1406         dstaddr.sin_family = AF_INET;
1407         dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
1408         dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid));
1409
1410         kiblnd_peer_addref(peer_ni);               /* cmid's ref */
1411
1412         if (*kiblnd_tunables.kib_use_priv_port) {
1413                 rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
1414                                          kiblnd_timeout() * 1000);
1415         } else {
1416                 rc = rdma_resolve_addr(cmid,
1417                                        (struct sockaddr *)&srcaddr,
1418                                        (struct sockaddr *)&dstaddr,
1419                                        kiblnd_timeout() * 1000);
1420         }
1421         if (rc != 0) {
1422                 /* Can't initiate address resolution:  */
1423                 CERROR("Can't resolve addr for %s: %d\n",
1424                        libcfs_nid2str(peer_ni->ibp_nid), rc);
1425                 goto failed2;
1426         }
1427
1428         return;
1429
1430  failed2:
1431         kiblnd_peer_connect_failed(peer_ni, 1, rc);
1432         kiblnd_peer_decref(peer_ni);               /* cmid's ref */
1433         rdma_destroy_id(cmid);
1434         return;
1435  failed:
1436         kiblnd_peer_connect_failed(peer_ni, 1, rc);
1437 }
1438
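/* Retry a failed connection if the peer_ni is still marked for
 * reconnection.  Returns true when a new connect was launched; otherwise
 * completes any transmits queued on an unlinked peer_ni with
 * -ECONNABORTED and returns false. */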
1439 bool
1440 kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
1441 {
1442         rwlock_t *glock = &kiblnd_data.kib_global_lock;
1443         char *reason = NULL;
1444         LIST_HEAD(txs);
1445         unsigned long flags;
1446
1447         write_lock_irqsave(glock, flags);
1448         if (peer_ni->ibp_reconnecting == 0) {
1449                 if (peer_ni->ibp_accepting)
1450                         reason = "accepting";
1451                 else if (peer_ni->ibp_connecting)
1452                         reason = "connecting";
1453                 else if (!list_empty(&peer_ni->ibp_conns))
1454                         reason = "connected";
1455                 else /* connected then closed */
1456                         reason = "closed";
1457
1458                 goto no_reconnect;
1459         }
1460
1461         if (peer_ni->ibp_accepting)
1462                 CNETERR("Detected a race between accepting and reconnecting\n");
1463         peer_ni->ibp_reconnecting--;
1464
1465         if (!kiblnd_peer_active(peer_ni)) {
1466                 list_splice_init(&peer_ni->ibp_tx_queue, &txs);
1467                 reason = "unlinked";
1468                 goto no_reconnect;
1469         }
1470
1471         peer_ni->ibp_connecting++;
1472         peer_ni->ibp_reconnected++;
1473
1474         write_unlock_irqrestore(glock, flags);
1475
1476         kiblnd_connect_peer(peer_ni);
1477         return true;
1478
1479  no_reconnect:
1480         write_unlock_irqrestore(glock, flags);
1481
1482         CWARN("Aborting reconnection of %s: %s\n",
1483               libcfs_nid2str(peer_ni->ibp_nid), reason);
1484         kiblnd_txlist_done(&txs, -ECONNABORTED,
1485                            LNET_MSG_STATUS_LOCAL_ABORTED);
1486         return false;
1487 }
1488
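/* Queue 'tx' (which may be NULL when only a connection is wanted) for
 * 'nid'.  Fast path: a read lock suffices when the peer_ni already has an
 * established connection.  Slow path: retake the lock for writing to
 * queue on a connecting peer_ni, or allocate a new peer_ni outside the
 * lock, re-check for a racing creator, and launch lnd_conns_per_peer
 * connection attempts. */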
1489 void
1490 kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
1491 {
1492         struct kib_peer_ni *peer_ni;
1493         struct kib_peer_ni *peer2;
1494         struct kib_conn *conn;
1495         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
1496         unsigned long flags;
1497         int rc;
1498         int i;
1499         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
1500
1501         /* If I get here, I've committed to send, so I complete the tx with
1502          * failure on any problems
1503          */
1504
1505         LASSERT(!tx || !tx->tx_conn);     /* only set when assigned a conn */
1506         LASSERT(!tx || tx->tx_nwrq > 0);  /* work items have been set up */
1507
1508         /* First time, just use a read lock since I expect to find my peer_ni
1509          * connected
1510          */
1511         read_lock_irqsave(g_lock, flags);
1512
1513         peer_ni = kiblnd_find_peer_locked(ni, nid);
1514         if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) {
1515                 /* Found a peer_ni with an established connection */
1516                 conn = kiblnd_get_conn_locked(peer_ni);
1517                 kiblnd_conn_addref(conn); /* 1 ref for me... */
1518
1519                 read_unlock_irqrestore(g_lock, flags);
1520
1521                 if (tx != NULL)
1522                         kiblnd_queue_tx(tx, conn);
1523                 kiblnd_conn_decref(conn); /* ...to here */
1524                 return;
1525         }
1526
1527         read_unlock(g_lock);
1528         /* Re-try with a write lock; irqs stay disabled from the irqsave above */
1529         write_lock(g_lock);
1530
1531         peer_ni = kiblnd_find_peer_locked(ni, nid);
1532         if (peer_ni != NULL) {
1533                 if (list_empty(&peer_ni->ibp_conns)) {
1534                         /* found a peer_ni, but it's still connecting... */
1535                         LASSERT(kiblnd_peer_connecting(peer_ni));
1536                         if (tx != NULL)
1537                                 list_add_tail(&tx->tx_list,
1538                                               &peer_ni->ibp_tx_queue);
1539                         write_unlock_irqrestore(g_lock, flags);
1540                 } else {
1541                         conn = kiblnd_get_conn_locked(peer_ni);
1542                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1543
1544                         write_unlock_irqrestore(g_lock, flags);
1545
1546                         if (tx != NULL)
1547                                 kiblnd_queue_tx(tx, conn);
1548                         kiblnd_conn_decref(conn); /* ...to here */
1549                 }
1550                 return;
1551         }
1552
1553         write_unlock_irqrestore(g_lock, flags);
1554
1555         /* Allocate a peer_ni ready to add to the peer_ni table and retry */
1556         rc = kiblnd_create_peer(ni, &peer_ni, nid);
1557         if (rc != 0) {
1558                 CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid));
1559                 if (tx != NULL) {
1560                         tx->tx_status = -EHOSTUNREACH;
1561                         tx->tx_waiting = 0;
1562                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1563                         kiblnd_tx_done(tx);
1564                 }
1565                 return;
1566         }
1567
1568         write_lock_irqsave(g_lock, flags);
1569
1570         peer2 = kiblnd_find_peer_locked(ni, nid);
1571         if (peer2 != NULL) {
1572                 if (list_empty(&peer2->ibp_conns)) {
1573                         /* found a peer_ni, but it's still connecting... */
1574                         LASSERT(kiblnd_peer_connecting(peer2));
1575                         if (tx != NULL)
1576                                 list_add_tail(&tx->tx_list,
1577                                               &peer2->ibp_tx_queue);
1578                         write_unlock_irqrestore(g_lock, flags);
1579                 } else {
1580                         conn = kiblnd_get_conn_locked(peer2);
1581                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1582
1583                         write_unlock_irqrestore(g_lock, flags);
1584
1585                         if (tx != NULL)
1586                                 kiblnd_queue_tx(tx, conn);
1587                         kiblnd_conn_decref(conn); /* ...to here */
1588                 }
1589
1590                 kiblnd_peer_decref(peer_ni);
1591                 return;
1592         }
1593
1594         /* Brand new peer_ni */
1595         LASSERT(peer_ni->ibp_connecting == 0);
1596         tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
1597         peer_ni->ibp_connecting = tunables->lnd_conns_per_peer;
1598
1599         /* always called with a ref on ni, which prevents ni being shutdown */
1600         LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0);
1601
1602         if (tx != NULL)
1603                 list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
1604
1605         kiblnd_peer_addref(peer_ni);
1606         hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
1607
1608         write_unlock_irqrestore(g_lock, flags);
1609
1610         for (i = 0; i < tunables->lnd_conns_per_peer; i++)
1611                 kiblnd_connect_peer(peer_ni);
1612         kiblnd_peer_decref(peer_ni);
1613 }
1614
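/* LND send entry point.  Anything that fits in a pre-posted receive
 * buffer is sent as IBLND_MSG_IMMEDIATE; larger GET/PUT/REPLY payloads
 * are set up for RDMA rendezvous instead.  The cutover is a pure size
 * check, e.g. (illustrative payload size, not taken from this file):
 *
 *	nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[4096]);
 *	if (nob <= IBLND_MSG_SIZE)
 *		... copy the payload inline and send IMMEDIATE ...
 *	else
 *		... build an RDMA descriptor for rendezvous ...
 */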
1615 int
1616 kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
1617 {
1618         struct lnet_hdr *hdr = &lntmsg->msg_hdr;
1619         int               type = lntmsg->msg_type;
1620         struct lnet_process_id target = lntmsg->msg_target;
1621         int               target_is_router = lntmsg->msg_target_is_router;
1622         int               routing = lntmsg->msg_routing;
1623         unsigned int      payload_niov = lntmsg->msg_niov;
1624         struct bio_vec   *payload_kiov = lntmsg->msg_kiov;
1625         unsigned int      payload_offset = lntmsg->msg_offset;
1626         unsigned int      payload_nob = lntmsg->msg_len;
1627         struct kib_msg *ibmsg;
1628         struct kib_rdma_desc *rd;
1629         struct kib_tx *tx;
1630         int               nob;
1631         int               rc;
1632
1633         /* NB 'private' is different depending on what we're sending.... */
1634
1635         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
1636                payload_nob, payload_niov, libcfs_id2str(target));
1637
1638         LASSERT (payload_nob == 0 || payload_niov > 0);
1639         LASSERT (payload_niov <= LNET_MAX_IOV);
1640
1641         /* Thread context */
1642         LASSERT (!in_interrupt());
1643
1644         switch (type) {
1645         default:
1646                 LBUG();
1647                 return (-EIO);
1648
1649         case LNET_MSG_ACK:
1650                 LASSERT (payload_nob == 0);
1651                 break;
1652
1653         case LNET_MSG_GET:
1654                 if (routing || target_is_router)
1655                         break;                  /* send IMMEDIATE */
1656
1657                 /* is the REPLY message too small for RDMA? */
1658                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
1659                 if (nob <= IBLND_MSG_SIZE)
1660                         break;                  /* send IMMEDIATE */
1661
1662                 tx = kiblnd_get_idle_tx(ni, target.nid);
1663                 if (tx == NULL) {
1664                         CERROR("Can't allocate txd for GET to %s\n",
1665                                libcfs_nid2str(target.nid));
1666                         return -ENOMEM;
1667                 }
1668
1669                 ibmsg = tx->tx_msg;
1670                 rd = &ibmsg->ibm_u.get.ibgm_rd;
1671                 rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1672                                           lntmsg->msg_md->md_niov,
1673                                           lntmsg->msg_md->md_kiov,
1674                                           0, lntmsg->msg_md->md_length);
1675                 if (rc != 0) {
1676                         CERROR("Can't setup GET sink for %s: %d\n",
1677                                libcfs_nid2str(target.nid), rc);
1678                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1679                         kiblnd_tx_done(tx);
1680                         return -EIO;
1681                 }
1682
1683                 nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
1684                 ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
1685                 ibmsg->ibm_u.get.ibgm_hdr = *hdr;
1686
1687                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
1688
1689                 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
1690                 if (tx->tx_lntmsg[1] == NULL) {
1691                         CERROR("Can't create reply for GET -> %s\n",
1692                                libcfs_nid2str(target.nid));
1693                         kiblnd_tx_done(tx);
1694                         return -EIO;
1695                 }
1696
1697                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
1698                 tx->tx_waiting = 1;             /* waiting for GET_DONE */
1699                 kiblnd_launch_tx(ni, tx, target.nid);
1700                 return 0;
1701
1702         case LNET_MSG_REPLY:
1703         case LNET_MSG_PUT:
1704                 /* Is the payload small enough not to need RDMA? */
1705                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
1706                 if (nob <= IBLND_MSG_SIZE)
1707                         break;                  /* send IMMEDIATE */
1708
1709                 tx = kiblnd_get_idle_tx(ni, target.nid);
1710                 if (tx == NULL) {
1711                         CERROR("Can't allocate %s txd for %s\n",
1712                                type == LNET_MSG_PUT ? "PUT" : "REPLY",
1713                                libcfs_nid2str(target.nid));
1714                         return -ENOMEM;
1715                 }
1716
1717                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1718                                           payload_niov, payload_kiov,
1719                                           payload_offset, payload_nob);
1720                 if (rc != 0) {
1721                         CERROR("Can't setup PUT src for %s: %d\n",
1722                                libcfs_nid2str(target.nid), rc);
1723                         kiblnd_tx_done(tx);
1724                         return -EIO;
1725                 }
1726
1727                 ibmsg = tx->tx_msg;
1728                 ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
1729                 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
1730                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
1731                                    sizeof(struct kib_putreq_msg));
1732
1733                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1734                 tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
1735                 kiblnd_launch_tx(ni, tx, target.nid);
1736                 return 0;
1737         }
1738
1739         /* send IMMEDIATE */
1740         LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
1741                 <= IBLND_MSG_SIZE);
1742
1743         tx = kiblnd_get_idle_tx(ni, target.nid);
1744         if (tx == NULL) {
1745                 CERROR ("Can't send %d to %s: tx descs exhausted\n",
1746                         type, libcfs_nid2str(target.nid));
1747                 return -ENOMEM;
1748         }
1749
1750         ibmsg = tx->tx_msg;
1751         ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
1752
1753         lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
1754                             offsetof(struct kib_msg,
1755                                      ibm_u.immediate.ibim_payload),
1756                             payload_niov, payload_kiov,
1757                             payload_offset, payload_nob);
1758
1759         nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
1760         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1761
1762         tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
1763         kiblnd_launch_tx(ni, tx, target.nid);
1764         return 0;
1765 }
1766
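/* Service an optimized GET: RDMA-write the reply payload directly into
 * the sink described by the peer's GET_REQ, then signal GET_DONE with
 * the matching cookie.  A zero-length reply completes locally since no
 * RDMA is needed. */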
1767 static void
1768 kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
1769 {
1770         struct lnet_process_id target = lntmsg->msg_target;
1771         unsigned int niov = lntmsg->msg_niov;
1772         struct bio_vec *kiov = lntmsg->msg_kiov;
1773         unsigned int offset = lntmsg->msg_offset;
1774         unsigned int nob = lntmsg->msg_len;
1775         struct kib_tx *tx;
1776         int rc;
1777
1778         tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
1779         if (tx == NULL) {
1780                 CERROR("Can't get tx for REPLY to %s\n",
1781                        libcfs_nid2str(target.nid));
1782                 goto failed_0;
1783         }
1784
1785         if (nob == 0)
1786                 rc = 0;
1787         else
1788                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1789                                           niov, kiov, offset, nob);
1790
1791         if (rc != 0) {
1792                 CERROR("Can't setup GET src for %s: %d\n",
1793                        libcfs_nid2str(target.nid), rc);
1794                 goto failed_1;
1795         }
1796
1797         rc = kiblnd_init_rdma(rx->rx_conn, tx,
1798                               IBLND_MSG_GET_DONE, nob,
1799                               &rx->rx_msg->ibm_u.get.ibgm_rd,
1800                               rx->rx_msg->ibm_u.get.ibgm_cookie);
1801         if (rc < 0) {
1802                 CERROR("Can't setup rdma for GET from %s: %d\n",
1803                        libcfs_nid2str(target.nid), rc);
1804                 goto failed_1;
1805         }
1806
1807         if (nob == 0) {
1808                 /* No RDMA: local completion may happen now! */
1809                 lnet_finalize(lntmsg, 0);
1810         } else {
1811                 /* RDMA: lnet_finalize(lntmsg) when it
1812                  * completes */
1813                 tx->tx_lntmsg[0] = lntmsg;
1814         }
1815
1816         kiblnd_queue_tx(tx, rx->rx_conn);
1817         return;
1818
1819
1820 failed_1:
1821         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1822         kiblnd_tx_done(tx);
1823 failed_0:
1824         lnet_finalize(lntmsg, -EIO);
1825 }
1826
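/* LND receive entry point, called with the kib_rx that matched 'lntmsg'.
 * IMMEDIATE payloads are copied straight out of the message; PUT_REQ is
 * answered with a PUT_ACK describing the sink buffers (the posted buffer
 * stays reserved for PUT_DONE, hence IBLND_POSTRX_NO_CREDIT); GET_REQ
 * either RDMAs the reply via kiblnd_reply() or completes with GET_DONE
 * carrying -ENODATA when nothing matched. */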
1827 int
1828 kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
1829             int delayed, unsigned int niov, struct bio_vec *kiov,
1830             unsigned int offset, unsigned int mlen, unsigned int rlen)
1831 {
1832         struct kib_rx *rx = private;
1833         struct kib_msg *rxmsg = rx->rx_msg;
1834         struct kib_conn *conn = rx->rx_conn;
1835         struct kib_tx *tx;
1836         __u64        ibprm_cookie;
1837         int          nob;
1838         int          post_credit = IBLND_POSTRX_PEER_CREDIT;
1839         int          rc = 0;
1840
1841         LASSERT (mlen <= rlen);
1842         LASSERT (!in_interrupt());
1843
1844         switch (rxmsg->ibm_type) {
1845         default:
1846                 LBUG();
1847
1848         case IBLND_MSG_IMMEDIATE:
1849                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
1850                 if (nob > rx->rx_nob) {
1851                         CERROR ("Immediate message from %s too big: %d(%d)\n",
1852                                 libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
1853                                 nob, rx->rx_nob);
1854                         rc = -EPROTO;
1855                         break;
1856                 }
1857
1858                 lnet_copy_flat2kiov(niov, kiov, offset,
1859                                     IBLND_MSG_SIZE, rxmsg,
1860                                     offsetof(struct kib_msg,
1861                                              ibm_u.immediate.ibim_payload),
1862                                     mlen);
1863                 lnet_finalize(lntmsg, 0);
1864                 break;
1865
1866         case IBLND_MSG_PUT_REQ: {
1867                 struct kib_msg  *txmsg;
1868                 struct kib_rdma_desc *rd;
1869                 ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
1870
1871                 if (mlen == 0) {
1872                         lnet_finalize(lntmsg, 0);
1873                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1874                                                0, ibprm_cookie);
1875                         break;
1876                 }
1877
1878                 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
1879                 if (tx == NULL) {
1880                         CERROR("Can't allocate tx for %s\n",
1881                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
1882                         /* Not replying will break the connection */
1883                         rc = -ENOMEM;
1884                         break;
1885                 }
1886
1887                 txmsg = tx->tx_msg;
1888                 rd = &txmsg->ibm_u.putack.ibpam_rd;
1889                 rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1890                                           niov, kiov, offset, mlen);
1891                 if (rc != 0) {
1892                         CERROR("Can't setup PUT sink for %s: %d\n",
1893                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
1894                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1895                         kiblnd_tx_done(tx);
1896                         /* tell peer_ni it's over */
1897                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1898                                                rc, ibprm_cookie);
1899                         break;
1900                 }
1901
1902                 nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
1903                 txmsg->ibm_u.putack.ibpam_src_cookie = ibprm_cookie;
1904                 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
1905
1906                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
1907
1908                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1909                 tx->tx_waiting = 1;             /* waiting for PUT_DONE */
1910                 kiblnd_queue_tx(tx, conn);
1911
1912                 /* reposted buffer reserved for PUT_DONE */
1913                 post_credit = IBLND_POSTRX_NO_CREDIT;
1914                 break;
1915                 }
1916
1917         case IBLND_MSG_GET_REQ:
1918                 if (lntmsg != NULL) {
1919                         /* Optimized GET; RDMA lntmsg's payload */
1920                         kiblnd_reply(ni, rx, lntmsg);
1921                 } else {
1922                         /* GET didn't match anything */
1923                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
1924                                                -ENODATA,
1925                                                rxmsg->ibm_u.get.ibgm_cookie);
1926                 }
1927                 break;
1928         }
1929
1930         kiblnd_post_rx(rx, post_credit);
1931         return rc;
1932 }
1933
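/* Spawn a named kernel thread and count it in kib_nthreads so module
 * shutdown can wait for every thread to exit.  A typical call (sketch
 * only, assuming a scheduler entry point of this shape) would be:
 *
 *	rc = kiblnd_thread_start(kiblnd_scheduler, (void *)id, name);
 */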
1934 int
1935 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
1936 {
1937         struct task_struct *task = kthread_run(fn, arg, "%s", name);
1938
1939         if (IS_ERR(task))
1940                 return PTR_ERR(task);
1941
1942         atomic_inc(&kiblnd_data.kib_nthreads);
1943         return 0;
1944 }
1945
1946 static void
1947 kiblnd_thread_fini (void)
1948 {
1949         atomic_dec (&kiblnd_data.kib_nthreads);
1950 }
1951
1952 static void
1953 kiblnd_peer_alive(struct kib_peer_ni *peer_ni)
1954 {
1955         /* This is racy, but everyone's only writing ktime_get_seconds() */
1956         peer_ni->ibp_last_alive = ktime_get_seconds();
1957         smp_mb();
1958 }
1959
1960 static void
1961 kiblnd_peer_notify(struct kib_peer_ni *peer_ni)
1962 {
1963         int           error = 0;
1964         time64_t last_alive = 0;
1965         unsigned long flags;
1966
1967         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1968
1969         if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
1970                 error = peer_ni->ibp_error;
1971                 peer_ni->ibp_error = 0;
1972
1973                 last_alive = peer_ni->ibp_last_alive;
1974         }
1975
1976         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1977
1978         if (error != 0)
1979                 lnet_notify(peer_ni->ibp_ni,
1980                             peer_ni->ibp_nid, false, false, last_alive);
1981 }
1982
1983 void
1984 kiblnd_close_conn_locked(struct kib_conn *conn, int error)
1985 {
1986         /* This just does the immediate housekeeping.  'error' is zero for a
1987          * normal shutdown which can happen only after the connection has been
1988          * established.  If the connection is established, schedule the
1989          * connection to be finished off by the connd.  Otherwise the connd is
1990          * already dealing with it (either to set it up or tear it down).
1991          * Caller holds kib_global_lock exclusively in irq context */
1992         struct kib_peer_ni *peer_ni = conn->ibc_peer;
1993         struct kib_dev *dev;
1994         unsigned long flags;
1995
1996         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1997
1998         if (error != 0 && conn->ibc_comms_error == 0)
1999                 conn->ibc_comms_error = error;
2000
2001         if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
2002                 return; /* already being handled */
2003
2004         if (error == 0 &&
2005             list_empty(&conn->ibc_tx_noops) &&
2006             list_empty(&conn->ibc_tx_queue) &&
2007             list_empty(&conn->ibc_tx_queue_rsrvd) &&
2008             list_empty(&conn->ibc_tx_queue_nocred) &&
2009             list_empty(&conn->ibc_active_txs)) {
2010                 CDEBUG(D_NET, "closing conn to %s\n",
2011                        libcfs_nid2str(peer_ni->ibp_nid));
2012         } else {
2013                 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
2014                        libcfs_nid2str(peer_ni->ibp_nid), error,
2015                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
2016                        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
2017                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
2018                                                 "" : "(sending_rsrvd)",
2019                        list_empty(&conn->ibc_tx_queue_nocred) ?
2020                                                  "" : "(sending_nocred)",
2021                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
2022         }
2023
2024         dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev;
2025         if (peer_ni->ibp_next_conn == conn)
2026                 /* clear next_conn so it won't be used */
2027                 peer_ni->ibp_next_conn = NULL;
2028         list_del(&conn->ibc_list);
2029         /* connd (see below) takes over ibc_list's ref */
2030
2031         if (list_empty(&peer_ni->ibp_conns) &&    /* no more conns */
2032             kiblnd_peer_active(peer_ni)) {         /* still in peer_ni table */
2033                 kiblnd_unlink_peer_locked(peer_ni);
2034
2035                 /* set/clear error on last conn */
2036                 peer_ni->ibp_error = conn->ibc_comms_error;
2037         }
2038
2039         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
2040
2041         if (error != 0 &&
2042             kiblnd_dev_can_failover(dev)) {
2043                 list_add_tail(&dev->ibd_fail_list,
2044                               &kiblnd_data.kib_failed_devs);
2045                 wake_up(&kiblnd_data.kib_failover_waitq);
2046         }
2047
2048         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
2049
2050         list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
2051         wake_up(&kiblnd_data.kib_connd_waitq);
2052
2053         spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
2054 }
2055
2056 void
2057 kiblnd_close_conn(struct kib_conn *conn, int error)
2058 {
2059         unsigned long flags;
2060
2061         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2062
2063         kiblnd_close_conn_locked(conn, error);
2064
2065         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2066 }
2067
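/* Flush RXs that arrived before the connection reached ESTABLISHED.
 * The global lock is dropped around each kiblnd_handle_rx() call. */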
2068 static void
2069 kiblnd_handle_early_rxs(struct kib_conn *conn)
2070 {
2071         unsigned long flags;
2072         struct kib_rx *rx;
2073
2074         LASSERT(!in_interrupt());
2075         LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
2076
2077         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2078         while (!list_empty(&conn->ibc_early_rxs)) {
2079                 rx = list_entry(conn->ibc_early_rxs.next,
2080                                 struct kib_rx, rx_list);
2081                 list_del(&rx->rx_list);
2082                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2083
2084                 kiblnd_handle_rx(rx);
2085
2086                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2087         }
2088         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2089 }
2090
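/* Fail every tx on 'txs' with -ECONNABORTED, choosing an LNet health
 * status that reflects where the tx was when the connection died.  A tx
 * still being sent cannot be freed yet, so it is parked on
 * ibc_zombie_txs until its completion (or CQ destruction) drops the
 * last send reference. */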
2091 void
2092 kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
2093 {
2094         LIST_HEAD(zombies);
2095         struct kib_tx *nxt;
2096         struct kib_tx *tx;
2097
2098         spin_lock(&conn->ibc_lock);
2099
2100         list_for_each_entry_safe(tx, nxt, txs, tx_list) {
2101                 if (txs == &conn->ibc_active_txs) {
2102                         LASSERT(!tx->tx_queued);
2103                         LASSERT(tx->tx_waiting ||
2104                                 tx->tx_sending != 0);
2105                         if (conn->ibc_comms_error == -ETIMEDOUT) {
2106                                 if (tx->tx_waiting && !tx->tx_sending)
2107                                         tx->tx_hstatus =
2108                                           LNET_MSG_STATUS_REMOTE_TIMEOUT;
2109                                 else if (tx->tx_sending)
2110                                         tx->tx_hstatus =
2111                                           LNET_MSG_STATUS_NETWORK_TIMEOUT;
2112                         }
2113                 } else {
2114                         LASSERT(tx->tx_queued);
2115                         if (conn->ibc_comms_error == -ETIMEDOUT)
2116                                 tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
2117                         else
2118                                 tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
2119                 }
2120
2121                 tx->tx_status = -ECONNABORTED;
2122                 tx->tx_waiting = 0;
2123
2124                 /*
2125                  * TODO: This makes an assumption that
2126                  * kiblnd_tx_complete() will be called for each tx. If
2127                  * that event is dropped we could end up with stale
2128                  * connections floating around. We'd like to deal with
2129                  * that in a better way.
2130                  *
2131                  * Also that means we can exceed the timeout by many
2132                  * seconds.
2133                  */
2134                 if (tx->tx_sending == 0) {
2135                         tx->tx_queued = 0;
2136                         list_move(&tx->tx_list, &zombies);
2137                 } else {
2138                         /* keep tx until cq destroy */
2139                         list_move(&tx->tx_list, &conn->ibc_zombie_txs);
2140                         conn->ibc_waits++;
2141                 }
2142         }
2143
2144         spin_unlock(&conn->ibc_lock);
2145
2146         /*
2147          * Transmits are aborted as part of finalizing the connection,
2148          * and the connection is finalized on error.
2149          * Passing LNET_MSG_STATUS_OK to txlist_done() will not
2150          * override the value already set in tx->tx_hstatus above.
2151          */
2152         kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
2153 }
2154
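/* A zombie tx whose LNet MD has been discarded will never see its send
 * completion; drop one send reference per such tx and report whether any
 * tx released its connection ref, letting the connd retire the zombie. */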
2155 static bool
2156 kiblnd_tx_may_discard(struct kib_conn *conn)
2157 {
2158         bool rc = false;
2159         struct kib_tx *nxt;
2160         struct kib_tx *tx;
2161
2162         spin_lock(&conn->ibc_lock);
2163
2164         list_for_each_entry_safe(tx, nxt, &conn->ibc_zombie_txs, tx_list) {
2165                 if (tx->tx_sending > 0 && tx->tx_lntmsg[0] &&
2166                     lnet_md_discarded(tx->tx_lntmsg[0]->msg_md)) {
2167                         tx->tx_sending--;
2168                         if (tx->tx_sending == 0) {
2169                                 kiblnd_conn_decref(tx->tx_conn);
2170                                 tx->tx_conn = NULL;
2171                                 rc = true;
2172                         }
2173                 }
2174         }
2175
2176         spin_unlock(&conn->ibc_lock);
2177         return rc;
2178 }
2179
2180 static void
2181 kiblnd_finalise_conn(struct kib_conn *conn)
2182 {
2183         LASSERT (!in_interrupt());
2184         LASSERT (conn->ibc_state > IBLND_CONN_INIT);
2185
2186         /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
2187          * for connections that didn't get as far as being connected, because
2188          * rdma_disconnect() does this for free. */
2189         kiblnd_abort_receives(conn);
2190
2191         kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
2192
2193         /* Complete all tx descs not waiting for sends to complete.
2194          * NB we should be safe from RDMA now that the QP has changed state */
2195
2196         kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
2197         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
2198         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
2199         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
2200         kiblnd_abort_txs(conn, &conn->ibc_active_txs);
2201
2202         kiblnd_handle_early_rxs(conn);
2203 }
2204
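/* Account a failed connection attempt.  Once no attempts remain and no
 * conns exist, unlink the peer_ni and complete its blocked transmits
 * with a health status derived from the connection error. */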
2205 static void
2206 kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
2207                            int error)
2208 {
2209         LIST_HEAD(zombies);
2210         unsigned long flags;
2211         enum lnet_msg_hstatus hstatus;
2212
2213         LASSERT(error != 0);
2214         LASSERT(!in_interrupt());
2215
2216         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2217
2218         if (active) {
2219                 LASSERT(peer_ni->ibp_connecting > 0);
2220                 peer_ni->ibp_connecting--;
2221         } else {
2222                 LASSERT (peer_ni->ibp_accepting > 0);
2223                 peer_ni->ibp_accepting--;
2224         }
2225
2226         if (kiblnd_peer_connecting(peer_ni)) {
2227                 /* another connection attempt under way... */
2228                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2229                                         flags);
2230                 return;
2231         }
2232
2233         peer_ni->ibp_reconnected = 0;
2234         if (list_empty(&peer_ni->ibp_conns)) {
2235                 /* Take peer_ni's blocked transmits to complete with error */
2236                 list_splice_init(&peer_ni->ibp_tx_queue, &zombies);
2237
2238                 if (kiblnd_peer_active(peer_ni))
2239                         kiblnd_unlink_peer_locked(peer_ni);
2240
2241                 peer_ni->ibp_error = error;
2242         } else {
2243                 /* Can't have blocked transmits if there are connections */
2244                 LASSERT(list_empty(&peer_ni->ibp_tx_queue));
2245         }
2246
2247         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2248
2249         kiblnd_peer_notify(peer_ni);
2250
2251         if (list_empty(&zombies))
2252                 return;
2253
2254         CNETERR("Deleting messages for %s: connection failed\n",
2255                 libcfs_nid2str(peer_ni->ibp_nid));
2256
2257         switch (error) {
2258         case -EHOSTUNREACH:
2259         case -ETIMEDOUT:
2260                 hstatus = LNET_MSG_STATUS_NETWORK_TIMEOUT;
2261                 break;
2262         case -ECONNREFUSED:
2263                 hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
2264                 break;
2265         default:
2266                 hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
2267                 break;
2268         }
2269
2270         kiblnd_txlist_done(&zombies, error, hstatus);
2271 }
2272
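/* Final step of connection establishment for both the active and the
 * passive side.  On failure, unwind through kiblnd_peer_connect_failed().
 * On success, publish the conn on ibp_conns, close any stale conns from
 * an older peer_ni incarnation, then drain the transmits that were
 * queued while connecting. */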
2273 static void
2274 kiblnd_connreq_done(struct kib_conn *conn, int status)
2275 {
2276         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2277         struct kib_tx *tx;
2278         LIST_HEAD(txs);
2279         unsigned long    flags;
2280         int              active;
2281
2282         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2283
2284         CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
2285                libcfs_nid2str(peer_ni->ibp_nid), active,
2286                conn->ibc_version, status);
2287
2288         LASSERT (!in_interrupt());
2289         LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
2290                   peer_ni->ibp_connecting > 0) ||
2291                  (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
2292                   peer_ni->ibp_accepting > 0));
2293
2294         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
2295         conn->ibc_connvars = NULL;
2296
2297         if (status != 0) {
2298                 /* failed to establish connection */
2299                 kiblnd_peer_connect_failed(peer_ni, active, status);
2300                 kiblnd_finalise_conn(conn);
2301                 return;
2302         }
2303
2304         /* connection established */
2305         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2306
2307         conn->ibc_last_send = ktime_get();
2308         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
2309         kiblnd_peer_alive(peer_ni);
2310
2311         /* Add conn to peer_ni's list and nuke any dangling conns from a different
2312          * peer_ni instance... */
2313         kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
2314         list_add(&conn->ibc_list, &peer_ni->ibp_conns);
2315         peer_ni->ibp_reconnected = 0;
2316         if (active)
2317                 peer_ni->ibp_connecting--;
2318         else
2319                 peer_ni->ibp_accepting--;
2320
2321         if (peer_ni->ibp_version == 0) {
2322                 peer_ni->ibp_version     = conn->ibc_version;
2323                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2324         }
2325
2326         if (peer_ni->ibp_version     != conn->ibc_version ||
2327             peer_ni->ibp_incarnation != conn->ibc_incarnation) {
2328                 kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version,
2329                                                 conn->ibc_incarnation);
2330                 peer_ni->ibp_version     = conn->ibc_version;
2331                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2332         }
2333
2334         /* grab pending txs while I have the lock */
2335         list_splice_init(&peer_ni->ibp_tx_queue, &txs);
2336
2337         if (!kiblnd_peer_active(peer_ni) ||        /* peer_ni has been deleted */
2338             conn->ibc_comms_error != 0) {       /* error has happened already */
2339
2340                 /* start to shut down connection */
2341                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
2342                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2343
2344                 kiblnd_txlist_done(&txs, -ECONNABORTED,
2345                                    LNET_MSG_STATUS_LOCAL_ERROR);
2346
2347                 return;
2348         }
2349
2350         /* +1 ref for myself, this connection is visible to other threads
2351          * now, refcount of peer:ibp_conns can be released by connection
2352          * close from either a different thread, or the calling of
2353          * kiblnd_check_sends_locked() below. See bz21911 for details.
2354          */
2355         kiblnd_conn_addref(conn);
2356         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2357
2358         /* Schedule blocked txs
2359          * Note: if we are running with conns_per_peer > 1, these blocked
2360          * txs will all get scheduled to the first connection which gets
2361          * scheduled.  We won't be using round robin on this first batch.
2362          */
2363         spin_lock(&conn->ibc_lock);
2364         while (!list_empty(&txs)) {
2365                 tx = list_entry(txs.next, struct kib_tx, tx_list);
2366                 list_del(&tx->tx_list);
2367
2368                 kiblnd_queue_tx_locked(tx, conn);
2369         }
2370         kiblnd_check_sends_locked(conn);
2371         spin_unlock(&conn->ibc_lock);
2372
2373         /* schedule blocked rxs */
2374         kiblnd_handle_early_rxs(conn);
2375         kiblnd_conn_decref(conn);
2376 }
2377
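/* Send a consumer-defined reject; rdma_reject() grew a 'reason' argument
 * in newer kernels, hence the HAVE_RDMA_REJECT_4ARGS wrapper. */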
2378 static void
2379 kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
2380 {
2381         int          rc;
2382
2383 #ifdef HAVE_RDMA_REJECT_4ARGS
2384         rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED);
2385 #else
2386         rc = rdma_reject(cmid, rej, sizeof(*rej));
2387 #endif
2388
2389         if (rc != 0)
2390                 CWARN("Error %d sending reject\n", rc);
2391 }
2392
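/* Handle an incoming CONNREQ.  The request is validated roughly in order
 * of cheapness: privileged source port (if required), magic/version,
 * destination NID/net, incarnation stamp, then queue depth and fragment
 * limits.  Any failure sends a kib_rej describing what this side would
 * have accepted; connection races are broken in favour of the higher
 * NID, falling back to the lower NID after repeated collisions. */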
2393 static int
2394 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2395 {
2396         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
2397         struct kib_msg *reqmsg = priv;
2398         struct kib_msg *ackmsg;
2399         struct kib_dev *ibdev;
2400         struct kib_peer_ni *peer_ni;
2401         struct kib_peer_ni *peer2;
2402         struct kib_conn *conn;
2403         struct lnet_ni *ni = NULL;
2404         struct kib_net *net = NULL;
2405         lnet_nid_t nid;
2406         struct rdma_conn_param cp;
2407         struct kib_rej rej;
2408         int version = IBLND_MSG_VERSION;
2409         unsigned long flags;
2410         int rc;
2411         struct sockaddr_in *peer_addr;
2412
2413         LASSERT(!in_interrupt());
2414         /* cmid inherits 'context' from the corresponding listener id */
2415         ibdev = cmid->context;
2416         LASSERT(ibdev);
2417
2418         memset(&rej, 0, sizeof(rej));
2419         rej.ibr_magic                = IBLND_MSG_MAGIC;
2420         rej.ibr_why                  = IBLND_REJECT_FATAL;
2421         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
2422
2423         peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
2424         if (*kiblnd_tunables.kib_require_priv_port &&
2425             ntohs(peer_addr->sin_port) >= PROT_SOCK) {
2426                 __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
2427                 CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
2428                        &ip, ntohs(peer_addr->sin_port));
2429                 goto failed;
2430         }
2431
2432         if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
2433                 CERROR("Short connection request\n");
2434                 goto failed;
2435         }
2436
2437         /* Future protocol version compatibility support!  If the
2438          * o2iblnd-specific protocol changes, or when LNET unifies
2439          * protocols over all LNDs, the initial connection will
2440          * negotiate a protocol version.  I trap this here to avoid
2441          * console errors; the reject tells the peer_ni which protocol I
2442          * speak. */
2443         if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
2444             reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
2445                 goto failed;
2446         if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
2447             reqmsg->ibm_version != IBLND_MSG_VERSION &&
2448             reqmsg->ibm_version != IBLND_MSG_VERSION_1)
2449                 goto failed;
2450         if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
2451             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
2452             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
2453                 goto failed;
2454
2455         rc = kiblnd_unpack_msg(reqmsg, priv_nob);
2456         if (rc != 0) {
2457                 CERROR("Can't parse connection request: %d\n", rc);
2458                 goto failed;
2459         }
2460
2461         nid = reqmsg->ibm_srcnid;
2462         ni  = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
2463
2464         if (ni != NULL) {
2465                 net = (struct kib_net *)ni->ni_data;
2466                 rej.ibr_incarnation = net->ibn_incarnation;
2467         }
2468
2469         if (ni == NULL ||                         /* no matching net */
2470             ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
2471             net->ibn_dev != ibdev) {              /* wrong device */
2472                 CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
                            libcfs_nid2str(nid),
2473                        ni ? libcfs_nid2str(ni->ni_nid) : "NA",
2474                        ibdev->ibd_ifname, ibdev->ibd_nnets,
2475                        &ibdev->ibd_ifip,
2476                        libcfs_nid2str(reqmsg->ibm_dstnid));
2477
2478                 goto failed;
2479         }
2480
2481         /* check time stamp as soon as possible */
2482         if (reqmsg->ibm_dststamp != 0 &&
2483             reqmsg->ibm_dststamp != net->ibn_incarnation) {
2484                 CWARN("Stale connection request\n");
2485                 rej.ibr_why = IBLND_REJECT_CONN_STALE;
2486                 goto failed;
2487         }
2488
2489         /* I can accept peer_ni's version */
2490         version = reqmsg->ibm_version;
2491
2492         if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
2493                 CERROR("Unexpected connreq msg type: %x from %s\n",
2494                        reqmsg->ibm_type, libcfs_nid2str(nid));
2495                 goto failed;
2496         }
2497
2498         if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
2499             kiblnd_msg_queue_size(version, ni)) {
2500                 CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
2501                        libcfs_nid2str(nid),
2502                        reqmsg->ibm_u.connparams.ibcp_queue_depth,
2503                        kiblnd_msg_queue_size(version, ni));
2504
2505                 if (version == IBLND_MSG_VERSION)
2506                         rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
2507
2508                 goto failed;
2509         }
2510
2511         if (reqmsg->ibm_u.connparams.ibcp_max_frags >
2512             IBLND_MAX_RDMA_FRAGS) {
2513                 CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
2514                       libcfs_nid2str(nid), version,
2515                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2516                       IBLND_MAX_RDMA_FRAGS);
2517
2518                 if (version >= IBLND_MSG_VERSION)
2519                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2520
2521                 goto failed;
2522         } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
2523                    IBLND_MAX_RDMA_FRAGS &&
2524                    net->ibn_fmr_ps == NULL) {
2525                 CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
2526                       libcfs_nid2str(nid), version,
2527                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2528                       IBLND_MAX_RDMA_FRAGS);
2529
2530                 if (version == IBLND_MSG_VERSION)
2531                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2532
2533                 goto failed;
2534         }
2535
2536         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2537                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
2538                        libcfs_nid2str(nid),
2539                        reqmsg->ibm_u.connparams.ibcp_max_msg_size,
2540                        IBLND_MSG_SIZE);
2541                 goto failed;
2542         }
2543
2544         /* assume 'nid' is a new peer_ni; create one */
2545         rc = kiblnd_create_peer(ni, &peer_ni, nid);
2546         if (rc != 0) {
2547                 CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid));
2548                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2549                 goto failed;
2550         }
2551
2552         /* We have validated the peer's parameters so use those */
2553         peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
2554         peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
2555
2556         write_lock_irqsave(g_lock, flags);
2557
2558         peer2 = kiblnd_find_peer_locked(ni, nid);
2559         if (peer2 != NULL) {
2560                 if (peer2->ibp_version == 0) {
2561                         peer2->ibp_version     = version;
2562                         peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2563                 }
2564
2565                 /* not the guy I've talked with */
2566                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
2567                     peer2->ibp_version     != version) {
2568                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
2569
2570                         if (kiblnd_peer_active(peer2)) {
2571                                 peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2572                                 peer2->ibp_version = version;
2573                         }
2574                         write_unlock_irqrestore(g_lock, flags);
2575
2576                         CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
2577                               libcfs_nid2str(nid), peer2->ibp_version, version,
2578                               peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
2579
2580                         kiblnd_peer_decref(peer_ni);
2581                         rej.ibr_why = IBLND_REJECT_CONN_STALE;
2582                         goto failed;
2583                 }
2584
2585                 /* Tie-break connection race in favour of the higher NID.
2586                  * If we keep running into a race condition multiple times,
2587                  * we have to assume that the connection attempt with the
2588                  * higher NID is stuck in a connecting state and will never
2589                  * recover.  As such, we pass through this if-block and let
2590                  * the lower NID connection win so we can move forward.
2591                  */
2592                 if (peer2->ibp_connecting != 0 &&
2593                     nid < ni->ni_nid && peer2->ibp_races <
2594                     MAX_CONN_RACES_BEFORE_ABORT) {
2595                         peer2->ibp_races++;
2596                         write_unlock_irqrestore(g_lock, flags);
2597
2598                         CDEBUG(D_NET, "Conn race %s\n",
2599                                libcfs_nid2str(peer2->ibp_nid));
2600
2601                         kiblnd_peer_decref(peer_ni);
2602                         rej.ibr_why = IBLND_REJECT_CONN_RACE;
2603                         goto failed;
2604                 }
2605                 if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
2606                         CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
2607                                 libcfs_nid2str(peer2->ibp_nid),
2608                                 MAX_CONN_RACES_BEFORE_ABORT);
2609                 /*
2610                  * a passive connection is allowed even when this peer_ni
2611                  * is waiting for reconnection.
2612                  */
2613                 peer2->ibp_reconnecting = 0;
2614                 peer2->ibp_races = 0;
2615                 peer2->ibp_accepting++;
2616                 kiblnd_peer_addref(peer2);
2617
2618                 /* Race with kiblnd_launch_tx (active connect) to create peer_ni
2619                  * so copy validated parameters since we now know what the
2620                  * peer_ni's limits are */
2621                 peer2->ibp_max_frags = peer_ni->ibp_max_frags;
2622                 peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
2623
2624                 write_unlock_irqrestore(g_lock, flags);
2625                 kiblnd_peer_decref(peer_ni);
2626                 peer_ni = peer2;
2627         } else {
2628                 /* Brand new peer_ni */
2629                 LASSERT(peer_ni->ibp_accepting == 0);
2630                 LASSERT(peer_ni->ibp_version == 0 &&
2631                         peer_ni->ibp_incarnation == 0);
2632
2633                 peer_ni->ibp_accepting   = 1;
2634                 peer_ni->ibp_version     = version;
2635                 peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
2636
2637                 /* I have a ref on ni that prevents it being shutdown */
2638                 LASSERT(net->ibn_shutdown == 0);
2639
2640                 kiblnd_peer_addref(peer_ni);
2641                 hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
2642
2643                 write_unlock_irqrestore(g_lock, flags);
2644         }
2645
2646         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT,
2647                                   version);
2648         if (!conn) {
2649                 kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
2650                 kiblnd_peer_decref(peer_ni);
2651                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2652                 goto failed;
2653         }
2654
2655         /* conn now "owns" cmid, so I return success from here on to ensure the
2656          * CM callback doesn't destroy cmid.
2657          */
2658         conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
2659         conn->ibc_credits          = conn->ibc_queue_depth;
2660         conn->ibc_reserved_credits = conn->ibc_queue_depth;
2661         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
2662                 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
2663
2664         ackmsg = &conn->ibc_connvars->cv_msg;
2665         memset(ackmsg, 0, sizeof(*ackmsg));
2666
2667         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
2668                         sizeof(ackmsg->ibm_u.connparams));
2669         ackmsg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
2670         ackmsg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
2671         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2672
2673         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
2674
2675         memset(&cp, 0, sizeof(cp));
2676         cp.private_data        = ackmsg;
2677         cp.private_data_len    = ackmsg->ibm_nob;
2678         cp.responder_resources = 0;            /* No atomic ops or RDMA reads */
2679         cp.initiator_depth     = 0;
2680         cp.flow_control        = 1;
2681         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2682         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2683
2684         CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
2685
2686         rc = rdma_accept(cmid, &cp);
2687         if (rc != 0) {
2688                 CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
2689                 rej.ibr_version = version;
2690                 rej.ibr_why     = IBLND_REJECT_FATAL;
2691
2692                 kiblnd_reject(cmid, &rej);
2693                 kiblnd_connreq_done(conn, rc);
2694                 kiblnd_conn_decref(conn);
2695         }
2696
2697         lnet_ni_decref(ni);
2698         return 0;
2699
2700  failed:
2701         if (ni != NULL) {
2702                 rej.ibr_cp.ibcp_queue_depth =
2703                         kiblnd_msg_queue_size(version, ni);
2704                 rej.ibr_cp.ibcp_max_frags   = IBLND_MAX_RDMA_FRAGS;
2705                 lnet_ni_decref(ni);
2706         }
2707
2708         rej.ibr_version = version;
2709         kiblnd_reject(cmid, &rej);
2710
2711         return -ECONNREFUSED;
2712 }
2713
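/* Decide whether a rejected active connect should be retried.  Rejections
 * that carry negotiable parameters (queue depth, max frags, version)
 * shrink this peer_ni's limits before the retry; the actual reconnect is
 * performed by the connd when it reaps the rejected conn. */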
2714 static void
2715 kiblnd_check_reconnect(struct kib_conn *conn, int version,
2716                        u64 incarnation, int why, struct kib_connparams *cp)
2717 {
2718         rwlock_t        *glock = &kiblnd_data.kib_global_lock;
2719         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2720         char            *reason;
2721         int              msg_size = IBLND_MSG_SIZE;
2722         int              frag_num = -1;
2723         int              queue_dep = -1;
2724         bool             reconnect;
2725         unsigned long    flags;
2726
2727         LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2728         LASSERT(peer_ni->ibp_connecting > 0);   /* 'conn' at least */
2729
2730         if (cp) {
2731                 msg_size        = cp->ibcp_max_msg_size;
2732                 frag_num        = cp->ibcp_max_frags;
2733                 queue_dep       = cp->ibcp_queue_depth;
2734         }
2735
2736         write_lock_irqsave(glock, flags);
2737         /* retry connection if it's still needed and no other connection
2738          * attempts (active or passive) are in progress
2739          * NB: reconnect is still needed even when ibp_tx_queue is
2740          * empty if ibp_version != version because reconnect may be
2741          * initiated.
2742          */
2743         reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
2744                      peer_ni->ibp_version != version) &&
2745                     peer_ni->ibp_connecting &&
2746                     peer_ni->ibp_accepting == 0;
2747         if (!reconnect) {
2748                 reason = "no need";
2749                 goto out;
2750         }
2751
2752         switch (why) {
2753         default:
2754                 reason = "Unknown";
2755                 break;
2756
2757         case IBLND_REJECT_RDMA_FRAGS: {
2758                 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2759
2760                 if (!cp) {
2761                         reason = "can't negotiate max frags";
2762                         goto out;
2763                 }
2764                 tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2765 #ifdef HAVE_IB_GET_DMA_MR
2766                 /*
2767                  * This check only makes sense if the kernel supports global
2768                  * memory registration; otherwise map_on_demand can never be 0.
2769                  */
2770                 if (!tunables->lnd_map_on_demand) {
2771                         reason = "map_on_demand must be enabled";
2772                         goto out;
2773                 }
2774 #endif
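                     /* Negotiation can only step the frag count down: if the
                      * peer's limit isn't strictly below ours, give up. */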
2775                 if (conn->ibc_max_frags <= frag_num) {
2776                         reason = "unsupported max frags";
2777                         goto out;
2778                 }
2779
2780                 peer_ni->ibp_max_frags = frag_num;
2781                 reason = "rdma fragments";
2782                 break;
2783         }
2784         case IBLND_REJECT_MSG_QUEUE_SIZE:
2785                 if (!cp) {
2786                         reason = "can't negotiate queue depth";
2787                         goto out;
2788                 }
2789                 if (conn->ibc_queue_depth <= queue_dep) {
2790                         reason = "unsupported queue depth";
2791                         goto out;
2792                 }
2793
2794                 peer_ni->ibp_queue_depth = queue_dep;
2795                 reason = "queue depth";
2796                 break;
2797
2798         case IBLND_REJECT_CONN_STALE:
2799                 reason = "stale";
2800                 break;
2801
2802         case IBLND_REJECT_CONN_RACE:
2803                 reason = "conn race";
2804                 break;
2805
2806         case IBLND_REJECT_CONN_UNCOMPAT:
2807                 reason = "version negotiation";
2808                 break;
2809         }
2810
2811         conn->ibc_reconnect = 1;
2812         peer_ni->ibp_reconnecting++;
2813         peer_ni->ibp_version = version;
2814         if (incarnation != 0)
2815                 peer_ni->ibp_incarnation = incarnation;
2816  out:
2817         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2818
2819         CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
2820                 libcfs_nid2str(peer_ni->ibp_nid),
2821                 reconnect ? "reconnect" : "don't reconnect",
2822                 reason, IBLND_MSG_VERSION, version, msg_size,
2823                 conn->ibc_queue_depth, queue_dep,
2824                 conn->ibc_max_frags, frag_num);
2825         /*
2826          * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer_ni
2827          * while destroying the zombie
2828          */
2829 }
2830
2831 static void
2832 kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
2833 {
2834         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2835         int status = -ECONNREFUSED;
2836
2837         LASSERT (!in_interrupt());
2838         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2839
2840         switch (reason) {
2841         case IB_CM_REJ_STALE_CONN:
2842                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2843                                        IBLND_REJECT_CONN_STALE, NULL);
2844                 break;
2845
2846         case IB_CM_REJ_INVALID_SERVICE_ID:
2847                 status = -EHOSTUNREACH;
2848                 CNETERR("%s rejected: no listener at %d\n",
2849                         libcfs_nid2str(peer_ni->ibp_nid),
2850                         *kiblnd_tunables.kib_service);
2851                 break;
2852
2853         case IB_CM_REJ_CONSUMER_DEFINED:
2854                 if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
2855                         struct kib_rej *rej = priv;
2856                         struct kib_connparams *cp = NULL;
2857                         bool flip = false;
2858                         __u64 incarnation = -1;
2859
2860                         /* NB. The default incarnation is -1 because:
2861                          * a) V1 ignores the dst incarnation in the connreq.
2862                          * b) V2 provides an incarnation while rejecting me,
2863                          *    so the -1 will be overwritten.
2864                          *
2865                          * If I connect to a V1 peer_ni with the V2 protocol,
2866                          * it rejects me and then upgrades to V2; I know
2867                          * nothing about the upgrade and retry with V1.  The
2868                          * upgraded V2 peer can then tell I'm talking to the
2869                          * old version and rejects me (incarnation is -1).
2870                          */
2871
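                             /* A byte-swapped magic means the peer has the
                              * opposite endianness: fix up the fields used. */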
2872                         if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
2873                             rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
2874                                 __swab32s(&rej->ibr_magic);
2875                                 __swab16s(&rej->ibr_version);
2876                                 flip = true;
2877                         }
2878
2879                         if (priv_nob >= sizeof(struct kib_rej) &&
2880                             rej->ibr_version > IBLND_MSG_VERSION_1) {
2881                                 /* priv_nob is always 148 in the current
2882                                  * OFED (see IB_CM_REJ_PRIVATE_DATA_SIZE),
2883                                  * so we still need to check the version.
2884                                  */
2885                                 cp = &rej->ibr_cp;
2886
2887                                 if (flip) {
2888                                         __swab64s(&rej->ibr_incarnation);
2889                                         __swab16s(&cp->ibcp_queue_depth);
2890                                         __swab16s(&cp->ibcp_max_frags);
2891                                         __swab32s(&cp->ibcp_max_msg_size);
2892                                 }
2893
2894                                 incarnation = rej->ibr_incarnation;
2895                         }
2896
2897                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
2898                             rej->ibr_magic != LNET_PROTO_MAGIC) {
2899                                 CERROR("%s rejected: consumer defined fatal error\n",
2900                                        libcfs_nid2str(peer_ni->ibp_nid));
2901                                 break;
2902                         }
2903
2904                         if (rej->ibr_version != IBLND_MSG_VERSION &&
2905                             rej->ibr_version != IBLND_MSG_VERSION_1) {
2906                                 CERROR("%s rejected: o2iblnd version %x error\n",
2907                                        libcfs_nid2str(peer_ni->ibp_nid),
2908                                        rej->ibr_version);
2909                                 break;
2910                         }
2911
2912                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
2913                             rej->ibr_version == IBLND_MSG_VERSION_1) {
2914                                 CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
2915                                        libcfs_nid2str(peer_ni->ibp_nid),
2916                                        rej->ibr_version);
2917
2918                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
2919                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
2920                         }
2921
2922                         switch (rej->ibr_why) {
2923                         case IBLND_REJECT_CONN_RACE:
2924                         case IBLND_REJECT_CONN_STALE:
2925                         case IBLND_REJECT_CONN_UNCOMPAT:
2926                         case IBLND_REJECT_MSG_QUEUE_SIZE:
2927                         case IBLND_REJECT_RDMA_FRAGS:
2928                                 kiblnd_check_reconnect(conn, rej->ibr_version,
2929                                                        incarnation,
2930                                                        rej->ibr_why, cp);
2931                                 break;
2932
2933                         case IBLND_REJECT_NO_RESOURCES:
2934                                 CERROR("%s rejected: o2iblnd no resources\n",
2935                                        libcfs_nid2str(peer_ni->ibp_nid));
2936                                 break;
2937
2938                         case IBLND_REJECT_FATAL:
2939                                 CERROR("%s rejected: o2iblnd fatal error\n",
2940                                        libcfs_nid2str(peer_ni->ibp_nid));
2941                                 break;
2942
2943                         default:
2944                                 CERROR("%s rejected: o2iblnd reason %d\n",
2945                                        libcfs_nid2str(peer_ni->ibp_nid),
2946                                        rej->ibr_why);
2947                                 break;
2948                         }
2949                         break;
2950                 }
2951                 /* fall through */
2952         default:
2953                 CNETERR("%s rejected: reason %d, size %d\n",
2954                         libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
2955                 break;
2956         }
2957
2958         kiblnd_connreq_done(conn, status);
2959 }
2960
2961 static void
2962 kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
2963 {
2964         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2965         struct lnet_ni *ni = peer_ni->ibp_ni;
2966         struct kib_net *net = ni->ni_data;
2967         struct kib_msg *msg = priv;
2968         int            ver  = conn->ibc_version;
2969         int            rc   = kiblnd_unpack_msg(msg, priv_nob);
2970         unsigned long  flags;
2971
2972         LASSERT (net != NULL);
2973
2974         if (rc != 0) {
2975                 CERROR("Can't unpack connack from %s: %d\n",
2976                        libcfs_nid2str(peer_ni->ibp_nid), rc);
2977                 goto failed;
2978         }
2979
2980         if (msg->ibm_type != IBLND_MSG_CONNACK) {
2981                 CERROR("Unexpected message %d from %s\n",
2982                        msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid));
2983                 rc = -EPROTO;
2984                 goto failed;
2985         }
2986
2987         if (ver != msg->ibm_version) {
2988                 CERROR("%s replied with version %x, which differs from "
2989                        "the requested version %x\n",
2990                        libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver);
2991                 rc = -EPROTO;
2992                 goto failed;
2993         }
2994
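             /* The reply may only negotiate parameters downward: reject a
              * connack that asks for more than we offered in the connreq. */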
2995         if (msg->ibm_u.connparams.ibcp_queue_depth >
2996             conn->ibc_queue_depth) {
2997                 CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
2998                        libcfs_nid2str(peer_ni->ibp_nid),
2999                        msg->ibm_u.connparams.ibcp_queue_depth,
3000                        conn->ibc_queue_depth);
3001                 rc = -EPROTO;
3002                 goto failed;
3003         }
3004
3005         if (msg->ibm_u.connparams.ibcp_max_frags >
3006             conn->ibc_max_frags) {
3007                 CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
3008                        libcfs_nid2str(peer_ni->ibp_nid),
3009                        msg->ibm_u.connparams.ibcp_max_frags,
3010                        conn->ibc_max_frags);
3011                 rc = -EPROTO;
3012                 goto failed;
3013         }
3014
3015         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
3016                 CERROR("%s max message size %d too big (%d max)\n",
3017                        libcfs_nid2str(peer_ni->ibp_nid),
3018                        msg->ibm_u.connparams.ibcp_max_msg_size,
3019                        IBLND_MSG_SIZE);
3020                 rc = -EPROTO;
3021                 goto failed;
3022         }
3023
3024         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3025         if (msg->ibm_dstnid == ni->ni_nid &&
3026             msg->ibm_dststamp == net->ibn_incarnation)
3027                 rc = 0;
3028         else
3029                 rc = -ESTALE;
3030         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3031
3032         if (rc != 0) {
3033                 CERROR("Bad connection reply from %s, rc = %d, "
3034                        "version: %x, max_frags: %d\n",
3035                        libcfs_nid2str(peer_ni->ibp_nid), rc,
3036                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
3037                 goto failed;
3038         }
3039
3040         conn->ibc_incarnation      = msg->ibm_srcstamp;
3041         conn->ibc_credits          = msg->ibm_u.connparams.ibcp_queue_depth;
3042         conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
3043         conn->ibc_queue_depth      = msg->ibm_u.connparams.ibcp_queue_depth;
3044         conn->ibc_max_frags        = msg->ibm_u.connparams.ibcp_max_frags;
3045         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
3046                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
3047
3048         kiblnd_connreq_done(conn, 0);
3049         return;
3050
3051  failed:
3052         /* NB My QP has already established itself, so I handle anything going
3053          * wrong here by setting ibc_comms_error.
3054          * kiblnd_connreq_done(conn, 0) moves the conn state to ESTABLISHED, but then
3055          * immediately tears it down. */
3056
3057         LASSERT (rc != 0);
3058         conn->ibc_comms_error = rc;
3059         kiblnd_connreq_done(conn, 0);
3060 }
3061
3062 static int
3063 kiblnd_active_connect(struct rdma_cm_id *cmid)
3064 {
3065         struct kib_peer_ni *peer_ni = cmid->context;
3066         struct kib_conn *conn;
3067         struct kib_msg *msg;
3068         struct rdma_conn_param cp;
3069         int                      version;
3070         __u64                    incarnation;
3071         unsigned long            flags;
3072         int                      rc;
3073
3074         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3075
3076         incarnation = peer_ni->ibp_incarnation;
3077         version     = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION :
3078                                                  peer_ni->ibp_version;
3079
3080         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3081
3082         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT,
3083                                   version);
3084         if (conn == NULL) {
3085                 kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM);
3086                 kiblnd_peer_decref(peer_ni); /* lose cmid's ref */
3087                 return -ENOMEM;
3088         }
3089
3090         /* conn "owns" cmid now, so I return success from here on to ensure the
3091          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
3092          * on peer_ni */
3093
3094         msg = &conn->ibc_connvars->cv_msg;
3095
3096         memset(msg, 0, sizeof(*msg));
3097         kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
3098         msg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
3099         msg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
3100         msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
3101
3102         kiblnd_pack_msg(peer_ni->ibp_ni, msg, version,
3103                         0, peer_ni->ibp_nid, incarnation);
3104
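             /* The CONNREQ message, including our offered connparams, travels
              * as CM private data in the connect request below. */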
3105         memset(&cp, 0, sizeof(cp));
3106         cp.private_data        = msg;
3107         cp.private_data_len    = msg->ibm_nob;
3108         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
3109         cp.initiator_depth     = 0;
3110         cp.flow_control        = 1;
3111         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
3112         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
3113
3114         LASSERT(cmid->context == (void *)conn);
3115         LASSERT(conn->ibc_cmid == cmid);
3116         rc = rdma_connect_locked(cmid, &cp);
3117         if (rc != 0) {
3118                 CERROR("Can't connect to %s: %d\n",
3119                        libcfs_nid2str(peer_ni->ibp_nid), rc);
3120                 kiblnd_connreq_done(conn, rc);
3121                 kiblnd_conn_decref(conn);
3122         }
3123
3124         return 0;
3125 }
3126
3127 int
3128 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
3129 {
3130         struct kib_peer_ni *peer_ni;
3131         struct kib_conn *conn;
3132         int rc;
3133
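             /* NB cmid->context holds the peer_ni while the address and route
              * are being resolved, and the conn once one has been created. */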
3134         switch (event->event) {
3135         default:
3136                 CERROR("Unexpected event: %d, status: %d\n",
3137                        event->event, event->status);
3138                 LBUG();
3139
3140         case RDMA_CM_EVENT_CONNECT_REQUEST:
3141                 /* destroy cmid on failure */
3142                 rc = kiblnd_passive_connect(cmid,
3143                                             (void *)KIBLND_CONN_PARAM(event),
3144                                             KIBLND_CONN_PARAM_LEN(event));
3145                 CDEBUG(D_NET, "connreq: %d\n", rc);
3146                 return rc;
3147
3148         case RDMA_CM_EVENT_ADDR_ERROR:
3149                 peer_ni = cmid->context;
3150                 CNETERR("%s: ADDR ERROR %d\n",
3151                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3152                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3153                 kiblnd_peer_decref(peer_ni);
3154                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
3155
3156         case RDMA_CM_EVENT_ADDR_RESOLVED:
3157                 peer_ni = cmid->context;
3158
3159                 CDEBUG(D_NET, "%s Addr resolved: %d\n",
3160                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3161
3162                 if (event->status != 0) {
3163                         CNETERR("Can't resolve address for %s: %d\n",
3164                                 libcfs_nid2str(peer_ni->ibp_nid), event->status);
3165                         rc = event->status;
3166                 } else {
3167                         rc = rdma_resolve_route(
3168                                 cmid, kiblnd_timeout() * 1000);
3169                         if (rc == 0) {
3170                                 struct kib_net *net = peer_ni->ibp_ni->ni_data;
3171                                 struct kib_dev *dev = net->ibn_dev;
3172
3173                                 CDEBUG(D_NET, "%s: connection bound to "
3174                                        "%s:%pI4h:%s\n",
3175                                        libcfs_nid2str(peer_ni->ibp_nid),
3176                                        dev->ibd_ifname,
3177                                        &dev->ibd_ifip, cmid->device->name);
3178
3179                                 return 0;
3180                         }
3181
3182                         /* Can't initiate route resolution */
3183                         CERROR("Can't resolve route for %s: %d\n",
3184                                libcfs_nid2str(peer_ni->ibp_nid), rc);
3185                 }
3186                 kiblnd_peer_connect_failed(peer_ni, 1, rc);
3187                 kiblnd_peer_decref(peer_ni);
3188                 return rc;                      /* rc != 0 destroys cmid */
3189
3190         case RDMA_CM_EVENT_ROUTE_ERROR:
3191                 peer_ni = cmid->context;
3192                 CNETERR("%s: ROUTE ERROR %d\n",
3193                         libcfs_nid2str(peer_ni->ibp_nid), event->status);
3194                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3195                 kiblnd_peer_decref(peer_ni);
3196                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
3197
3198         case RDMA_CM_EVENT_ROUTE_RESOLVED:
3199                 peer_ni = cmid->context;
3200                 CDEBUG(D_NET, "%s Route resolved: %d\n",
3201                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3202
3203                 if (event->status == 0)
3204                         return kiblnd_active_connect(cmid);
3205
3206                 CNETERR("Can't resolve route for %s: %d\n",
3207                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3208                 kiblnd_peer_connect_failed(peer_ni, 1, event->status);
3209                 kiblnd_peer_decref(peer_ni);
3210                 return event->status;           /* rc != 0 destroys cmid */
3211
3212         case RDMA_CM_EVENT_UNREACHABLE:
3213                 conn = cmid->context;
3214                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3215                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3216                 CNETERR("%s: UNREACHABLE %d\n",
3217                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3218                 kiblnd_connreq_done(conn, -ENETDOWN);
3219                 kiblnd_conn_decref(conn);
3220                 return 0;
3221
3222         case RDMA_CM_EVENT_CONNECT_ERROR:
3223                 conn = cmid->context;
3224                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3225                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3226                 CNETERR("%s: CONNECT ERROR %d\n",
3227                         libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3228                 kiblnd_connreq_done(conn, -ENOTCONN);
3229                 kiblnd_conn_decref(conn);
3230                 return 0;
3231
3232         case RDMA_CM_EVENT_REJECTED:
3233                 conn = cmid->context;
3234                 switch (conn->ibc_state) {
3235                 default:
3236                         LBUG();
3237
3238                 case IBLND_CONN_PASSIVE_WAIT:
3239                         CERROR ("%s: REJECTED %d\n",
3240                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
3241                                 event->status);
3242                         kiblnd_connreq_done(conn, -ECONNRESET);
3243                         break;
3244
3245                 case IBLND_CONN_ACTIVE_CONNECT:
3246                         kiblnd_rejected(conn, event->status,
3247                                         (void *)KIBLND_CONN_PARAM(event),
3248                                         KIBLND_CONN_PARAM_LEN(event));
3249                         break;
3250                 }
3251                 kiblnd_conn_decref(conn);
3252                 return 0;
3253
3254         case RDMA_CM_EVENT_ESTABLISHED:
3255                 conn = cmid->context;
3256                 switch (conn->ibc_state) {
3257                 default:
3258                         LBUG();
3259
3260                 case IBLND_CONN_PASSIVE_WAIT:
3261                         CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
3262                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3263                         kiblnd_connreq_done(conn, 0);
3264                         break;
3265
3266                 case IBLND_CONN_ACTIVE_CONNECT:
3267                         CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
3268                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3269                         kiblnd_check_connreply(conn,
3270                                                (void *)KIBLND_CONN_PARAM(event),
3271                                                KIBLND_CONN_PARAM_LEN(event));
3272                         break;
3273                 }
3274                 /* net keeps its ref on conn! */
3275                 return 0;
3276
3277         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
3278                 CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
3279                 return 0;
3280
3281         case RDMA_CM_EVENT_DISCONNECTED:
3282                 conn = cmid->context;
3283                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
3284                         CERROR("%s DISCONNECTED\n",
3285                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3286                         kiblnd_connreq_done(conn, -ECONNRESET);
3287                 } else {
3288                         kiblnd_close_conn(conn, 0);
3289                 }
3290                 kiblnd_conn_decref(conn);
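                     /* conn may be freed once the ref is dropped, so clear
                      * the now-stale back-pointer */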
3291                 cmid->context = NULL;
3292                 return 0;
3293
3294         case RDMA_CM_EVENT_DEVICE_REMOVAL:
3295                 LCONSOLE_ERROR_MSG(0x131,
3296                                    "Received notification of device removal\n"
3297                                    "Please shut down LNET to allow this to proceed\n");
3298                 /* Can't remove network from underneath LNET for now, so I have
3299                  * to ignore this */
3300                 return 0;
3301
3302         case RDMA_CM_EVENT_ADDR_CHANGE:
3303                 LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
3304                 return 0;
3305         }
3306 }
3307
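     /* Scan 'txs' and return nonzero if any tx on it has passed its deadline. */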
3308 static int
3309 kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
3310 {
3311         struct kib_tx *tx;
3312         struct list_head *ttmp;
3313
3314         list_for_each(ttmp, txs) {
3315                 tx = list_entry(ttmp, struct kib_tx, tx_list);
3316
3317                 if (txs != &conn->ibc_active_txs) {
3318                         LASSERT(tx->tx_queued);
3319                 } else {
3320                         LASSERT(!tx->tx_queued);
3321                         LASSERT(tx->tx_waiting || tx->tx_sending != 0);
3322                 }
3323
3324                 if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
3325                         CERROR("Timed out tx: %s(WSQ:%d%d%d), %lld seconds\n",
3326                                kiblnd_queue2str(conn, txs),
3327                                tx->tx_waiting, tx->tx_sending, tx->tx_queued,
3328                                kiblnd_timeout() +
3329                                ktime_ms_delta(ktime_get(),
3330                                               tx->tx_deadline) / MSEC_PER_SEC);
3331                         return 1;
3332                 }
3333         }
3334
3335         return 0;
3336 }
3337
3338 static int
3339 kiblnd_conn_timed_out_locked(struct kib_conn *conn)
3340 {
3341         return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
3342                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
3343                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
3344                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
3345                 kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
3346 }
3347
3348 static void
3349 kiblnd_check_conns (int idx)
3350 {
3351         LIST_HEAD(closes);
3352         LIST_HEAD(checksends);
3353         LIST_HEAD(timedout_txs);
3354         struct hlist_head *peers = &kiblnd_data.kib_peers[idx];
3355         struct kib_peer_ni *peer_ni;
3356         struct kib_conn *conn;
3357         struct kib_tx *tx, *tx_tmp;
3358         struct list_head *ctmp;
3359         unsigned long flags;
3360
3361         /* NB. We expect to look at all the peers and not find any
3362          * RDMAs to time out.  A write lock is needed here because
3363          * timed-out txs may be moved off a peer_ni's queue below.
3364          */
3365         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3366
3367         hlist_for_each_entry(peer_ni, peers, ibp_list) {
3368                 /* Check tx_deadline */
3369                 list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
3370                         if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
3371                                 CWARN("Timed out tx for %s: %lld seconds\n",
3372                                       libcfs_nid2str(peer_ni->ibp_nid),
3373                                       ktime_ms_delta(ktime_get(),
3374                                                      tx->tx_deadline) / MSEC_PER_SEC);
3375                                 list_move(&tx->tx_list, &timedout_txs);
3376                         }
3377                 }
3378
3379                 list_for_each(ctmp, &peer_ni->ibp_conns) {
3380                         int timedout;
3381                         int sendnoop;
3382
3383                         conn = list_entry(ctmp, struct kib_conn, ibc_list);
3384
3385                         LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
3386
3387                         spin_lock(&conn->ibc_lock);
3388
3389                         sendnoop = kiblnd_need_noop(conn);
3390                         timedout = kiblnd_conn_timed_out_locked(conn);
3391                         if (!sendnoop && !timedout) {
3392                                 spin_unlock(&conn->ibc_lock);
3393                                 continue;
3394                         }
3395
3396                         if (timedout) {
3397                                 CERROR("Timed out RDMA with %s (%lld): c: %u, oc: %u, rc: %u\n",
3398                                        libcfs_nid2str(peer_ni->ibp_nid),
3399                                        ktime_get_seconds()
3400                                        - peer_ni->ibp_last_alive,
3401                                        conn->ibc_credits,
3402                                        conn->ibc_outstanding_credits,
3403                                        conn->ibc_reserved_credits);
3404                                 list_add(&conn->ibc_connd_list, &closes);
3405                         } else {
3406                                 list_add(&conn->ibc_connd_list, &checksends);
3407                         }
3408                         /* +ref for 'closes' or 'checksends' */
3409                         kiblnd_conn_addref(conn);
3410
3411                         spin_unlock(&conn->ibc_lock);
3412                 }
3413         }
3414
3415         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3416
3417         if (!list_empty(&timedout_txs))
3418                 kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT,
3419                                    LNET_MSG_STATUS_NETWORK_TIMEOUT);
3420
3421         /* Handle timeout by closing the whole
3422          * connection. We can only be sure RDMA activity
3423          * has ceased once the QP has been modified.
3424          */
3425         while (!list_empty(&closes)) {
3426                 conn = list_entry(closes.next,
3427                                   struct kib_conn, ibc_connd_list);
3428                 list_del(&conn->ibc_connd_list);
3429                 kiblnd_close_conn(conn, -ETIMEDOUT);
3430                 kiblnd_conn_decref(conn);
3431         }
3432
3433         /* In case we have enough credits to return via a
3434          * NOOP, but there were no non-blocking tx descs
3435          * free to do it last time...
3436          */
3437         while (!list_empty(&checksends)) {
3438                 conn = list_entry(checksends.next,
3439                                   struct kib_conn, ibc_connd_list);
3440                 list_del(&conn->ibc_connd_list);
3441
3442                 spin_lock(&conn->ibc_lock);
3443                 kiblnd_check_sends_locked(conn);
3444                 spin_unlock(&conn->ibc_lock);
3445
3446                 kiblnd_conn_decref(conn);
3447         }
3448 }
3449
3450 static void
3451 kiblnd_disconnect_conn(struct kib_conn *conn)
3452 {
3453         LASSERT (!in_interrupt());
3454         LASSERT (current == kiblnd_data.kib_connd);
3455         LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
3456
3457         rdma_disconnect(conn->ibc_cmid);
3458         kiblnd_finalise_conn(conn);
3459
3460         kiblnd_peer_notify(conn->ibc_peer);
3461 }
3462
3463 /*
3464  * High-water mark for reconnection to the same peer_ni; reconnection
3465  * attempts should be delayed after more than KIB_RECONN_HIGH_RACE tries.
3466  */
3467 #define KIB_RECONN_HIGH_RACE    10
3468 /*
3469  * Allow connd to take a break and handle other things after consecutive
3470  * reconnection attempts.
3471  */
3472 #define KIB_RECONN_BREAK        100
3473
3474 int
3475 kiblnd_connd (void *arg)
3476 {
3477         spinlock_t *lock = &kiblnd_data.kib_connd_lock;
3478         wait_queue_entry_t wait;
3479         unsigned long flags;
3480         struct kib_conn *conn;
3481         int timeout;
3482         int i;
3483         bool dropped_lock;
3484         int peer_index = 0;
3485         unsigned long deadline = jiffies;
3486
3487         init_wait(&wait);
3488         kiblnd_data.kib_connd = current;
3489
3490         spin_lock_irqsave(lock, flags);
3491
3492         while (!kiblnd_data.kib_shutdown) {
3493                 int reconn = 0;
3494
3495                 dropped_lock = false;
3496
3497                 if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
3498                         struct kib_peer_ni *peer_ni = NULL;
3499
3500                         conn = list_entry(kiblnd_data.kib_connd_zombies.next,
3501                                           struct kib_conn, ibc_list);
3502                         list_del(&conn->ibc_list);
3503                         if (conn->ibc_reconnect) {
3504                                 peer_ni = conn->ibc_peer;
3505                                 kiblnd_peer_addref(peer_ni);
3506                         }
3507
3508                         spin_unlock_irqrestore(lock, flags);
3509                         dropped_lock = true;
3510
3511                         kiblnd_destroy_conn(conn);
3512
3513                         spin_lock_irqsave(lock, flags);
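                             /* kiblnd_destroy_conn() released the conn's state
                              * but not the struct itself; reuse it below as a
                              * reconnect cookie carrying the peer_ni ref taken
                              * above. */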
3514                         if (!peer_ni) {
3515                                 LIBCFS_FREE(conn, sizeof(*conn));
3516                                 continue;
3517                         }
3518
3519                         conn->ibc_peer = peer_ni;
3520                         if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
3521                                 list_add_tail(&conn->ibc_list,
3522                                               &kiblnd_data.kib_reconn_list);
3523                         else
3524                                 list_add_tail(&conn->ibc_list,
3525                                               &kiblnd_data.kib_reconn_wait);
3526                 }
3527
3528                 if (!list_empty(&kiblnd_data.kib_connd_conns)) {
3529                         int wait;
3530                         conn = list_entry(kiblnd_data.kib_connd_conns.next,
3531                                           struct kib_conn, ibc_list);
3532                         list_del(&conn->ibc_list);
3533
3534                         spin_unlock_irqrestore(lock, flags);
3535                         dropped_lock = true;
3536
3537                         kiblnd_disconnect_conn(conn);
3538                         wait = conn->ibc_waits;
3539                         if (wait == 0) /* else keep ref for connd_waits below */
3540                                 kiblnd_conn_decref(conn);
3541
3542                         spin_lock_irqsave(lock, flags);
3543
3544                         if (wait)
3545                                 list_add_tail(&conn->ibc_list,
3546                                               &kiblnd_data.kib_connd_waits);
3547                 }
3548
3549                 while (reconn < KIB_RECONN_BREAK) {
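                             /* Once per second, move peers parked on the
                              * reconnect wait list back onto the active
                              * reconnect list. */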
3550                         if (kiblnd_data.kib_reconn_sec !=
3551                             ktime_get_real_seconds()) {
3552                                 kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
3553                                 list_splice_init(&kiblnd_data.kib_reconn_wait,
3554                                                  &kiblnd_data.kib_reconn_list);
3555                         }
3556
3557                         if (list_empty(&kiblnd_data.kib_reconn_list))
3558                                 break;
3559
3560                         conn = list_entry(kiblnd_data.kib_reconn_list.next,
3561                                           struct kib_conn, ibc_list);
3562                         list_del(&conn->ibc_list);
3563
3564                         spin_unlock_irqrestore(lock, flags);
3565                         dropped_lock = true;
3566
3567                         reconn += kiblnd_reconnect_peer(conn->ibc_peer);
3568                         kiblnd_peer_decref(conn->ibc_peer);
3569                         LIBCFS_FREE(conn, sizeof(*conn));
3570
3571                         spin_lock_irqsave(lock, flags);
3572                 }
3573
3574                 if (!list_empty(&kiblnd_data.kib_connd_waits)) {
3575                         conn = list_entry(kiblnd_data.kib_connd_waits.next,
3576                                           struct kib_conn, ibc_list);
3577                         list_del(&conn->ibc_list);
3578                         spin_unlock_irqrestore(lock, flags);
3579
3580                         dropped_lock = kiblnd_tx_may_discard(conn);
3581                         if (dropped_lock)
3582                                 kiblnd_conn_decref(conn);
3583
3584                         spin_lock_irqsave(lock, flags);
3585                         if (!dropped_lock)
3586                                 list_add_tail(&conn->ibc_list,
3587                                               &kiblnd_data.kib_connd_waits);
3588                 }
3589
3590                 /* careful with the jiffy wrap... */
3591                 timeout = (int)(deadline - jiffies);
3592                 if (timeout <= 0) {
3593                         const int n = 4;
3594                         const int p = 1;
3595                         int chunk = HASH_SIZE(kiblnd_data.kib_peers);
3596                         unsigned int lnd_timeout;
3597
3598                         spin_unlock_irqrestore(lock, flags);
3599                         dropped_lock = true;
3600
3601                         /* Time to check for RDMA timeouts on a few more
3602                          * peers: I do checks every 'p' seconds on a
3603                          * proportion of the peer_ni table and I need to check
3604                          * every connection 'n' times within a timeout
3605                          * interval, to ensure I detect a timeout on any
3606                          * connection within (n+1)/n times the timeout
3607                          * interval.
3608                          */
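                             /* For example, with the defaults assumed here
                              * (a 50s lnd timeout and a 512-bucket peer
                              * table): chunk = 512 * 4 * 1 / 50 = 40 buckets
                              * scanned per second, sweeping the whole table
                              * every ~13s, i.e. ~4 times per timeout. */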
3609
3610                         lnd_timeout = kiblnd_timeout();
3611                         if (lnd_timeout > n * p)
3612                                 chunk = (chunk * n * p) / lnd_timeout;
3613                         if (chunk == 0)
3614                                 chunk = 1;
3615
3616                         for (i = 0; i < chunk; i++) {
3617                                 kiblnd_check_conns(peer_index);
3618                                 peer_index = (peer_index + 1) %
3619                                         HASH_SIZE(kiblnd_data.kib_peers);
3620                         }
3621
3622                         deadline += cfs_time_seconds(p);
3623                         spin_lock_irqsave(lock, flags);
3624                 }
3625
3626                 if (dropped_lock)
3627                         continue;
3628
3629                 /* Nothing to do for 'timeout'  */
3630                 set_current_state(TASK_INTERRUPTIBLE);
3631                 add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3632                 spin_unlock_irqrestore(lock, flags);
3633
3634                 schedule_timeout(timeout);
3635
3636                 remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3637                 spin_lock_irqsave(lock, flags);
3638         }
3639
3640         spin_unlock_irqrestore(lock, flags);
3641
3642         kiblnd_thread_fini();
3643         return 0;
3644 }
3645
3646 void
3647 kiblnd_qp_event(struct ib_event *event, void *arg)
3648 {
3649         struct kib_conn *conn = arg;
3650
3651         switch (event->event) {
3652         case IB_EVENT_COMM_EST:
3653                 CDEBUG(D_NET, "%s established\n",
3654                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
3655                 /* We received a packet but the connection isn't established;
3656                  * the handshake packet was probably lost, so tell the CM
3657                  * to force the connection into the established state */
3658                 rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
3659                 return;
3660
3661         case IB_EVENT_PORT_ERR:
3662         case IB_EVENT_DEVICE_FATAL:
3663                 CERROR("Fatal device error for NI %s\n",
3664                        libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
3665                 atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
3666                 return;
3667
3668         case IB_EVENT_PORT_ACTIVE:
3669                 CERROR("Port reactivated for NI %s\n",
3670                        libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
3671                 atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
3672                 return;
3673
3674         default:
3675                 CERROR("%s: Async QP event type %d\n",
3676                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3677                 return;
3678         }
3679 }
3680
3681 static void
3682 kiblnd_complete (struct ib_wc *wc)
3683 {
3684         switch (kiblnd_wreqid2type(wc->wr_id)) {
3685         default:
3686                 LBUG();
3687
3688         case IBLND_WID_MR:
3689                 if (wc->status != IB_WC_SUCCESS &&
3690                     wc->status != IB_WC_WR_FLUSH_ERR)
3691                         CNETERR("FastReg failed: %d\n", wc->status);
3692                 return;
3693
3694         case IBLND_WID_RDMA:
3695                 /* We only get RDMA completion notification if it fails.  All
3696                  * subsequent work items, including the final SEND will fail
3697                  * too.  However we can't print out any more info about the
3698                  * failing RDMA because 'tx' might be back on the idle list or
3699                  * even reused already if we didn't manage to post all our work
3700                  * items */
3701                 CNETERR("RDMA (tx: %p) failed: %d\n",
3702                         kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3703                 return;
3704
3705         case IBLND_WID_TX:
3706                 kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3707                 return;
3708
3709         case IBLND_WID_RX:
3710                 kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
3711                                    wc->byte_len);
3712                 return;
3713         }
3714 }
3715
3716 void
3717 kiblnd_cq_completion(struct ib_cq *cq, void *arg)
3718 {
3719         /* NB I'm not allowed to schedule this conn once its refcount has
3720          * reached 0.  Since fundamentally I'm racing with scheduler threads
3721          * consuming my CQ I could be called after all completions have
3722          * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
3723          * and this CQ is about to be destroyed so I NOOP. */
3724         struct kib_conn *conn = arg;
3725         struct kib_sched_info *sched = conn->ibc_sched;
3726         unsigned long flags;
3727
3728         LASSERT(cq == conn->ibc_cq);
3729
3730         spin_lock_irqsave(&sched->ibs_lock, flags);
3731
3732         conn->ibc_ready = 1;
3733
3734         if (!conn->ibc_scheduled &&
3735             (conn->ibc_nrx > 0 ||
3736              conn->ibc_nsends_posted > 0)) {
3737                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
3738                 conn->ibc_scheduled = 1;
3739                 list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
3740
3741                 if (waitqueue_active(&sched->ibs_waitq))
3742                         wake_up(&sched->ibs_waitq);
3743         }
3744
3745         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3746 }
3747
3748 void
3749 kiblnd_cq_event(struct ib_event *event, void *arg)
3750 {
3751         struct kib_conn *conn = arg;
3752
3753         CERROR("%s: async CQ event type %d\n",
3754                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3755 }
3756
3757 int
3758 kiblnd_scheduler(void *arg)
3759 {
3760         long id = (long)arg;
3761         struct kib_sched_info *sched;
3762         struct kib_conn *conn;
3763         wait_queue_entry_t wait;
3764         unsigned long flags;
3765         struct ib_wc wc;
3766         bool did_something;
3767         int rc;
3768
3769         init_wait(&wait);
3770
3771         sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
3772
3773         rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
3774         if (rc != 0) {
3775                 CWARN("Unable to bind on CPU partition %d; please verify that all CPUs are healthy and reload modules if necessary, otherwise your system might be at risk of low performance\n", sched->ibs_cpt);
3776         }
3777
3778         spin_lock_irqsave(&sched->ibs_lock, flags);
3779
3780         while (!kiblnd_data.kib_shutdown) {
3781                 if (need_resched()) {
3782                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3783
3784                         cond_resched();
3785
3786                         spin_lock_irqsave(&sched->ibs_lock, flags);
3787                 }
3788
3789                 did_something = false;
3790
3791                 if (!list_empty(&sched->ibs_conns)) {
3792                         conn = list_entry(sched->ibs_conns.next,
3793                                           struct kib_conn, ibc_sched_list);
3794                         /* take over kib_sched_conns' ref on conn... */
3795                         LASSERT(conn->ibc_scheduled);
3796                         list_del(&conn->ibc_sched_list);
3797                         conn->ibc_ready = 0;
3798
3799                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3800
3801                         wc.wr_id = IBLND_WID_INVAL;
3802
3803                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3804                         if (rc == 0) {
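                                     /* Empty CQ: re-arm the notifier first,
                                      * then poll once more so a completion
                                      * racing in before the re-arm isn't
                                      * missed. */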
3805                                 rc = ib_req_notify_cq(conn->ibc_cq,
3806                                                       IB_CQ_NEXT_COMP);
3807                                 if (rc < 0) {
3808                                         CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
3809                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
3810                                         kiblnd_close_conn(conn, -EIO);
3811                                         kiblnd_conn_decref(conn);
3812                                         spin_lock_irqsave(&sched->ibs_lock,
3813                                                           flags);
3814                                         continue;
3815                                 }
3816
3817                                 rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3818                         }
3819
3820                         if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
3821                                 LCONSOLE_ERROR(
3822                                         "ib_poll_cq (rc: %d) returned invalid "
3823                                         "wr_id, opcode %d, status: %d, "
3824                                         "vendor_err: %d, conn: %s status: %d\n"
3825                                         "please upgrade firmware and OFED or "
3826                                         "contact vendor.\n", rc,
3827                                         wc.opcode, wc.status, wc.vendor_err,
3828                                         libcfs_nid2str(conn->ibc_peer->ibp_nid),
3829                                         conn->ibc_state);
3830                                 rc = -EINVAL;
3831                         }
3832
3833                         if (rc < 0) {
3834                                 CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
3835                                       libcfs_nid2str(conn->ibc_peer->ibp_nid),
3836                                       rc);
3837                                 kiblnd_close_conn(conn, -EIO);
3838                                 kiblnd_conn_decref(conn);
3839                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3840                                 continue;
3841                         }
3842
3843                         spin_lock_irqsave(&sched->ibs_lock, flags);
3844
3845                         if (rc != 0 || conn->ibc_ready) {
3846                                 /* There may be another completion waiting; get
3847                                  * another scheduler to check while I handle
3848                                  * this one... */
3849                                 /* +1 ref for sched_conns */
3850                                 kiblnd_conn_addref(conn);
3851                                 list_add_tail(&conn->ibc_sched_list,
3852                                               &sched->ibs_conns);
3853                                 if (waitqueue_active(&sched->ibs_waitq))
3854                                         wake_up(&sched->ibs_waitq);
3855                         } else {
3856                                 conn->ibc_scheduled = 0;
3857                         }
3858
3859                         if (rc != 0) {
3860                                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3861                                 kiblnd_complete(&wc);
3862
3863                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3864                         }
3865
3866                         kiblnd_conn_decref(conn); /* ..drop my ref from above */
3867                         did_something = true;
3868                 }
3869
3870                 if (did_something)
3871                         continue;
3872
3873                 set_current_state(TASK_INTERRUPTIBLE);
3874                 add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
3875                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3876
3877                 schedule();
3878
3879                 remove_wait_queue(&sched->ibs_waitq, &wait);
3880                 set_current_state(TASK_RUNNING);
3881                 spin_lock_irqsave(&sched->ibs_lock, flags);
3882         }
3883
3884         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3885
3886         kiblnd_thread_fini();
3887         return 0;
3888 }
3889
3890 int
3891 kiblnd_failover_thread(void *arg)
3892 {
3893         rwlock_t *glock = &kiblnd_data.kib_global_lock;
3894         struct kib_dev *dev;
3895         struct net *ns = arg;
3896         wait_queue_entry_t wait;
3897         unsigned long flags;
3898         int rc;
3899
3900         LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
3901
3902         init_wait(&wait);
3903         write_lock_irqsave(glock, flags);
3904
3905         while (!kiblnd_data.kib_shutdown) {
3906                 bool do_failover = false;
3907                 int long_sleep;
3908
3909                 list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
3910                                     ibd_fail_list) {
3911                         if (ktime_get_seconds() < dev->ibd_next_failover)
3912                                 continue;
3913                         do_failover = true;
3914                         break;
3915                 }
3916
3917                 if (do_failover) {
3918                         list_del_init(&dev->ibd_fail_list);
3919                         dev->ibd_failover = 1;
3920                         write_unlock_irqrestore(glock, flags);
3921
3922                         rc = kiblnd_dev_failover(dev, ns);
3923
3924                         write_lock_irqsave(glock, flags);
3925
3926                         LASSERT(dev->ibd_failover);
3927                         dev->ibd_failover = 0;
3928                         if (rc >= 0) { /* Device is OK or failover succeeded */
3929                                 dev->ibd_next_failover = ktime_get_seconds() + 3;
3930                                 continue;
3931                         }
3932
3933                         /* failed to failover, retry later */
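                             /* (a linear backoff: one second per consecutive
                              * failed failover, capped at 10 seconds) */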
3934                         dev->ibd_next_failover = ktime_get_seconds() +
3935                                 min(dev->ibd_failed_failover, 10);
3936                         if (kiblnd_dev_can_failover(dev)) {
3937                                 list_add_tail(&dev->ibd_fail_list,
3938                                               &kiblnd_data.kib_failed_devs);
3939                         }
3940
3941                         continue;
3942                 }
3943
3944                 /* long sleep if no more pending failover */
3945                 long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
3946
3947                 set_current_state(TASK_INTERRUPTIBLE);
3948                 add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3949                 write_unlock_irqrestore(glock, flags);
3950
3951                 rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
3952                                       cfs_time_seconds(1));
3953                 set_current_state(TASK_RUNNING);
3954                 remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3955                 write_lock_irqsave(glock, flags);
3956
3957                 if (!long_sleep || rc != 0)
3958                         continue;
3959
3960                 /* After a long sleep, routinely check all active devices;
3961                  * we need this because if there are no active connections
3962                  * on a dev and no local SENDs, we may listen on the wrong
3963                  * HCA forever after a bonding failover.
3964                  */
3965                 list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
3966                         if (kiblnd_dev_can_failover(dev)) {
3967                                 list_add_tail(&dev->ibd_fail_list,
3968                                               &kiblnd_data.kib_failed_devs);
3969                         }
3970                 }
3971         }
3972
3973         write_unlock_irqrestore(glock, flags);
3974
3975         kiblnd_thread_fini();
3976         return 0;
3977 }