LU-14536 o2iblnd: don't try to reconnect if there's no listener
lnet/klnds/o2iblnd/o2iblnd_cb.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

#define MAX_CONN_RACES_BEFORE_ABORT 20

static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni);
static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
                                       int error);
static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx,
                               int type, int body_nob);
static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
                            int resid, struct kib_rdma_desc *dstrd, u64 dstcookie);
static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);

static void kiblnd_unmap_tx(struct kib_tx *tx);
static void kiblnd_check_sends_locked(struct kib_conn *conn);

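/* Release a completed tx: unmap its buffers, drop its connection ref,
 * return it to its pool, and finalise the (up to two) LNet messages
 * attached to it. */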
void
kiblnd_tx_done(struct kib_tx *tx)
{
        struct lnet_msg *lntmsg[2];
        int rc;
        int i;

        LASSERT(!in_interrupt());
        LASSERT(!tx->tx_queued);               /* mustn't be queued for sending */
        LASSERT(tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
        LASSERT(!tx->tx_waiting);              /* mustn't be awaiting peer_ni response */
        LASSERT(tx->tx_pool != NULL);

        kiblnd_unmap_tx(tx);

        /* tx may have up to 2 lnet msgs to finalise */
        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
        rc = tx->tx_status;

        if (tx->tx_conn != NULL) {
                kiblnd_conn_decref(tx->tx_conn);
                tx->tx_conn = NULL;
        }

        tx->tx_nwrq = tx->tx_nsge = 0;
        tx->tx_status = 0;

        kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

        /* delay finalize until my descs have been freed */
        for (i = 0; i < 2; i++) {
                if (lntmsg[i] == NULL)
                        continue;

                /* propagate health status to LNet for requests */
                if (i == 0)
                        lntmsg[i]->msg_health_status = tx->tx_hstatus;

                lnet_finalize(lntmsg[i], rc);
        }
}

void
kiblnd_txlist_done(struct list_head *txlist, int status,
                   enum lnet_msg_hstatus hstatus)
{
        struct kib_tx *tx;

        while (!list_empty(txlist)) {
                tx = list_entry(txlist->next, struct kib_tx, tx_list);

                list_del(&tx->tx_list);
                /* complete now */
                tx->tx_waiting = 0;
                tx->tx_status = status;
                if (hstatus != LNET_MSG_STATUS_OK)
                        tx->tx_hstatus = hstatus;
                kiblnd_tx_done(tx);
        }
}

static struct kib_tx *
kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
{
        struct kib_net *net = ni->ni_data;
        struct list_head *node;
        struct kib_tx *tx;
        struct kib_tx_poolset *tps;

        tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
        node = kiblnd_pool_alloc_node(&tps->tps_poolset);
        if (node == NULL)
                return NULL;
        tx = container_of(node, struct kib_tx, tx_list);

        LASSERT(tx->tx_nwrq == 0);
        LASSERT(!tx->tx_queued);
        LASSERT(tx->tx_sending == 0);
        LASSERT(!tx->tx_waiting);
        LASSERT(tx->tx_status == 0);
        LASSERT(tx->tx_conn == NULL);
        LASSERT(tx->tx_lntmsg[0] == NULL);
        LASSERT(tx->tx_lntmsg[1] == NULL);
        LASSERT(tx->tx_nfrags == 0);

        tx->tx_gaps = false;
        tx->tx_hstatus = LNET_MSG_STATUS_OK;

        return tx;
}

static void
kiblnd_drop_rx(struct kib_rx *rx)
{
        struct kib_conn *conn = rx->rx_conn;
        struct kib_sched_info *sched = conn->ibc_sched;
        unsigned long flags;

        spin_lock_irqsave(&sched->ibs_lock, flags);
        LASSERT(conn->ibc_nrx > 0);
        conn->ibc_nrx--;
        spin_unlock_irqrestore(&sched->ibs_lock, flags);

        kiblnd_conn_decref(conn);
}

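/* Post a receive work request for this rx buffer and, if requested,
 * return a credit to the peer_ni.  Takes a temporary connection ref
 * across ib_post_recv() because the rx no longer belongs to us once
 * it has been posted (LU-5678). */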
int
kiblnd_post_rx(struct kib_rx *rx, int credit)
{
        struct kib_conn *conn = rx->rx_conn;
        struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr *bad_wrq = NULL;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
#endif
        int rc;

        LASSERT(net != NULL);
        LASSERT(!in_interrupt());
        LASSERT(credit == IBLND_POSTRX_NO_CREDIT ||
                credit == IBLND_POSTRX_PEER_CREDIT ||
                credit == IBLND_POSTRX_RSRVD_CREDIT);
#ifdef HAVE_IB_GET_DMA_MR
        LASSERT(mr != NULL);

        rx->rx_sge.lkey   = mr->lkey;
#else
        rx->rx_sge.lkey   = conn->ibc_hdev->ibh_pd->local_dma_lkey;
#endif
        rx->rx_sge.addr   = rx->rx_msgaddr;
        rx->rx_sge.length = IBLND_MSG_SIZE;

        rx->rx_wrq.next = NULL;
        rx->rx_wrq.sg_list = &rx->rx_sge;
        rx->rx_wrq.num_sge = 1;
        rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

        LASSERT(conn->ibc_state >= IBLND_CONN_INIT);
        LASSERT(rx->rx_nob >= 0);               /* not posted */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return 0;
        }

        rx->rx_nob = -1;                        /* flag posted */

        /* NB: need an extra reference after ib_post_recv because we don't
         * own this rx (and rx::rx_conn) anymore, LU-5678.
         */
        kiblnd_conn_addref(conn);
#ifdef HAVE_IB_POST_SEND_RECV_CONST
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
                          (const struct ib_recv_wr **)&bad_wrq);
#else
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
#endif
        if (unlikely(rc != 0)) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        }

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                goto out;

        if (unlikely(rc != 0)) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);     /* No more posts for this rx */
                goto out;
        }

        if (credit == IBLND_POSTRX_NO_CREDIT)
                goto out;

        spin_lock(&conn->ibc_lock);
        if (credit == IBLND_POSTRX_PEER_CREDIT)
                conn->ibc_outstanding_credits++;
        else
                conn->ibc_reserved_credits++;
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

out:
        kiblnd_conn_decref(conn);
        return rc;
}

static struct kib_tx *
kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
        struct list_head *tmp;

        list_for_each(tmp, &conn->ibc_active_txs) {
                struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);

                LASSERT(!tx->tx_queued);
                LASSERT(tx->tx_sending != 0 || tx->tx_waiting);

                if (tx->tx_cookie != cookie)
                        continue;

                if (tx->tx_waiting &&
                    tx->tx_msg->ibm_type == txtype)
                        return tx;

                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
                      tx->tx_waiting ? "" : "NOT ",
                      tx->tx_msg->ibm_type, txtype);
        }
        return NULL;
}

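/* Match an incoming completion message against the tx waiting for it
 * on ibc_active_txs; close the connection on a protocol error and
 * finalise the tx if nothing more is outstanding on it. */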
static void
kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, u64 cookie)
{
        struct kib_tx *tx;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        int idle;

        spin_lock(&conn->ibc_lock);

        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
        if (tx == NULL) {
                spin_unlock(&conn->ibc_lock);

                CWARN("Unmatched completion type %x cookie %#llx from %s\n",
                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kiblnd_close_conn(conn, -EPROTO);
                return;
        }

        if (tx->tx_status == 0) {               /* success so far */
                if (status < 0) {               /* failed? */
                        tx->tx_status = status;
                        tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
                } else if (txtype == IBLND_MSG_GET_REQ) {
                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
                }
        }

        tx->tx_waiting = 0;

        idle = !tx->tx_queued && (tx->tx_sending == 0);
        if (idle)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(tx);
}

static void
kiblnd_send_completion(struct kib_conn *conn, int type, int status, u64 cookie)
{
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);

        if (tx == NULL) {
                CERROR("Can't get tx for completion %x for %s\n",
                       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
        kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));

        kiblnd_queue_tx(tx, conn);
}

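/* Dispatch a received message: account for the flow-control credits it
 * carries, then act on its type (immediate data, PUT/GET handshake or
 * completion).  Closes the connection on protocol errors and re-posts
 * the rx buffer unless lnet_parse() has taken ownership of it. */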
static void
kiblnd_handle_rx(struct kib_rx *rx)
{
        struct kib_msg *msg = rx->rx_msg;
        struct kib_conn *conn = rx->rx_conn;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        int credits = msg->ibm_credits;
        struct kib_tx *tx;
        int rc = 0;
        int rc2;
        int post_credit;

        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        CDEBUG(D_NET, "Received %x[%d] from %s\n",
               msg->ibm_type, credits,
               libcfs_nid2str(conn->ibc_peer->ibp_nid));

        if (credits != 0) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);

                if (conn->ibc_credits + credits >
                    conn->ibc_queue_depth) {
                        rc2 = conn->ibc_credits;
                        spin_unlock(&conn->ibc_lock);

                        CERROR("Bad credits from %s: %d + %d > %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               rc2, credits,
                               conn->ibc_queue_depth);

                        kiblnd_close_conn(conn, -EPROTO);
                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
                        return;
                }

                conn->ibc_credits += credits;

                /* This ensures the credit taken by NOOP can be returned */
                if (msg->ibm_type == IBLND_MSG_NOOP &&
                    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
                        conn->ibc_outstanding_credits++;

                kiblnd_check_sends_locked(conn);
                spin_unlock(&conn->ibc_lock);
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Bad IBLND message type %x from %s\n",
                       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_NO_CREDIT;
                rc = -EPROTO;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                        break;
                }

                if (credits != 0) /* credit already posted */
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else              /* a keepalive NOOP */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_IMMEDIATE:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
                                msg->ibm_srcnid, rx, 0);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_NAK:
                CWARN("PUT_NACK from %s\n",
                      libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_PUT_ACK:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;

                spin_lock(&conn->ibc_lock);
                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                        msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT(tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer_ni has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = tx->tx_nsge = 0;  /* overwrite PUT_REQ */

                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                tx->tx_waiting = 0;     /* clear waiting and queue atomically */
                kiblnd_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBLND_MSG_PUT_DONE:
                post_credit = IBLND_POSTRX_PEER_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_GET_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_GET_DONE:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kiblnd_close_conn(conn, rc);

        if (post_credit != IBLND_POSTRX_DONT_POST)
                kiblnd_post_rx(rx, post_credit);
}

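/* Completion handler for a receive work request: validate the message,
 * stash early rxs that race with connection establishment on
 * ibc_early_rxs, and hand good messages to kiblnd_handle_rx(). */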
static void
kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
        struct kib_msg *msg = rx->rx_msg;
        struct kib_conn *conn = rx->rx_conn;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_net *net = ni->ni_data;
        int rc;
        int err = -EIO;

        LASSERT(net != NULL);
        LASSERT(rx->rx_nob < 0);                /* was posted */
        rx->rx_nob = 0;                         /* isn't now */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
                goto ignore;

        if (status != IB_WC_SUCCESS) {
                CNETERR("Rx from %s failed: %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                goto failed;
        }

        LASSERT(nob >= 0);
        rx->rx_nob = nob;

        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
        if (rc != 0) {
                CERROR("Error %d unpacking rx from %s\n",
                       rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
            msg->ibm_dstnid != ni->ni_nid ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != net->ibn_incarnation) {
                CERROR("Stale rx from %s\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                err = -ESTALE;
                goto failed;
        }

        /* set time last known alive */
        kiblnd_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
                unsigned long flags;

                write_lock_irqsave(g_lock, flags);
                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(g_lock, flags);
                        return;
                }
                write_unlock_irqrestore(g_lock, flags);
        }
        kiblnd_handle_rx(rx);
        return;

 failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kiblnd_close_conn(conn, err);
 ignore:
        kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
}

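/* Map the tx's fragments through the FMR/FastReg pool for this CPT.
 * Fails early when the mapping cannot work: FastReg without gap
 * support cannot map a tx with gaps, and (with the FMR pool API) a
 * gappy tx must still fit the fragment count negotiated on the
 * connection. */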
static int
kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
                  struct kib_rdma_desc *rd, u32 nob)
{
        struct kib_hca_dev *hdev;
        struct kib_dev *dev;
        struct kib_fmr_poolset *fps;
        int cpt;
        int rc;
        int i;

        LASSERT(tx->tx_pool != NULL);
        LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);

        dev = net->ibn_dev;
        hdev = tx->tx_pool->tpo_hdev;
        cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;

        /*
         * If we're dealing with FastReg, but the device doesn't
         * support GAPS and the tx has GAPS, then there is no real point
         * in trying to map the memory, because it'll just fail. So
         * preemptively fail with an appropriate message
         */
        if ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED) &&
            !(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT) &&
            tx->tx_gaps) {
                CERROR("Using FastReg with no GAPS support, but tx has gaps. "
                       "Try setting use_fastreg_gaps to 1\n");
                return -EPROTONOSUPPORT;
        }

#ifdef HAVE_FMR_POOL_API
        /*
         * If FMR does not support gaps but the tx has gaps, then
         * we should make sure that the number of fragments we'll be sending
         * over fits within the number of fragments negotiated on the
         * connection, otherwise, we won't be able to RDMA the data.
         * We need to maintain the number of fragments negotiated on the
         * connection for backwards compatibility.
         */
        if (tx->tx_gaps && (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)) {
                if (tx->tx_conn &&
                    tx->tx_conn->ibc_max_frags <= rd->rd_nfrags) {
                        CERROR("TX number of frags (%d) is >= connection"
                               " max frags (%d). Consider setting peer's"
                               " map_on_demand to 256\n", tx->tx_nfrags,
                               tx->tx_conn->ibc_max_frags);
                        return -EFBIG;
                }
        }
#endif

        fps = net->ibn_fmr_ps[cpt];
        rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
        if (rc != 0) {
                CERROR("Can't map %u bytes (%u/%u fragments): %d\n", nob,
                       tx->tx_nfrags, rd->rd_nfrags, rc);
                return rc;
        }

        /*
         * If rd is not tx_rd, it's going to get sent to a peer_ni, who will
         * need the rkey
         */
        rd->rd_key = tx->tx_fmr.fmr_key;
        /*
         * for FastReg or FMR with no gaps we can accumulate all
         * the fragments in one FastReg or FMR fragment.
         */
        if (
#ifdef HAVE_FMR_POOL_API
            ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
             && !tx->tx_gaps) ||
#endif
            (dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)) {
                /* FMR requires zero based address */
#ifdef HAVE_FMR_POOL_API
                if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
                        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
#endif
                rd->rd_frags[0].rf_nob = nob;
                rd->rd_nfrags = 1;
        } else {
                /*
                 * We're transmitting with gaps using FMR.
                 * We'll need to use multiple fragments and identify the
                 * zero based address of each fragment.
                 */
                for (i = 0; i < rd->rd_nfrags; i++) {
                        rd->rd_frags[i].rf_addr &= ~hdev->ibh_page_mask;
                        rd->rd_frags[i].rf_addr += i << hdev->ibh_page_shift;
                }
        }

        return 0;
}

static void
kiblnd_unmap_tx(struct kib_tx *tx)
{
        if (
#ifdef HAVE_FMR_POOL_API
                tx->tx_fmr.fmr_pfmr ||
#endif
                tx->tx_fmr.fmr_frd)
                kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);

        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                tx->tx_nfrags = 0;
        }
}

#ifdef HAVE_IB_GET_DMA_MR
static struct ib_mr *
kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
{
        struct kib_net *net = ni->ni_data;
        struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;

        tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

        /*
         * if map-on-demand is turned on and the device supports
         * either FMR or FastReg then use that. Otherwise use global
         * memory regions. If that's not available either, then you're
         * dead in the water and fail the operation.
         */
        if (tunables->lnd_map_on_demand &&
            (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED
#ifdef HAVE_FMR_POOL_API
             || net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED
#endif
        ))
                return NULL;

        /*
         * hdev->ibh_mrs can be NULL. This case is dealt with gracefully
         * in the call chain. The mapping will fail with appropriate error
         * message.
         */
        return hdev->ibh_mrs;
}
#endif

static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
                         struct kib_rdma_desc *rd, int nfrags)
{
        struct kib_net *net = ni->ni_data;
        struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = NULL;
#endif
        __u32 nob;
        int i;

        /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the
         * RDMA sink */
        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        tx->tx_nfrags = nfrags;

        rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
                                          tx->tx_nfrags, tx->tx_dmadir);

        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                nob += rd->rd_frags[i].rf_nob;
        }

#ifdef HAVE_IB_GET_DMA_MR
        mr = kiblnd_find_rd_dma_mr(ni, rd);
        if (mr != NULL) {
                /* found pre-mapping MR */
                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
                return 0;
        }
#endif

        if (net->ibn_fmr_ps != NULL)
                return kiblnd_fmr_map_tx(net, tx, rd, nob);

        return -EINVAL;
}

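/* Build the scatterlist for an RDMA descriptor from a kiov (bio_vec)
 * array, skipping 'offset' bytes and covering 'nob' bytes.  Marks the
 * tx as gappy when a fragment ends on a non page-aligned boundary with
 * more data still to map. */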
static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
                                struct kib_rdma_desc *rd, int nkiov,
                                struct bio_vec *kiov, int offset, int nob)
{
        struct kib_net *net = ni->ni_data;
        struct scatterlist *sg;
        int fragnob;
        int max_nkiov;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT(nob > 0);
        LASSERT(nkiov > 0);
        LASSERT(net != NULL);

        while (offset >= kiov->bv_len) {
                offset -= kiov->bv_len;
                nkiov--;
                kiov++;
                LASSERT(nkiov > 0);
        }

        max_nkiov = nkiov;

        sg = tx->tx_frags;
        do {
                LASSERT(nkiov > 0);

                fragnob = min((int)(kiov->bv_len - offset), nob);

                /*
                 * We're allowed to start at a non-aligned page offset in
                 * the first fragment and end at a non-aligned page offset
                 * in the last fragment.
                 */
                if ((fragnob < (int)(kiov->bv_len - offset)) &&
                    nkiov < max_nkiov && nob > fragnob) {
                        CDEBUG(D_NET, "fragnob %d < available page %d: "
                                      "%d kiovs remaining with %d nob left\n",
                               fragnob, (int)(kiov->bv_len - offset),
                               nkiov, nob);
                        tx->tx_gaps = true;
                }

                sg_set_page(sg, kiov->bv_page, fragnob,
                            kiov->bv_offset + offset);
                sg = sg_next(sg);
                if (!sg) {
                        CERROR("lacking enough sg entries to map tx\n");
                        return -EFAULT;
                }

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

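/* Try to post one queued tx, consuming 'credit' send credits.  Returns
 * -EAGAIN (leaving the tx queued) when the connection has no capacity,
 * 0 on success or on dropping a redundant NOOP, and -EIO if the post
 * fails and the connection is closed.  Called with ibc_lock held. */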
static int
kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
__must_hold(&conn->ibc_lock)
{
        struct kib_msg *msg = tx->tx_msg;
        struct kib_peer_ni *peer_ni = conn->ibc_peer;
        struct lnet_ni *ni = peer_ni->ibp_ni;
        int ver = conn->ibc_version;
        int rc;
        int done;

        LASSERT(tx->tx_queued);
        /* We rely on this for QP sizing */
        LASSERT(tx->tx_nwrq > 0 && tx->tx_nsge >= 0);
        LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);

        LASSERT(credit == 0 || credit == 1);
        LASSERT(conn->ibc_outstanding_credits >= 0);
        LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
        LASSERT(conn->ibc_credits >= 0);
        LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);

        if (conn->ibc_nsends_posted ==
            kiblnd_concurrent_sends(ver, ni)) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&   /* last credit reserved */
            msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                CDEBUG(D_NET, "%s: not using last credit\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        /* NB don't drop ibc_lock before bumping tx_sending */
        list_del(&tx->tx_list);
        tx->tx_queued = 0;

        if (msg->ibm_type == IBLND_MSG_NOOP &&
            (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
             (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                /* OK to drop when posted enough NOOPs, since
                 * kiblnd_check_sends_locked will queue NOOP again when
                 * posted NOOPs complete */
                spin_unlock(&conn->ibc_lock);
                tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
                kiblnd_tx_done(tx);
                spin_lock(&conn->ibc_lock);
                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                       libcfs_nid2str(peer_ni->ibp_nid),
                       conn->ibc_noops_posted);
                return 0;
        }

        kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
                        peer_ni->ibp_nid, conn->ibc_incarnation);

        conn->ibc_credits -= credit;
        conn->ibc_outstanding_credits = 0;
        conn->ibc_nsends_posted++;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted++;

        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
         * and then re-queued here.  It's (just) possible that
         * tx_sending is non-zero if we've not done the tx_complete()
         * from the first send; hence the ++ rather than = below. */
        tx->tx_sending++;
        list_add(&tx->tx_list, &conn->ibc_active_txs);

        /* I'm still holding ibc_lock! */
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
                rc = -ECONNABORTED;
        } else if (tx->tx_pool->tpo_pool.po_failed ||
                   conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
                /* close_conn will launch failover */
                rc = -ENETDOWN;
        } else {
                struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
                struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
                struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;

                if (frd != NULL) {
                        if (!frd->frd_valid) {
                                wr = &frd->frd_inv_wr.wr;
                                wr->next = &frd->frd_fastreg_wr.wr;
                        } else {
                                wr = &frd->frd_fastreg_wr.wr;
                        }
                        frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
                }

                LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
                         "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n",
                         bad->wr_id, bad->opcode, bad->send_flags,
                         libcfs_nid2str(conn->ibc_peer->ibp_nid));

                bad = NULL;
                if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus))
                        rc = -EINVAL;
                else
#ifdef HAVE_IB_POST_SEND_RECV_CONST
                        rc = ib_post_send(conn->ibc_cmid->qp, wr,
                                          (const struct ib_send_wr **)&bad);
#else
                        rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
#endif
        }

        conn->ibc_last_send = ktime_get();

        if (rc == 0)
                return 0;

        /* NB credits are transferred in the actual
         * message, which can only be the last work item */
        conn->ibc_credits += credit;
        conn->ibc_outstanding_credits += msg->ibm_credits;
        conn->ibc_nsends_posted--;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        tx->tx_status = rc;
        tx->tx_waiting = 0;
        tx->tx_sending--;

        done = (tx->tx_sending == 0);
        if (done)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                CERROR("Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));
        else
                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));

        kiblnd_close_conn(conn, rc);

        if (done)
                kiblnd_tx_done(tx);

        spin_lock(&conn->ibc_lock);

        return -EIO;
}

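/* Drain the connection's send queues: promote reserved-credit txs,
 * queue a NOOP if one is needed to return credits, then keep posting
 * queued txs until there is no more work or kiblnd_post_tx_locked()
 * reports the connection is full.  Called with ibc_lock held. */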
static void
kiblnd_check_sends_locked(struct kib_conn *conn)
{
        int ver = conn->ibc_version;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_tx *tx;

        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                CDEBUG(D_NET, "%s too soon\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        LASSERT(conn->ibc_nsends_posted <=
                kiblnd_concurrent_sends(ver, ni));
        LASSERT(!IBLND_OOB_CAPABLE(ver) ||
                conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT(conn->ibc_reserved_credits >= 0);

        while (conn->ibc_reserved_credits > 0 &&
               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
                                struct kib_tx, tx_list);
                list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
        }

        if (kiblnd_need_noop(conn)) {
                spin_unlock(&conn->ibc_lock);

                tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
                if (tx != NULL)
                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

                spin_lock(&conn->ibc_lock);
                if (tx != NULL)
                        kiblnd_queue_tx_locked(tx, conn);
        }

        for (;;) {
                int credit;

                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
                                        struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_noops)) {
                        LASSERT(!IBLND_OOB_CAPABLE(ver));
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_noops.next,
                                        struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_queue.next,
                                        struct kib_tx, tx_list);
                } else
                        break;

                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                        break;
        }
}

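/* Completion handler for a send work request: update send accounting,
 * close the connection on failure, and finalise the tx once it is
 * neither sending, waiting for a reply, nor re-queued. */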
static void
kiblnd_tx_complete(struct kib_tx *tx, int status)
{
        int failed = (status != IB_WC_SUCCESS);
        struct kib_conn *conn = tx->tx_conn;
        int idle;

        if (tx->tx_sending <= 0) {
                CERROR("Received an event on a freed tx: %p status %d\n",
                       tx, tx->tx_status);
                return;
        }

        if (failed) {
                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CNETERR("Tx -> %s cookie %#llx"
                                " sending %d waiting %d: failed %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
                                status);

                kiblnd_close_conn(conn, -EIO);
        } else {
                kiblnd_peer_alive(conn->ibc_peer);
        }

        spin_lock(&conn->ibc_lock);

        /* I could be racing with rdma completion.  Whoever makes 'tx' idle
         * gets to free it, which also drops its ref on 'conn'. */

        tx->tx_sending--;
        conn->ibc_nsends_posted--;
        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        if (failed) {
                tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
                tx->tx_waiting = 0;             /* don't wait for peer_ni */
                tx->tx_status = -EIO;
        }

        idle = (tx->tx_sending == 0) &&         /* This is the final callback */
               !tx->tx_waiting &&               /* Not waiting for peer_ni */
               !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
        if (idle)
                list_del(&tx->tx_list);

        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(tx);
}

static void
kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type,
                   int body_nob)
{
        struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
        struct ib_sge *sge = &tx->tx_msgsge;
        struct ib_rdma_wr *wrq;
        int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = hdev->ibh_mrs;
#endif

        LASSERT(tx->tx_nwrq >= 0);
        LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
        LASSERT(nob <= IBLND_MSG_SIZE);
#ifdef HAVE_IB_GET_DMA_MR
        LASSERT(mr != NULL);
#endif

        kiblnd_init_msg(tx->tx_msg, type, body_nob);

#ifdef HAVE_IB_GET_DMA_MR
        sge->lkey   = mr->lkey;
#else
        sge->lkey   = hdev->ibh_pd->local_dma_lkey;
#endif
        sge->addr   = tx->tx_msgaddr;
        sge->length = nob;

        wrq = &tx->tx_wrq[tx->tx_nwrq];
        memset(wrq, 0, sizeof(*wrq));

        wrq->wr.next            = NULL;
        wrq->wr.wr_id           = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
        wrq->wr.sg_list         = sge;
        wrq->wr.num_sge         = 1;
        wrq->wr.opcode          = IB_WR_SEND;
        wrq->wr.send_flags      = IB_SEND_SIGNALED;

        tx->tx_nwrq++;
}

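/* Set up the RDMA WRITE work requests that move 'resid' bytes from the
 * local source descriptor to the peer_ni's destination descriptor, then
 * append the GET_DONE/PUT_DONE completion message.  Returns the number
 * of bytes to transfer, or a negative errno if the transfer cannot be
 * described within the negotiated fragment limits. */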
static int
kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
                 int resid, struct kib_rdma_desc *dstrd, u64 dstcookie)
{
        struct kib_msg *ibmsg = tx->tx_msg;
        struct kib_rdma_desc *srcrd = tx->tx_rd;
        struct ib_rdma_wr *wrq = NULL;
        struct ib_sge *sge;
        int rc = resid;
        int srcidx;
        int dstidx;
        int sge_nob;
        int wrq_sge;

        LASSERT(!in_interrupt());
        LASSERT(tx->tx_nwrq == 0 && tx->tx_nsge == 0);
        LASSERT(type == IBLND_MSG_GET_DONE || type == IBLND_MSG_PUT_DONE);

        for (srcidx = dstidx = wrq_sge = sge_nob = 0;
             resid > 0; resid -= sge_nob) {
                int prev = dstidx;

                if (srcidx >= srcrd->rd_nfrags) {
                        CERROR("Src buffer exhausted: %d frags\n", srcidx);
                        rc = -EPROTO;
                        break;
                }

                if (dstidx >= dstrd->rd_nfrags) {
                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                        rc = -EPROTO;
                        break;
                }

                if (tx->tx_nwrq >= conn->ibc_max_frags) {
                        CERROR("RDMA has too many fragments for peer_ni %s (%d), "
                               "src idx/frags: %d/%d dst idx/frags: %d/%d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               conn->ibc_max_frags,
                               srcidx, srcrd->rd_nfrags,
                               dstidx, dstrd->rd_nfrags);
                        rc = -EMSGSIZE;
                        break;
                }

                sge_nob = min3(kiblnd_rd_frag_size(srcrd, srcidx),
                               kiblnd_rd_frag_size(dstrd, dstidx),
                               resid);

                sge = &tx->tx_sge[tx->tx_nsge];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                sge->length = sge_nob;

                if (wrq_sge == 0) {
                        wrq = &tx->tx_wrq[tx->tx_nwrq];

                        wrq->wr.next    = &(wrq + 1)->wr;
                        wrq->wr.wr_id   = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
                        wrq->wr.sg_list = sge;
                        wrq->wr.opcode  = IB_WR_RDMA_WRITE;
                        wrq->wr.send_flags = 0;

#ifdef HAVE_IB_RDMA_WR
                        wrq->remote_addr        = kiblnd_rd_frag_addr(dstrd,
                                                                      dstidx);
                        wrq->rkey               = kiblnd_rd_frag_key(dstrd,
                                                                     dstidx);
#else
                        wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd,
                                                                          dstidx);
                        wrq->wr.wr.rdma.rkey    = kiblnd_rd_frag_key(dstrd,
                                                                     dstidx);
#endif
                }

                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, sge_nob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, sge_nob);

                wrq_sge++;
                if (wrq_sge == *kiblnd_tunables.kib_wrq_sge || dstidx != prev) {
                        tx->tx_nwrq++;
                        wrq->wr.num_sge = wrq_sge;
                        wrq_sge = 0;
                }
                tx->tx_nsge++;
        }

        if (rc < 0)     /* no RDMA if completing with failure */
                tx->tx_nwrq = tx->tx_nsge = 0;

        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
                           type, sizeof(struct kib_completion_msg));

        return rc;
}

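/* Stamp a tx with its deadline, bind it to the connection (taking a
 * ref unless it is a PUT_DONE that is already bound), and append it to
 * the queue matching its message type.  Txs queued against a dead
 * connection are parked on ibc_zombie_txs instead. */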
static void
kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
{
        struct list_head *q;
        s64 timeout_ns;

        LASSERT(tx->tx_nwrq > 0);       /* work items set up */
        LASSERT(!tx->tx_queued);        /* not queued for sending already */
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) {
                tx->tx_status = -ECONNABORTED;
                tx->tx_waiting = 0;
                if (tx->tx_conn != NULL) {
                        /* PUT_DONE first attached to conn as a PUT_REQ */
                        LASSERT(tx->tx_conn == conn);
                        LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
                        tx->tx_conn = NULL;
                        kiblnd_conn_decref(conn);
                }
                list_add(&tx->tx_list, &conn->ibc_zombie_txs);

                return;
        }

        timeout_ns = kiblnd_timeout() * NSEC_PER_SEC;
        tx->tx_queued = 1;
        tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);

        if (tx->tx_conn == NULL) {
                kiblnd_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
        } else {
                /* PUT_DONE first attached to conn as a PUT_REQ */
                LASSERT(tx->tx_conn == conn);
                LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
        }

        switch (tx->tx_msg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_PUT_REQ:
        case IBLND_MSG_GET_REQ:
                q = &conn->ibc_tx_queue_rsrvd;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                q = &conn->ibc_tx_queue_nocred;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        q = &conn->ibc_tx_queue_nocred;
                else
                        q = &conn->ibc_tx_noops;
                break;

        case IBLND_MSG_IMMEDIATE:
                q = &conn->ibc_tx_queue;
                break;
        }

        list_add_tail(&tx->tx_list, q);
}

static void
kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
{
        spin_lock(&conn->ibc_lock);
        kiblnd_queue_tx_locked(tx, conn);
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);
}

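/* Resolve the peer_ni's address while binding the cmid to a privileged
 * local port, scanning downwards from PROT_SOCK-1 until a free port is
 * found.  The caller must hold CAP_NET_BIND_SERVICE (see
 * kiblnd_resolve_addr()). */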
static int
kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid,
                        struct sockaddr_in *srcaddr,
                        struct sockaddr_in *dstaddr,
                        int timeout_ms)
{
        unsigned short port;
        int rc;

        /* allow the port to be reused */
        rc = rdma_set_reuseaddr(cmid, 1);
        if (rc != 0) {
                CERROR("Unable to set reuse on cmid: %d\n", rc);
                return rc;
        }

        /* look for a free privileged port */
        for (port = PROT_SOCK-1; port > 0; port--) {
                srcaddr->sin_port = htons(port);
                rc = rdma_resolve_addr(cmid,
                                       (struct sockaddr *)srcaddr,
                                       (struct sockaddr *)dstaddr,
                                       timeout_ms);
                if (rc == 0) {
                        CDEBUG(D_NET, "bound to port %hu\n", port);
                        return 0;
                } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
                        CDEBUG(D_NET, "bind to port %hu failed: %d\n",
                               port, rc);
                } else {
                        return rc;
                }
        }

        CERROR("cannot bind to a free privileged port: rc = %d\n", rc);

        return rc;
}

static int
kiblnd_resolve_addr(struct rdma_cm_id *cmid,
                    struct sockaddr_in *srcaddr,
                    struct sockaddr_in *dstaddr,
                    int timeout_ms)
{
        const struct cred *old_creds = NULL;
        struct cred *new_creds;
        int rc;

        if (!capable(CAP_NET_BIND_SERVICE)) {
                new_creds = prepare_kernel_cred(NULL);
                if (!new_creds)
                        return -ENOMEM;

                cap_raise(new_creds->cap_effective, CAP_NET_BIND_SERVICE);
                old_creds = override_creds(new_creds);
        }

        rc = kiblnd_resolve_addr_cap(cmid, srcaddr, dstaddr, timeout_ms);

        if (old_creds)
                revert_creds(old_creds);

        return rc;
}

1377 static void
1378 kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
1379 {
1380         struct rdma_cm_id *cmid;
1381         struct kib_dev *dev;
1382         struct kib_net *net = peer_ni->ibp_ni->ni_data;
1383         struct sockaddr_in srcaddr;
1384         struct sockaddr_in dstaddr;
1385         int rc;
1386
1387         LASSERT (net != NULL);
1388         LASSERT (peer_ni->ibp_connecting > 0);
1389
1390         cmid = kiblnd_rdma_create_id(peer_ni->ibp_ni->ni_net_ns,
1391                                      kiblnd_cm_callback, peer_ni,
1392                                      RDMA_PS_TCP, IB_QPT_RC);
1393
1394         if (IS_ERR(cmid)) {
1395                 CERROR("Can't create CMID for %s: %ld\n",
1396                        libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid));
1397                 rc = PTR_ERR(cmid);
1398                 goto failed;
1399         }
1400
1401         dev = net->ibn_dev;
1402         memset(&srcaddr, 0, sizeof(srcaddr));
1403         srcaddr.sin_family = AF_INET;
1404         srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
1405
1406         memset(&dstaddr, 0, sizeof(dstaddr));
1407         dstaddr.sin_family = AF_INET;
1408         dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
1409         dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid));
1410
1411         kiblnd_peer_addref(peer_ni);               /* cmid's ref */
1412
1413         if (*kiblnd_tunables.kib_use_priv_port) {
1414                 rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
1415                                          kiblnd_timeout() * 1000);
1416         } else {
1417                 rc = rdma_resolve_addr(cmid,
1418                                        (struct sockaddr *)&srcaddr,
1419                                        (struct sockaddr *)&dstaddr,
1420                                        kiblnd_timeout() * 1000);
1421         }
1422         if (rc != 0) {
1423                 /* Can't initiate address resolution:  */
1424                 CERROR("Can't resolve addr for %s: %d\n",
1425                        libcfs_nid2str(peer_ni->ibp_nid), rc);
1426                 goto failed2;
1427         }
1428
1429         return;
1430
1431  failed2:
1432         kiblnd_peer_connect_failed(peer_ni, 1, rc);
1433         kiblnd_peer_decref(peer_ni);               /* cmid's ref */
1434         rdma_destroy_id(cmid);
1435         return;
1436  failed:
1437         kiblnd_peer_connect_failed(peer_ni, 1, rc);
1438 }
1439
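/* Attempt to re-establish a connection to peer_ni after a failed or
 * stale one.  Returns true if a new connection attempt was launched,
 * false if reconnection was abandoned (in which case any txs queued on
 * an unlinked peer_ni are completed with -ECONNABORTED). */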
1440 bool
1441 kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
1442 {
1443         rwlock_t *glock = &kiblnd_data.kib_global_lock;
1444         char *reason = NULL;
1445         LIST_HEAD(txs);
1446         unsigned long flags;
1447
1448         write_lock_irqsave(glock, flags);
1449         if (peer_ni->ibp_reconnecting == 0) {
1450                 if (peer_ni->ibp_accepting)
1451                         reason = "accepting";
1452                 else if (peer_ni->ibp_connecting)
1453                         reason = "connecting";
1454                 else if (!list_empty(&peer_ni->ibp_conns))
1455                         reason = "connected";
1456                 else /* connected then closed */
1457                         reason = "closed";
1458
1459                 goto no_reconnect;
1460         }
1461
1462         if (peer_ni->ibp_accepting)
1463                 CNETERR("Detected race between accepting and reconnecting\n");
1464         peer_ni->ibp_reconnecting--;
1465
1466         if (!kiblnd_peer_active(peer_ni)) {
1467                 list_splice_init(&peer_ni->ibp_tx_queue, &txs);
1468                 reason = "unlinked";
1469                 goto no_reconnect;
1470         }
1471
1472         peer_ni->ibp_connecting++;
1473         peer_ni->ibp_reconnected++;
1474
1475         write_unlock_irqrestore(glock, flags);
1476
1477         kiblnd_connect_peer(peer_ni);
1478         return true;
1479
1480  no_reconnect:
1481         write_unlock_irqrestore(glock, flags);
1482
1483         CWARN("Abort reconnection of %s: %s\n",
1484               libcfs_nid2str(peer_ni->ibp_nid), reason);
1485         kiblnd_txlist_done(&txs, -ECONNABORTED,
1486                            LNET_MSG_STATUS_LOCAL_ABORTED);
1487         return false;
1488 }
1489
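/* Queue 'tx' for 'nid', creating the peer_ni and connection(s) on
 * demand.  The locking follows a classic escalation pattern:
 *
 *   read_lock;  peer_ni connected?        -> queue tx and return
 *   write_lock; peer_ni found/connecting? -> queue tx and return
 *   (unlocked)  allocate a new peer_ni
 *   write_lock; re-check for a racing creator, else insert the new
 *               peer_ni and start lnd_conns_per_peer connect attempts
 */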
1490 void
1491 kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
1492 {
1493         struct kib_peer_ni *peer_ni;
1494         struct kib_peer_ni *peer2;
1495         struct kib_conn *conn;
1496         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
1497         unsigned long flags;
1498         int rc;
1499         int i;
1500         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
1501
1502         /* If I get here, I've committed to send, so I complete the tx with
1503          * failure on any problems
1504          */
1505
1506         LASSERT(!tx || !tx->tx_conn);     /* only set when assigned a conn */
1507         LASSERT(!tx || tx->tx_nwrq > 0);  /* work items have been set up */
1508
1509         /* First time, just use a read lock since I expect to find my peer_ni
1510          * connected
1511          */
1512         read_lock_irqsave(g_lock, flags);
1513
1514         peer_ni = kiblnd_find_peer_locked(ni, nid);
1515         if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) {
1516                 /* Found a peer_ni with an established connection */
1517                 conn = kiblnd_get_conn_locked(peer_ni);
1518                 kiblnd_conn_addref(conn); /* 1 ref for me... */
1519
1520                 read_unlock_irqrestore(g_lock, flags);
1521
1522                 if (tx != NULL)
1523                         kiblnd_queue_tx(tx, conn);
1524                 kiblnd_conn_decref(conn); /* ...to here */
1525                 return;
1526         }
1527
1528         read_unlock(g_lock);
1529         /* Re-try with a write lock; irqs stay disabled across the swap, so 'flags' stays valid */
1530         write_lock(g_lock);
1531
1532         peer_ni = kiblnd_find_peer_locked(ni, nid);
1533         if (peer_ni != NULL) {
1534                 if (list_empty(&peer_ni->ibp_conns)) {
1535                         /* found a peer_ni, but it's still connecting... */
1536                         LASSERT(kiblnd_peer_connecting(peer_ni));
1537                         if (tx != NULL)
1538                                 list_add_tail(&tx->tx_list,
1539                                               &peer_ni->ibp_tx_queue);
1540                         write_unlock_irqrestore(g_lock, flags);
1541                 } else {
1542                         conn = kiblnd_get_conn_locked(peer_ni);
1543                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1544
1545                         write_unlock_irqrestore(g_lock, flags);
1546
1547                         if (tx != NULL)
1548                                 kiblnd_queue_tx(tx, conn);
1549                         kiblnd_conn_decref(conn); /* ...to here */
1550                 }
1551                 return;
1552         }
1553
1554         write_unlock_irqrestore(g_lock, flags);
1555
1556         /* Allocate a peer_ni ready to add to the peer_ni table and retry */
1557         rc = kiblnd_create_peer(ni, &peer_ni, nid);
1558         if (rc != 0) {
1559                 CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid));
1560                 if (tx != NULL) {
1561                         tx->tx_status = -EHOSTUNREACH;
1562                         tx->tx_waiting = 0;
1563                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1564                         kiblnd_tx_done(tx);
1565                 }
1566                 return;
1567         }
1568
1569         write_lock_irqsave(g_lock, flags);
1570
1571         peer2 = kiblnd_find_peer_locked(ni, nid);
1572         if (peer2 != NULL) {
1573                 if (list_empty(&peer2->ibp_conns)) {
1574                         /* found a peer_ni, but it's still connecting... */
1575                         LASSERT(kiblnd_peer_connecting(peer2));
1576                         if (tx != NULL)
1577                                 list_add_tail(&tx->tx_list,
1578                                               &peer2->ibp_tx_queue);
1579                         write_unlock_irqrestore(g_lock, flags);
1580                 } else {
1581                         conn = kiblnd_get_conn_locked(peer2);
1582                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1583
1584                         write_unlock_irqrestore(g_lock, flags);
1585
1586                         if (tx != NULL)
1587                                 kiblnd_queue_tx(tx, conn);
1588                         kiblnd_conn_decref(conn); /* ...to here */
1589                 }
1590
1591                 kiblnd_peer_decref(peer_ni);
1592                 return;
1593         }
1594
1595         /* Brand new peer_ni */
1596         LASSERT(peer_ni->ibp_connecting == 0);
1597         tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
1598         peer_ni->ibp_connecting = tunables->lnd_conns_per_peer;
1599
1600         /* always called with a ref on ni, which prevents ni being shutdown */
1601         LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0);
1602
1603         if (tx != NULL)
1604                 list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
1605
1606         kiblnd_peer_addref(peer_ni);
1607         hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
1608
1609         write_unlock_irqrestore(g_lock, flags);
1610
1611         for (i = 0; i < tunables->lnd_conns_per_peer; i++)
1612                 kiblnd_connect_peer(peer_ni);
1613         kiblnd_peer_decref(peer_ni);
1614 }
1615
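/* LND send entry point.  A message travels as an IMMEDIATE if the
 * whole kib_msg fits in a pre-posted receive buffer, i.e. when
 *
 *   offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
 *           <= IBLND_MSG_SIZE
 *
 * otherwise an RDMA descriptor is set up and a GET_REQ/PUT_REQ is sent
 * so the peer_ni can drive the bulk transfer. */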
1616 int
1617 kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
1618 {
1619         struct lnet_hdr *hdr = &lntmsg->msg_hdr;
1620         int               type = lntmsg->msg_type;
1621         struct lnet_process_id target = lntmsg->msg_target;
1622         int               target_is_router = lntmsg->msg_target_is_router;
1623         int               routing = lntmsg->msg_routing;
1624         unsigned int      payload_niov = lntmsg->msg_niov;
1625         struct bio_vec   *payload_kiov = lntmsg->msg_kiov;
1626         unsigned int      payload_offset = lntmsg->msg_offset;
1627         unsigned int      payload_nob = lntmsg->msg_len;
1628         struct kib_msg *ibmsg;
1629         struct kib_rdma_desc *rd;
1630         struct kib_tx *tx;
1631         int               nob;
1632         int               rc;
1633
1634         /* NB 'private' is different depending on what we're sending.... */
1635
1636         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
1637                payload_nob, payload_niov, libcfs_id2str(target));
1638
1639         LASSERT(payload_nob == 0 || payload_niov > 0);
1640         LASSERT(payload_niov <= LNET_MAX_IOV);
1641
1642         /* Thread context */
1643         LASSERT(!in_interrupt());
1644
1645         switch (type) {
1646         default:
1647                 LBUG();
1648                 return -EIO;
1649
1650         case LNET_MSG_ACK:
1651                 LASSERT(payload_nob == 0);
1652                 break;
1653
1654         case LNET_MSG_GET:
1655                 if (routing || target_is_router)
1656                         break;                  /* send IMMEDIATE */
1657
1658                 /* is the REPLY message too small for RDMA? */
1659                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
1660                 if (nob <= IBLND_MSG_SIZE)
1661                         break;                  /* send IMMEDIATE */
1662
1663                 tx = kiblnd_get_idle_tx(ni, target.nid);
1664                 if (tx == NULL) {
1665                         CERROR("Can't allocate txd for GET to %s\n",
1666                                libcfs_nid2str(target.nid));
1667                         return -ENOMEM;
1668                 }
1669
1670                 ibmsg = tx->tx_msg;
1671                 rd = &ibmsg->ibm_u.get.ibgm_rd;
1672                 rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1673                                           lntmsg->msg_md->md_niov,
1674                                           lntmsg->msg_md->md_kiov,
1675                                           0, lntmsg->msg_md->md_length);
1676                 if (rc != 0) {
1677                         CERROR("Can't setup GET sink for %s: %d\n",
1678                                libcfs_nid2str(target.nid), rc);
1679                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1680                         kiblnd_tx_done(tx);
1681                         return -EIO;
1682                 }
1683
1684                 nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
1685                 ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
1686                 ibmsg->ibm_u.get.ibgm_hdr = *hdr;
1687
1688                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
1689
1690                 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
1691                 if (tx->tx_lntmsg[1] == NULL) {
1692                         CERROR("Can't create reply for GET -> %s\n",
1693                                libcfs_nid2str(target.nid));
1694                         kiblnd_tx_done(tx);
1695                         return -EIO;
1696                 }
1697
1698                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
1699                 tx->tx_waiting = 1;             /* waiting for GET_DONE */
1700                 kiblnd_launch_tx(ni, tx, target.nid);
1701                 return 0;
1702
1703         case LNET_MSG_REPLY:
1704         case LNET_MSG_PUT:
1705                 /* Is the payload small enough not to need RDMA? */
1706                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
1707                 if (nob <= IBLND_MSG_SIZE)
1708                         break;                  /* send IMMEDIATE */
1709
1710                 tx = kiblnd_get_idle_tx(ni, target.nid);
1711                 if (tx == NULL) {
1712                         CERROR("Can't allocate %s txd for %s\n",
1713                                type == LNET_MSG_PUT ? "PUT" : "REPLY",
1714                                libcfs_nid2str(target.nid));
1715                         return -ENOMEM;
1716                 }
1717
1718                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1719                                           payload_niov, payload_kiov,
1720                                           payload_offset, payload_nob);
1721                 if (rc != 0) {
1722                         CERROR("Can't setup PUT src for %s: %d\n",
1723                                libcfs_nid2str(target.nid), rc);
1724                         kiblnd_tx_done(tx);
1725                         return -EIO;
1726                 }
1727
1728                 ibmsg = tx->tx_msg;
1729                 ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
1730                 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
1731                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
1732                                    sizeof(struct kib_putreq_msg));
1733
1734                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1735                 tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
1736                 kiblnd_launch_tx(ni, tx, target.nid);
1737                 return 0;
1738         }
1739
1740         /* send IMMEDIATE */
1741         LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
1742                 <= IBLND_MSG_SIZE);
1743
1744         tx = kiblnd_get_idle_tx(ni, target.nid);
1745         if (tx == NULL) {
1746                 CERROR("Can't send %d to %s: tx descs exhausted\n",
1747                        type, libcfs_nid2str(target.nid));
1748                 return -ENOMEM;
1749         }
1750
1751         ibmsg = tx->tx_msg;
1752         ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
1753
1754         lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
1755                             offsetof(struct kib_msg,
1756                                      ibm_u.immediate.ibim_payload),
1757                             payload_niov, payload_kiov,
1758                             payload_offset, payload_nob);
1759
1760         nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
1761         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1762
1763         tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
1764         kiblnd_launch_tx(ni, tx, target.nid);
1765         return 0;
1766 }
1767
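/* Reply to an optimized GET: RDMA the payload of lntmsg directly into
 * the sink described by the GET_REQ's RDMA descriptor, completing with
 * a GET_DONE that carries the requester's cookie.  A zero-length reply
 * needs no RDMA and can be finalized immediately. */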
1768 static void
1769 kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
1770 {
1771         struct lnet_process_id target = lntmsg->msg_target;
1772         unsigned int niov = lntmsg->msg_niov;
1773         struct bio_vec *kiov = lntmsg->msg_kiov;
1774         unsigned int offset = lntmsg->msg_offset;
1775         unsigned int nob = lntmsg->msg_len;
1776         struct kib_tx *tx;
1777         int rc;
1778
1779         tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
1780         if (tx == NULL) {
1781                 CERROR("Can't get tx for REPLY to %s\n",
1782                        libcfs_nid2str(target.nid));
1783                 goto failed_0;
1784         }
1785
1786         if (nob == 0)
1787                 rc = 0;
1788         else
1789                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1790                                           niov, kiov, offset, nob);
1791
1792         if (rc != 0) {
1793                 CERROR("Can't setup GET src for %s: %d\n",
1794                        libcfs_nid2str(target.nid), rc);
1795                 goto failed_1;
1796         }
1797
1798         rc = kiblnd_init_rdma(rx->rx_conn, tx,
1799                               IBLND_MSG_GET_DONE, nob,
1800                               &rx->rx_msg->ibm_u.get.ibgm_rd,
1801                               rx->rx_msg->ibm_u.get.ibgm_cookie);
1802         if (rc < 0) {
1803                 CERROR("Can't setup rdma for GET from %s: %d\n",
1804                        libcfs_nid2str(target.nid), rc);
1805                 goto failed_1;
1806         }
1807
1808         if (nob == 0) {
1809                 /* No RDMA: local completion may happen now! */
1810                 lnet_finalize(lntmsg, 0);
1811         } else {
1812                 /* RDMA: lnet_finalize(lntmsg) when it
1813                  * completes */
1814                 tx->tx_lntmsg[0] = lntmsg;
1815         }
1816
1817         kiblnd_queue_tx(tx, rx->rx_conn);
1818         return;
1819
1821 failed_1:
1822         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1823         kiblnd_tx_done(tx);
1824 failed_0:
1825         lnet_finalize(lntmsg, -EIO);
1826 }
1827
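/* LND receive entry point, dispatching on the incoming message type.
 * 'post_credit' decides how the rx buffer is reposted: a PUT_REQ
 * reserves its buffer for the later PUT_DONE, so it reposts with
 * IBLND_POSTRX_NO_CREDIT instead of returning a peer_ni credit. */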
1828 int
1829 kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
1830             int delayed, unsigned int niov, struct bio_vec *kiov,
1831             unsigned int offset, unsigned int mlen, unsigned int rlen)
1832 {
1833         struct kib_rx *rx = private;
1834         struct kib_msg *rxmsg = rx->rx_msg;
1835         struct kib_conn *conn = rx->rx_conn;
1836         struct kib_tx *tx;
1837         __u64        ibprm_cookie;
1838         int          nob;
1839         int          post_credit = IBLND_POSTRX_PEER_CREDIT;
1840         int          rc = 0;
1841
1842         LASSERT(mlen <= rlen);
1843         LASSERT(!in_interrupt());
1844
1845         switch (rxmsg->ibm_type) {
1846         default:
1847                 LBUG();
1848
1849         case IBLND_MSG_IMMEDIATE:
1850                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
1851                 if (nob > rx->rx_nob) {
1852                         CERROR("Immediate message from %s too big: %d(%d)\n",
1853                                 libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
1854                                 nob, rx->rx_nob);
1855                         rc = -EPROTO;
1856                         break;
1857                 }
1858
1859                 lnet_copy_flat2kiov(niov, kiov, offset,
1860                                     IBLND_MSG_SIZE, rxmsg,
1861                                     offsetof(struct kib_msg,
1862                                              ibm_u.immediate.ibim_payload),
1863                                     mlen);
1864                 lnet_finalize(lntmsg, 0);
1865                 break;
1866
1867         case IBLND_MSG_PUT_REQ: {
1868                 struct kib_msg  *txmsg;
1869                 struct kib_rdma_desc *rd;
1870                 ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
1871
1872                 if (mlen == 0) {
1873                         lnet_finalize(lntmsg, 0);
1874                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1875                                                0, ibprm_cookie);
1876                         break;
1877                 }
1878
1879                 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
1880                 if (tx == NULL) {
1881                         CERROR("Can't allocate tx for %s\n",
1882                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
1883                         /* Not replying will break the connection */
1884                         rc = -ENOMEM;
1885                         break;
1886                 }
1887
1888                 txmsg = tx->tx_msg;
1889                 rd = &txmsg->ibm_u.putack.ibpam_rd;
1890                 rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1891                                           niov, kiov, offset, mlen);
1892                 if (rc != 0) {
1893                         CERROR("Can't setup PUT sink for %s: %d\n",
1894                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
1895                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1896                         kiblnd_tx_done(tx);
1897                         /* tell peer_ni it's over */
1898                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1899                                                rc, ibprm_cookie);
1900                         break;
1901                 }
1902
1903                 nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
1904                 txmsg->ibm_u.putack.ibpam_src_cookie = ibprm_cookie;
1905                 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
1906
1907                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
1908
1909                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1910                 tx->tx_waiting = 1;             /* waiting for PUT_DONE */
1911                 kiblnd_queue_tx(tx, conn);
1912
1913                 /* reposted buffer reserved for PUT_DONE */
1914                 post_credit = IBLND_POSTRX_NO_CREDIT;
1915                 break;
1916                 }
1917
1918         case IBLND_MSG_GET_REQ:
1919                 if (lntmsg != NULL) {
1920                         /* Optimized GET; RDMA lntmsg's payload */
1921                         kiblnd_reply(ni, rx, lntmsg);
1922                 } else {
1923                         /* GET didn't match anything */
1924                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
1925                                                -ENODATA,
1926                                                rxmsg->ibm_u.get.ibgm_cookie);
1927                 }
1928                 break;
1929         }
1930
1931         kiblnd_post_rx(rx, post_credit);
1932         return rc;
1933 }
1934
1935 int
1936 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
1937 {
1938         struct task_struct *task = kthread_run(fn, arg, "%s", name);
1939
1940         if (IS_ERR(task))
1941                 return PTR_ERR(task);
1942
1943         atomic_inc(&kiblnd_data.kib_nthreads);
1944         return 0;
1945 }
1946
1947 static void
1948 kiblnd_thread_fini(void)
1949 {
1950         atomic_dec(&kiblnd_data.kib_nthreads);
1951 }
1952
1953 static void
1954 kiblnd_peer_alive(struct kib_peer_ni *peer_ni)
1955 {
1956         /* This is racy, but everyone's only writing ktime_get_seconds() */
1957         peer_ni->ibp_last_alive = ktime_get_seconds();
1958         smp_mb();
1959 }
1960
1961 static void
1962 kiblnd_peer_notify(struct kib_peer_ni *peer_ni)
1963 {
1964         int           error = 0;
1965         time64_t last_alive = 0;
1966         unsigned long flags;
1967
1968         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1969
1970         if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
1971                 error = peer_ni->ibp_error;
1972                 peer_ni->ibp_error = 0;
1973
1974                 last_alive = peer_ni->ibp_last_alive;
1975         }
1976
1977         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1978
1979         if (error != 0)
1980                 lnet_notify(peer_ni->ibp_ni,
1981                             peer_ni->ibp_nid, false, false, last_alive);
1982 }
1983
1984 void
1985 kiblnd_close_conn_locked(struct kib_conn *conn, int error)
1986 {
1987         /* This just does the immediate housekeeping.  'error' is zero for a
1988          * normal shutdown which can happen only after the connection has been
1989          * established.  If the connection is established, schedule the
1990          * connection to be finished off by the connd.  Otherwise the connd is
1991          * already dealing with it (either to set it up or tear it down).
1992          * Caller holds kib_global_lock exclusively with irqs disabled. */
1993         struct kib_peer_ni *peer_ni = conn->ibc_peer;
1994         struct kib_dev *dev;
1995         unsigned long flags;
1996
1997         LASSERT(error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1998
1999         if (error != 0 && conn->ibc_comms_error == 0)
2000                 conn->ibc_comms_error = error;
2001
2002         if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
2003                 return; /* already being handled  */
2004
2005         if (error == 0 &&
2006             list_empty(&conn->ibc_tx_noops) &&
2007             list_empty(&conn->ibc_tx_queue) &&
2008             list_empty(&conn->ibc_tx_queue_rsrvd) &&
2009             list_empty(&conn->ibc_tx_queue_nocred) &&
2010             list_empty(&conn->ibc_active_txs)) {
2011                 CDEBUG(D_NET, "closing conn to %s\n",
2012                        libcfs_nid2str(peer_ni->ibp_nid));
2013         } else {
2014                 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
2015                        libcfs_nid2str(peer_ni->ibp_nid), error,
2016                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
2017                        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
2018                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
2019                                                 "" : "(sending_rsrvd)",
2020                        list_empty(&conn->ibc_tx_queue_nocred) ?
2021                                                  "" : "(sending_nocred)",
2022                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
2023         }
2024
2025         dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev;
2026         if (peer_ni->ibp_next_conn == conn)
2027                 /* clear next_conn so it won't be used */
2028                 peer_ni->ibp_next_conn = NULL;
2029         list_del(&conn->ibc_list);
2030         /* connd (see below) takes over ibc_list's ref */
2031
2032         if (list_empty(&peer_ni->ibp_conns) &&    /* no more conns */
2033             kiblnd_peer_active(peer_ni)) {         /* still in peer_ni table */
2034                 kiblnd_unlink_peer_locked(peer_ni);
2035
2036                 /* set/clear error on last conn */
2037                 peer_ni->ibp_error = conn->ibc_comms_error;
2038         }
2039
2040         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
2041
2042         if (error != 0 &&
2043             kiblnd_dev_can_failover(dev)) {
2044                 list_add_tail(&dev->ibd_fail_list,
2045                               &kiblnd_data.kib_failed_devs);
2046                 wake_up(&kiblnd_data.kib_failover_waitq);
2047         }
2048
2049         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
2050
2051         list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
2052         wake_up(&kiblnd_data.kib_connd_waitq);
2053
2054         spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
2055 }
2056
2057 void
2058 kiblnd_close_conn(struct kib_conn *conn, int error)
2059 {
2060         unsigned long flags;
2061
2062         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2063
2064         kiblnd_close_conn_locked(conn, error);
2065
2066         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2067 }
2068
2069 static void
2070 kiblnd_handle_early_rxs(struct kib_conn *conn)
2071 {
2072         unsigned long flags;
2073         struct kib_rx *rx;
2074
2075         LASSERT(!in_interrupt());
2076         LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
2077
2078         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2079         while (!list_empty(&conn->ibc_early_rxs)) {
2080                 rx = list_entry(conn->ibc_early_rxs.next,
2081                                 struct kib_rx, rx_list);
2082                 list_del(&rx->rx_list);
2083                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2084
2085                 kiblnd_handle_rx(rx);
2086
2087                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2088         }
2089         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2090 }
2091
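/* Abort every tx on 'txs' with -ECONNABORTED, choosing a health status
 * that reflects how far each tx got.  Transmits with a send still in
 * flight cannot be completed yet; they are parked on ibc_zombie_txs
 * until the CQ is destroyed (see kiblnd_tx_may_discard() below). */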
2092 void
2093 kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
2094 {
2095         LIST_HEAD(zombies);
2096         struct kib_tx *nxt;
2097         struct kib_tx *tx;
2098
2099         spin_lock(&conn->ibc_lock);
2100
2101         list_for_each_entry_safe(tx, nxt, txs, tx_list) {
2102                 if (txs == &conn->ibc_active_txs) {
2103                         LASSERT(!tx->tx_queued);
2104                         LASSERT(tx->tx_waiting ||
2105                                 tx->tx_sending != 0);
2106                         if (conn->ibc_comms_error == -ETIMEDOUT) {
2107                                 if (tx->tx_waiting && !tx->tx_sending)
2108                                         tx->tx_hstatus =
2109                                           LNET_MSG_STATUS_REMOTE_TIMEOUT;
2110                                 else if (tx->tx_sending)
2111                                         tx->tx_hstatus =
2112                                           LNET_MSG_STATUS_NETWORK_TIMEOUT;
2113                         }
2114                 } else {
2115                         LASSERT(tx->tx_queued);
2116                         if (conn->ibc_comms_error == -ETIMEDOUT)
2117                                 tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
2118                         else
2119                                 tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
2120                 }
2121
2122                 tx->tx_status = -ECONNABORTED;
2123                 tx->tx_waiting = 0;
2124
2125                 /*
2126                  * TODO: This makes an assumption that
2127                  * kiblnd_tx_complete() will be called for each tx. If
2128                  * that event is dropped we could end up with stale
2129                  * connections floating around. We'd like to deal with
2130                  * that in a better way.
2131                  *
2132                  * Also that means we can exceed the timeout by many
2133                  * seconds.
2134                  */
2135                 if (tx->tx_sending == 0) {
2136                         tx->tx_queued = 0;
2137                         list_move(&tx->tx_list, &zombies);
2138                 } else {
2139                         /* keep tx until cq destroy */
2140                         list_move(&tx->tx_list, &conn->ibc_zombie_txs);
2141                         conn->ibc_waits++;
2142                 }
2143         }
2144
2145         spin_unlock(&conn->ibc_lock);
2146
2147         /*
2148          * Transmits are aborted when the connection is finalized, and
2149          * the connection is finalized on error.
2150          * Passing LNET_MSG_STATUS_OK to kiblnd_txlist_done() will not
2151          * override the value already set in tx->tx_hstatus above.
2152          */
2153         kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
2154 }
2155
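/* A zombie tx whose LNet MD has been discarded may never see its send
 * completion; drop its conn ref so connection teardown can proceed.
 * Returns true if any tx was released. */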
2156 static bool
2157 kiblnd_tx_may_discard(struct kib_conn *conn)
2158 {
2159         bool rc = false;
2160         struct kib_tx *nxt;
2161         struct kib_tx *tx;
2162
2163         spin_lock(&conn->ibc_lock);
2164
2165         list_for_each_entry_safe(tx, nxt, &conn->ibc_zombie_txs, tx_list) {
2166                 if (tx->tx_sending > 0 && tx->tx_lntmsg[0] &&
2167                     lnet_md_discarded(tx->tx_lntmsg[0]->msg_md)) {
2168                         tx->tx_sending--;
2169                         if (tx->tx_sending == 0) {
2170                                 kiblnd_conn_decref(tx->tx_conn);
2171                                 tx->tx_conn = NULL;
2172                                 rc = true;
2173                         }
2174                 }
2175         }
2176
2177         spin_unlock(&conn->ibc_lock);
2178         return rc;
2179 }
2180
2181 static void
2182 kiblnd_finalise_conn(struct kib_conn *conn)
2183 {
2184         LASSERT(!in_interrupt());
2185         LASSERT(conn->ibc_state > IBLND_CONN_INIT);
2186
2187         /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
2188          * for connections that didn't get as far as being connected, because
2189          * rdma_disconnect() does this for free. */
2190         kiblnd_abort_receives(conn);
2191
2192         kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
2193
2194         /* Complete all tx descs not waiting for sends to complete.
2195          * NB we should be safe from RDMA now that the QP has changed state */
2196
2197         kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
2198         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
2199         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
2200         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
2201         kiblnd_abort_txs(conn, &conn->ibc_active_txs);
2202
2203         kiblnd_handle_early_rxs(conn);
2204 }
2205
2206 static void
2207 kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
2208                            int error)
2209 {
2210         LIST_HEAD(zombies);
2211         unsigned long flags;
2212         enum lnet_msg_hstatus hstatus;
2213
2214         LASSERT(error != 0);
2215         LASSERT(!in_interrupt());
2216
2217         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2218
2219         if (active) {
2220                 LASSERT(peer_ni->ibp_connecting > 0);
2221                 peer_ni->ibp_connecting--;
2222         } else {
2223                 LASSERT(peer_ni->ibp_accepting > 0);
2224                 peer_ni->ibp_accepting--;
2225         }
2226
2227         if (kiblnd_peer_connecting(peer_ni)) {
2228                 /* another connection attempt under way... */
2229                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2230                                         flags);
2231                 return;
2232         }
2233
2234         peer_ni->ibp_reconnected = 0;
2235         if (list_empty(&peer_ni->ibp_conns)) {
2236                 /* Take peer_ni's blocked transmits to complete with error */
2237                 list_splice_init(&peer_ni->ibp_tx_queue, &zombies);
2238
2239                 if (kiblnd_peer_active(peer_ni))
2240                         kiblnd_unlink_peer_locked(peer_ni);
2241
2242                 peer_ni->ibp_error = error;
2243         } else {
2244                 /* Can't have blocked transmits if there are connections */
2245                 LASSERT(list_empty(&peer_ni->ibp_tx_queue));
2246         }
2247
2248         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2249
2250         kiblnd_peer_notify(peer_ni);
2251
2252         if (list_empty(&zombies))
2253                 return;
2254
2255         CNETERR("Deleting messages for %s: connection failed\n",
2256                 libcfs_nid2str(peer_ni->ibp_nid));
2257
2258         switch (error) {
2259         case -EHOSTUNREACH:
2260         case -ETIMEDOUT:
2261                 hstatus = LNET_MSG_STATUS_NETWORK_TIMEOUT;
2262                 break;
2263         case -ECONNREFUSED:
2264                 hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
2265                 break;
2266         default:
2267                 hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
2268                 break;
2269         }
2270
2271         kiblnd_txlist_done(&zombies, error, hstatus);
2272 }
2273
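/* Complete a connection attempt (active or passive).  On failure, roll
 * back the peer_ni's connecting/accepting count and finalise the conn;
 * on success, move the conn to ESTABLISHED, attach it to the peer_ni
 * and flush any transmits that were queued while connecting. */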
2274 static void
2275 kiblnd_connreq_done(struct kib_conn *conn, int status)
2276 {
2277         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2278         struct kib_tx *tx;
2279         LIST_HEAD(txs);
2280         unsigned long    flags;
2281         int              active;
2282
2283         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2284
2285         CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
2286                libcfs_nid2str(peer_ni->ibp_nid), active,
2287                conn->ibc_version, status);
2288
2289         LASSERT(!in_interrupt());
2290         LASSERT((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
2291                  peer_ni->ibp_connecting > 0) ||
2292                 (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
2293                  peer_ni->ibp_accepting > 0));
2294
2295         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
2296         conn->ibc_connvars = NULL;
2297
2298         if (status != 0) {
2299                 /* failed to establish connection */
2300                 kiblnd_peer_connect_failed(peer_ni, active, status);
2301                 kiblnd_finalise_conn(conn);
2302                 return;
2303         }
2304
2305         /* connection established */
2306         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2307
2308         conn->ibc_last_send = ktime_get();
2309         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
2310         kiblnd_peer_alive(peer_ni);
2311
2312         /* Add conn to peer_ni's list and nuke any dangling conns from a different
2313          * peer_ni instance... */
2314         kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
2315         list_add(&conn->ibc_list, &peer_ni->ibp_conns);
2316         peer_ni->ibp_reconnected = 0;
2317         if (active)
2318                 peer_ni->ibp_connecting--;
2319         else
2320                 peer_ni->ibp_accepting--;
2321
2322         if (peer_ni->ibp_version == 0) {
2323                 peer_ni->ibp_version     = conn->ibc_version;
2324                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2325         }
2326
2327         if (peer_ni->ibp_version     != conn->ibc_version ||
2328             peer_ni->ibp_incarnation != conn->ibc_incarnation) {
2329                 kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version,
2330                                                 conn->ibc_incarnation);
2331                 peer_ni->ibp_version     = conn->ibc_version;
2332                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2333         }
2334
2335         /* grab pending txs while I have the lock */
2336         list_splice_init(&peer_ni->ibp_tx_queue, &txs);
2337
2338         if (!kiblnd_peer_active(peer_ni) ||        /* peer_ni has been deleted */
2339             conn->ibc_comms_error != 0) {       /* error has happened already */
2340
2341                 /* start to shut down connection */
2342                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
2343                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2344
2345                 kiblnd_txlist_done(&txs, -ECONNABORTED,
2346                                    LNET_MSG_STATUS_LOCAL_ERROR);
2347
2348                 return;
2349         }
2350
2351         /* +1 ref for myself, this connection is visible to other threads
2352          * now, refcount of peer:ibp_conns can be released by connection
2353          * close from either a different thread, or the calling of
2354          * kiblnd_check_sends_locked() below. See bz21911 for details.
2355          */
2356         kiblnd_conn_addref(conn);
2357         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2358
2359         /* Schedule blocked txs
2360          * Note: if we are running with conns_per_peer > 1, these blocked
2361          * txs will all get scheduled to the first connection which gets
2362          * scheduled.  We won't be using round robin on this first batch.
2363          */
2364         spin_lock(&conn->ibc_lock);
2365         while (!list_empty(&txs)) {
2366                 tx = list_entry(txs.next, struct kib_tx, tx_list);
2367                 list_del(&tx->tx_list);
2368
2369                 kiblnd_queue_tx_locked(tx, conn);
2370         }
2371         kiblnd_check_sends_locked(conn);
2372         spin_unlock(&conn->ibc_lock);
2373
2374         /* schedule blocked rxs */
2375         kiblnd_handle_early_rxs(conn);
2376         kiblnd_conn_decref(conn);
2377 }
2378
2379 static void
2380 kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
2381 {
2382         int          rc;
2383
2384 #ifdef HAVE_RDMA_REJECT_4ARGS
2385         rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED);
2386 #else
2387         rc = rdma_reject(cmid, rej, sizeof(*rej));
2388 #endif
2389
2390         if (rc != 0)
2391                 CWARN("Error %d sending reject\n", rc);
2392 }
2393
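/* Handle an incoming connection request.  The request is rejected
 * unless it passes, in order: the privileged-port check (if required),
 * magic/version checks, kiblnd_unpack_msg(), the dst NID/net/device
 * match, the incarnation stamp, the message type, and the
 * queue-depth/frag/msg-size limits.  Only then is a peer_ni found or
 * created and rdma_accept() called with our CONNACK as private data. */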
2394 static int
2395 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2396 {
2397         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
2398         struct kib_msg *reqmsg = priv;
2399         struct kib_msg *ackmsg;
2400         struct kib_dev *ibdev;
2401         struct kib_peer_ni *peer_ni;
2402         struct kib_peer_ni *peer2;
2403         struct kib_conn *conn;
2404         struct lnet_ni *ni = NULL;
2405         struct kib_net *net = NULL;
2406         lnet_nid_t nid;
2407         struct rdma_conn_param cp;
2408         struct kib_rej rej;
2409         int version = IBLND_MSG_VERSION;
2410         unsigned long flags;
2411         int rc;
2412         struct sockaddr_in *peer_addr;
2413
2414         LASSERT(!in_interrupt());
2415         /* cmid inherits 'context' from the corresponding listener id */
2416         ibdev = cmid->context;
2417         LASSERT(ibdev);
2418
2419         memset(&rej, 0, sizeof(rej));
2420         rej.ibr_magic                = IBLND_MSG_MAGIC;
2421         rej.ibr_why                  = IBLND_REJECT_FATAL;
2422         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
2423
2424         peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
2425         if (*kiblnd_tunables.kib_require_priv_port &&
2426             ntohs(peer_addr->sin_port) >= PROT_SOCK) {
2427                 __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
2428                 CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
2429                        &ip, ntohs(peer_addr->sin_port));
2430                 goto failed;
2431         }
2432
2433         if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
2434                 CERROR("Short connection request\n");
2435                 goto failed;
2436         }
2437
2438         /* Future protocol version compatibility support!  If the
2439          * o2iblnd-specific protocol changes, or when LNET unifies
2440          * protocols over all LNDs, the initial connection will
2441          * negotiate a protocol version.  I trap this here to avoid
2442          * console errors; the reject tells the peer_ni which protocol I
2443          * speak. */
2444         if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
2445             reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
2446                 goto failed;
2447         if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
2448             reqmsg->ibm_version != IBLND_MSG_VERSION &&
2449             reqmsg->ibm_version != IBLND_MSG_VERSION_1)
2450                 goto failed;
2451         if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
2452             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
2453             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
2454                 goto failed;
2455
2456         rc = kiblnd_unpack_msg(reqmsg, priv_nob);
2457         if (rc != 0) {
2458                 CERROR("Can't parse connection request: %d\n", rc);
2459                 goto failed;
2460         }
2461
2462         nid = reqmsg->ibm_srcnid;
2463         ni  = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
2464
2465         if (ni != NULL) {
2466                 net = (struct kib_net *)ni->ni_data;
2467                 rej.ibr_incarnation = net->ibn_incarnation;
2468         }
2469
2470         if (ni == NULL ||                         /* no matching net */
2471             ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
2472             net->ibn_dev != ibdev) {              /* wrong device */
2473                 CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n", libcfs_nid2str(nid),
2474                        ni ? libcfs_nid2str(ni->ni_nid) : "NA",
2475                        ibdev->ibd_ifname, ibdev->ibd_nnets,
2476                        &ibdev->ibd_ifip,
2477                        libcfs_nid2str(reqmsg->ibm_dstnid));
2478
2479                 goto failed;
2480         }
2481
2482         /* check time stamp as soon as possible */
2483         if (reqmsg->ibm_dststamp != 0 &&
2484             reqmsg->ibm_dststamp != net->ibn_incarnation) {
2485                 CWARN("Stale connection request\n");
2486                 rej.ibr_why = IBLND_REJECT_CONN_STALE;
2487                 goto failed;
2488         }
2489
2490         /* I can accept peer_ni's version */
2491         version = reqmsg->ibm_version;
2492
2493         if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
2494                 CERROR("Unexpected connreq msg type: %x from %s\n",
2495                        reqmsg->ibm_type, libcfs_nid2str(nid));
2496                 goto failed;
2497         }
2498
2499         if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
2500             kiblnd_msg_queue_size(version, ni)) {
2501                 CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
2502                        libcfs_nid2str(nid),
2503                        reqmsg->ibm_u.connparams.ibcp_queue_depth,
2504                        kiblnd_msg_queue_size(version, ni));
2505
2506                 if (version == IBLND_MSG_VERSION)
2507                         rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
2508
2509                 goto failed;
2510         }
2511
2512         if (reqmsg->ibm_u.connparams.ibcp_max_frags >
2513             IBLND_MAX_RDMA_FRAGS) {
2514                 CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
2515                       libcfs_nid2str(nid), version,
2516                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2517                       IBLND_MAX_RDMA_FRAGS);
2518
2519                 if (version >= IBLND_MSG_VERSION)
2520                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2521
2522                 goto failed;
2523         } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
2524                    IBLND_MAX_RDMA_FRAGS &&
2525                    net->ibn_fmr_ps == NULL) {
2526                 CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
2527                       libcfs_nid2str(nid), version,
2528                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2529                       IBLND_MAX_RDMA_FRAGS);
2530
2531                 if (version == IBLND_MSG_VERSION)
2532                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2533
2534                 goto failed;
2535         }
2536
2537         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2538                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
2539                        libcfs_nid2str(nid),
2540                        reqmsg->ibm_u.connparams.ibcp_max_msg_size,
2541                        IBLND_MSG_SIZE);
2542                 goto failed;
2543         }
2544
2545         /* assume 'nid' is a new peer_ni; create one */
2546         rc = kiblnd_create_peer(ni, &peer_ni, nid);
2547         if (rc != 0) {
2548                 CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid));
2549                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2550                 goto failed;
2551         }
2552
2553         /* We have validated the peer's parameters so use those */
2554         peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
2555         peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
2556
2557         write_lock_irqsave(g_lock, flags);
2558
2559         peer2 = kiblnd_find_peer_locked(ni, nid);
2560         if (peer2 != NULL) {
2561                 if (peer2->ibp_version == 0) {
2562                         peer2->ibp_version     = version;
2563                         peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2564                 }
2565
2566                 /* not the guy I've talked with */
2567                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
2568                     peer2->ibp_version     != version) {
2569                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
2570
2571                         if (kiblnd_peer_active(peer2)) {
2572                                 peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2573                                 peer2->ibp_version = version;
2574                         }
2575                         write_unlock_irqrestore(g_lock, flags);
2576
2577                         CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
2578                               libcfs_nid2str(nid), peer2->ibp_version, version,
2579                               peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
2580
2581                         kiblnd_peer_decref(peer_ni);
2582                         rej.ibr_why = IBLND_REJECT_CONN_STALE;
2583                         goto failed;
2584                 }
2585
2586                 /* Tie-break connection race in favour of the higher NID.
2587                  * If we keep running into a race condition multiple times,
2588                  * we have to assume that the connection attempt with the
2589                  * higher NID is stuck in a connecting state and will never
2590                  * recover.  As such, we pass through this if-block and let
2591                  * the lower NID connection win so we can move forward.
2592                  */
2593                 if (peer2->ibp_connecting != 0 &&
2594                     nid < ni->ni_nid &&
2595                     peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) {
2596                         peer2->ibp_races++;
2597                         write_unlock_irqrestore(g_lock, flags);
2598
2599                         CDEBUG(D_NET, "Conn race %s\n",
2600                                libcfs_nid2str(peer2->ibp_nid));
2601
2602                         kiblnd_peer_decref(peer_ni);
2603                         rej.ibr_why = IBLND_REJECT_CONN_RACE;
2604                         goto failed;
2605                 }
2606                 if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
2607                         CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
2608                                 libcfs_nid2str(peer2->ibp_nid),
2609                                 MAX_CONN_RACES_BEFORE_ABORT);
2610                 /*
2611                  * a passive connection is allowed even when this peer_ni is
2612                  * waiting for reconnection.
2613                  */
2614                 peer2->ibp_reconnecting = 0;
2615                 peer2->ibp_races = 0;
2616                 peer2->ibp_accepting++;
2617                 kiblnd_peer_addref(peer2);
2618
2619                 /* We raced with kiblnd_launch_tx (active connect) to create
2620                  * this peer_ni, so copy the validated parameters now that we
2621                  * know the peer_ni's limits */
2622                 peer2->ibp_max_frags = peer_ni->ibp_max_frags;
2623                 peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
2624
2625                 write_unlock_irqrestore(g_lock, flags);
2626                 kiblnd_peer_decref(peer_ni);
2627                 peer_ni = peer2;
2628         } else {
2629                 /* Brand new peer_ni */
2630                 LASSERT(peer_ni->ibp_accepting == 0);
2631                 LASSERT(peer_ni->ibp_version == 0 &&
2632                         peer_ni->ibp_incarnation == 0);
2633
2634                 peer_ni->ibp_accepting   = 1;
2635                 peer_ni->ibp_version     = version;
2636                 peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
2637
2638                 /* I have a ref on ni that prevents it being shutdown */
2639                 LASSERT(net->ibn_shutdown == 0);
2640
2641                 kiblnd_peer_addref(peer_ni);
2642                 hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
2643
2644                 write_unlock_irqrestore(g_lock, flags);
2645         }
2646
2647         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT,
2648                                   version);
2649         if (!conn) {
2650                 kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
2651                 kiblnd_peer_decref(peer_ni);
2652                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2653                 goto failed;
2654         }
2655
2656         /* conn now "owns" cmid, so I return success from here on to ensure the
2657          * CM callback doesn't destroy cmid.
2658          */
2659         conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
2660         conn->ibc_credits          = conn->ibc_queue_depth;
2661         conn->ibc_reserved_credits = conn->ibc_queue_depth;
2662         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
2663                 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
2664
2665         ackmsg = &conn->ibc_connvars->cv_msg;
2666         memset(ackmsg, 0, sizeof(*ackmsg));
2667
2668         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
2669                         sizeof(ackmsg->ibm_u.connparams));
2670         ackmsg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
2671         ackmsg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
2672         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2673
2674         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
2675
2676         memset(&cp, 0, sizeof(cp));
2677         cp.private_data        = ackmsg;
2678         cp.private_data_len    = ackmsg->ibm_nob;
2679         cp.responder_resources = 0;            /* No atomic ops or RDMA reads */
2680         cp.initiator_depth     = 0;
2681         cp.flow_control        = 1;
2682         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2683         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2684
2685         CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
2686
2687         rc = rdma_accept(cmid, &cp);
2688         if (rc != 0) {
2689                 CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
2690                 rej.ibr_version = version;
2691                 rej.ibr_why     = IBLND_REJECT_FATAL;
2692
2693                 kiblnd_reject(cmid, &rej);
2694                 kiblnd_connreq_done(conn, rc);
2695                 kiblnd_conn_decref(conn);
2696         }
2697
2698         lnet_ni_decref(ni);
2699         return 0;
2700
2701  failed:
2702         if (ni != NULL) {
2703                 rej.ibr_cp.ibcp_queue_depth =
2704                         kiblnd_msg_queue_size(version, ni);
2705                 rej.ibr_cp.ibcp_max_frags   = IBLND_MAX_RDMA_FRAGS;
2706                 lnet_ni_decref(ni);
2707         }
2708
2709         rej.ibr_version = version;
2710         kiblnd_reject(cmid, &rej);
2711
2712         return -ECONNREFUSED;
2713 }
2714
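/* Decide whether a rejected active connect should be retried.  Some
 * rejections (queue depth, rdma frags) carry the peer_ni's limits in
 * 'cp'; adopt them before reconnecting so the next attempt can
 * succeed.  Setting conn::ibc_reconnect defers the actual reconnect to
 * the connd when it reaps this zombie conn. */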
2715 static void
2716 kiblnd_check_reconnect(struct kib_conn *conn, int version,
2717                        u64 incarnation, int why, struct kib_connparams *cp)
2718 {
2719         rwlock_t        *glock = &kiblnd_data.kib_global_lock;
2720         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2721         char            *reason;
2722         int              msg_size = IBLND_MSG_SIZE;
2723         int              frag_num = -1;
2724         int              queue_dep = -1;
2725         bool             reconnect;
2726         unsigned long    flags;
2727
2728         LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2729         LASSERT(peer_ni->ibp_connecting > 0);   /* 'conn' at least */
2730
2731         if (cp) {
2732                 msg_size        = cp->ibcp_max_msg_size;
2733                 frag_num        = cp->ibcp_max_frags;
2734                 queue_dep       = cp->ibcp_queue_depth;
2735         }
2736
2737         write_lock_irqsave(glock, flags);
2738         /* retry connection if it's still needed and no other connection
2739          * attempts (active or passive) are in progress.
2740          * NB: a reconnect is still needed even when ibp_tx_queue is
2741          * empty if ibp_version != version, so the connection can be
2742          * re-established at the negotiated version.
2743          */
2744         reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
2745                      peer_ni->ibp_version != version) &&
2746                     peer_ni->ibp_connecting &&
2747                     peer_ni->ibp_accepting == 0;
2748         if (!reconnect) {
2749                 reason = "no need";
2750                 goto out;
2751         }
2752
2753         switch (why) {
2754         default:
2755                 reason = "Unknown";
2756                 break;
2757
2758         case IBLND_REJECT_RDMA_FRAGS: {
2759                 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2760
2761                 if (!cp) {
2762                         reason = "can't negotiate max frags";
2763                         goto out;
2764                 }
2765                 tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2766 #ifdef HAVE_IB_GET_DMA_MR
2767                 /*
2768                  * This check only makes sense if the kernel supports global
2769                  * memory registration; otherwise map_on_demand can never be 0
2770                  */
2771                 if (!tunables->lnd_map_on_demand) {
2772                         reason = "map_on_demand must be enabled";
2773                         goto out;
2774                 }
2775 #endif
2776                 if (conn->ibc_max_frags <= frag_num) {
2777                         reason = "unsupported max frags";
2778                         goto out;
2779                 }
2780
2781                 peer_ni->ibp_max_frags = frag_num;
2782                 reason = "rdma fragments";
2783                 break;
2784         }
2785         case IBLND_REJECT_MSG_QUEUE_SIZE:
2786                 if (!cp) {
2787                         reason = "can't negotiate queue depth";
2788                         goto out;
2789                 }
2790                 if (conn->ibc_queue_depth <= queue_dep) {
2791                         reason = "unsupported queue depth";
2792                         goto out;
2793                 }
2794
2795                 peer_ni->ibp_queue_depth = queue_dep;
2796                 reason = "queue depth";
2797                 break;
2798
2799         case IBLND_REJECT_CONN_STALE:
2800                 reason = "stale";
2801                 break;
2802
2803         case IBLND_REJECT_CONN_RACE:
2804                 reason = "conn race";
2805                 break;
2806
2807         case IBLND_REJECT_CONN_UNCOMPAT:
2808                 reason = "version negotiation";
2809                 break;
2810         }
2811
2812         conn->ibc_reconnect = 1;
2813         peer_ni->ibp_reconnecting++;
2814         peer_ni->ibp_version = version;
2815         if (incarnation != 0)
2816                 peer_ni->ibp_incarnation = incarnation;
2817  out:
2818         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2819
2820         CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
2821                 libcfs_nid2str(peer_ni->ibp_nid),
2822                 reconnect ? "reconnect" : "don't reconnect",
2823                 reason, IBLND_MSG_VERSION, version, msg_size,
2824                 conn->ibc_queue_depth, queue_dep,
2825                 conn->ibc_max_frags, frag_num);
2826         /*
2827          * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer_ni
2828          * while destroying the zombie
2829          */
2830 }
2831
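     /* Handle a CM reject on an active connect: decode o2iblnd's private
      * reject payload, if any, and decide whether to reconnect */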
2832 static void
2833 kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
2834 {
2835         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2836         int status = -ECONNREFUSED;
2837
2838         LASSERT (!in_interrupt());
2839         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2840
2841         switch (reason) {
2842         case IB_CM_REJ_STALE_CONN:
2843                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2844                                        IBLND_REJECT_CONN_STALE, NULL);
2845                 break;
2846
2847         case IB_CM_REJ_INVALID_SERVICE_ID:
2848                 status = -EHOSTUNREACH;
2849                 CNETERR("%s rejected: no listener at %d\n",
2850                         libcfs_nid2str(peer_ni->ibp_nid),
2851                         *kiblnd_tunables.kib_service);
2852                 break;
2853
2854         case IB_CM_REJ_CONSUMER_DEFINED:
2855                 if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
2856                         struct kib_rej *rej = priv;
2857                         struct kib_connparams *cp = NULL;
2858                         bool flip = false;
2859                         __u64 incarnation = -1;
2860
2861                         /* NB. default incarnation is -1 because:
2862                          * a) V1 will ignore the dst incarnation in the connreq.
2863                          * b) V2 will provide an incarnation while rejecting me,
2864                          *    so -1 will be overwritten.
2865                          *
2866                          * If I try to connect to a V1 peer_ni with the V2
2867                          * protocol, it may reject me and then upgrade to V2.
2868                          * Knowing nothing of the upgrade, I retry with V1;
2869                          * the upgraded peer can then tell I'm talking the old
2870                          * protocol and rejects me (incarnation is -1).
2871                          */
2872
2873                         if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
2874                             rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
2875                                 __swab32s(&rej->ibr_magic);
2876                                 __swab16s(&rej->ibr_version);
2877                                 flip = true;
2878                         }
2879
2880                         if (priv_nob >= sizeof(struct kib_rej) &&
2881                             rej->ibr_version > IBLND_MSG_VERSION_1) {
2882                                 /* priv_nob is always 148 in current versions
2883                                  * of OFED (see the definition of
2884                                  * IB_CM_REJ_PRIVATE_DATA_SIZE), so we still
2885                                  * need to check the version */
2886                                 cp = &rej->ibr_cp;
2887
2888                                 if (flip) {
2889                                         __swab64s(&rej->ibr_incarnation);
2890                                         __swab16s(&cp->ibcp_queue_depth);
2891                                         __swab16s(&cp->ibcp_max_frags);
2892                                         __swab32s(&cp->ibcp_max_msg_size);
2893                                 }
2894
2895                                 incarnation = rej->ibr_incarnation;
2896                         }
2897
2898                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
2899                             rej->ibr_magic != LNET_PROTO_MAGIC) {
2900                                 CERROR("%s rejected: consumer defined fatal error\n",
2901                                        libcfs_nid2str(peer_ni->ibp_nid));
2902                                 break;
2903                         }
2904
2905                         if (rej->ibr_version != IBLND_MSG_VERSION &&
2906                             rej->ibr_version != IBLND_MSG_VERSION_1) {
2907                                 CERROR("%s rejected: o2iblnd version %x error\n",
2908                                        libcfs_nid2str(peer_ni->ibp_nid),
2909                                        rej->ibr_version);
2910                                 break;
2911                         }
2912
2913                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
2914                             rej->ibr_version == IBLND_MSG_VERSION_1) {
2915                                 CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
2916                                        libcfs_nid2str(peer_ni->ibp_nid),
2917                                        rej->ibr_version);
2918
2919                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
2920                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
2921                         }
2922
2923                         switch (rej->ibr_why) {
2924                         case IBLND_REJECT_CONN_RACE:
2925                         case IBLND_REJECT_CONN_STALE:
2926                         case IBLND_REJECT_CONN_UNCOMPAT:
2927                         case IBLND_REJECT_MSG_QUEUE_SIZE:
2928                         case IBLND_REJECT_RDMA_FRAGS:
2929                                 kiblnd_check_reconnect(conn, rej->ibr_version,
2930                                                        incarnation,
2931                                                        rej->ibr_why, cp);
2932                                 break;
2933
2934                         case IBLND_REJECT_NO_RESOURCES:
2935                                 CERROR("%s rejected: o2iblnd no resources\n",
2936                                        libcfs_nid2str(peer_ni->ibp_nid));
2937                                 break;
2938
2939                         case IBLND_REJECT_FATAL:
2940                                 CERROR("%s rejected: o2iblnd fatal error\n",
2941                                        libcfs_nid2str(peer_ni->ibp_nid));
2942                                 break;
2943
2944                         default:
2945                                 CERROR("%s rejected: o2iblnd reason %d\n",
2946                                        libcfs_nid2str(peer_ni->ibp_nid),
2947                                        rej->ibr_why);
2948                                 break;
2949                         }
2950                         break;
2951                 }
2952                 /* fall through */
2953         default:
2954                 CNETERR("%s rejected: reason %d, size %d\n",
2955                         libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
2956                 break;
2957         }
2958
2959         kiblnd_connreq_done(conn, status);
2960 }
2961
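     /* Validate the CONNACK a peer_ni returned for our active connect and,
      * on success, adopt the negotiated connection parameters */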
2962 static void
2963 kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
2964 {
2965         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2966         struct lnet_ni *ni = peer_ni->ibp_ni;
2967         struct kib_net *net = ni->ni_data;
2968         struct kib_msg *msg = priv;
2969         int            ver  = conn->ibc_version;
2970         int            rc   = kiblnd_unpack_msg(msg, priv_nob);
2971         unsigned long  flags;
2972
2973         LASSERT (net != NULL);
2974
2975         if (rc != 0) {
2976                 CERROR("Can't unpack connack from %s: %d\n",
2977                        libcfs_nid2str(peer_ni->ibp_nid), rc);
2978                 goto failed;
2979         }
2980
2981         if (msg->ibm_type != IBLND_MSG_CONNACK) {
2982                 CERROR("Unexpected message %d from %s\n",
2983                        msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid));
2984                 rc = -EPROTO;
2985                 goto failed;
2986         }
2987
2988         if (ver != msg->ibm_version) {
2989                 CERROR("%s replied version %x, which differs from "
2990                        "requested version %x\n",
2991                        libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver);
2992                 rc = -EPROTO;
2993                 goto failed;
2994         }
2995
2996         if (msg->ibm_u.connparams.ibcp_queue_depth >
2997             conn->ibc_queue_depth) {
2998                 CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
2999                        libcfs_nid2str(peer_ni->ibp_nid),
3000                        msg->ibm_u.connparams.ibcp_queue_depth,
3001                        conn->ibc_queue_depth);
3002                 rc = -EPROTO;
3003                 goto failed;
3004         }
3005
3006         if (msg->ibm_u.connparams.ibcp_max_frags >
3007             conn->ibc_max_frags) {
3008                 CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
3009                        libcfs_nid2str(peer_ni->ibp_nid),
3010                        msg->ibm_u.connparams.ibcp_max_frags,
3011                        conn->ibc_max_frags);
3012                 rc = -EPROTO;
3013                 goto failed;
3014         }
3015
3016         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
3017                 CERROR("%s max message size %d too big (%d max)\n",
3018                        libcfs_nid2str(peer_ni->ibp_nid),
3019                        msg->ibm_u.connparams.ibcp_max_msg_size,
3020                        IBLND_MSG_SIZE);
3021                 rc = -EPROTO;
3022                 goto failed;
3023         }
3024
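             /* A connack addressed to another NID or net incarnation is a
              * stale reply meant for a previous instance of this NI */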
3025         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3026         if (msg->ibm_dstnid == ni->ni_nid &&
3027             msg->ibm_dststamp == net->ibn_incarnation)
3028                 rc = 0;
3029         else
3030                 rc = -ESTALE;
3031         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3032
3033         if (rc != 0) {
3034                 CERROR("Bad connection reply from %s, rc = %d, "
3035                        "version: %x max_frags: %d\n",
3036                        libcfs_nid2str(peer_ni->ibp_nid), rc,
3037                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
3038                 goto failed;
3039         }
3040
3041         conn->ibc_incarnation      = msg->ibm_srcstamp;
3042         conn->ibc_credits          = msg->ibm_u.connparams.ibcp_queue_depth;
3043         conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
3044         conn->ibc_queue_depth      = msg->ibm_u.connparams.ibcp_queue_depth;
3045         conn->ibc_max_frags        = msg->ibm_u.connparams.ibcp_max_frags;
3046         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
3047                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
3048
3049         kiblnd_connreq_done(conn, 0);
3050         return;
3051
3052  failed:
3053         /* NB My QP has already established itself, so I handle anything going
3054          * wrong here by setting ibc_comms_error.
3055          * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
3056          * immediately tears it down. */
3057
3058         LASSERT (rc != 0);
3059         conn->ibc_comms_error = rc;
3060         kiblnd_connreq_done(conn, 0);
3061 }
3062
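     /* Start an active connect on 'cmid': build a CONNREQ carrying this
      * peer_ni's negotiated version and parameters and hand it to the CM */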
3063 static int
3064 kiblnd_active_connect(struct rdma_cm_id *cmid)
3065 {
3066         struct kib_peer_ni *peer_ni = cmid->context;
3067         struct kib_conn *conn;
3068         struct kib_msg *msg;
3069         struct rdma_conn_param cp;
3070         int                      version;
3071         __u64                    incarnation;
3072         unsigned long            flags;
3073         int                      rc;
3074
3075         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3076
3077         incarnation = peer_ni->ibp_incarnation;
3078         version     = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION :
3079                                                  peer_ni->ibp_version;
3080
3081         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3082
3083         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT,
3084                                   version);
3085         if (conn == NULL) {
3086                 kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM);
3087                 kiblnd_peer_decref(peer_ni); /* lose cmid's ref */
3088                 return -ENOMEM;
3089         }
3090
3091         /* conn "owns" cmid now, so I return success from here on to ensure the
3092          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
3093          * on peer_ni */
3094
3095         msg = &conn->ibc_connvars->cv_msg;
3096
3097         memset(msg, 0, sizeof(*msg));
3098         kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
3099         msg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
3100         msg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
3101         msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
3102
3103         kiblnd_pack_msg(peer_ni->ibp_ni, msg, version,
3104                         0, peer_ni->ibp_nid, incarnation);
3105
3106         memset(&cp, 0, sizeof(cp));
3107         cp.private_data        = msg;
3108         cp.private_data_len    = msg->ibm_nob;
3109         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
3110         cp.initiator_depth     = 0;
3111         cp.flow_control        = 1;
3112         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
3113         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
3114
3115         LASSERT(cmid->context == (void *)conn);
3116         LASSERT(conn->ibc_cmid == cmid);
3117         rc = rdma_connect_locked(cmid, &cp);
3118         if (rc != 0) {
3119                 CERROR("Can't connect to %s: %d\n",
3120                        libcfs_nid2str(peer_ni->ibp_nid), rc);
3121                 kiblnd_connreq_done(conn, rc);
3122                 kiblnd_conn_decref(conn);
3123         }
3124
3125         return 0;
3126 }
3127
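     /* RDMA CM event handler: cmid->context is the peer_ni while the address
      * and route are resolved, and the conn thereafter.  A non-zero return
      * tells the CM to destroy cmid */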
3128 int
3129 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
3130 {
3131         struct kib_peer_ni *peer_ni;
3132         struct kib_conn *conn;
3133         int rc;
3134
3135         switch (event->event) {
3136         default:
3137                 CERROR("Unexpected event: %d, status: %d\n",
3138                        event->event, event->status);
3139                 LBUG();
3140
3141         case RDMA_CM_EVENT_CONNECT_REQUEST:
3142                 /* destroy cmid on failure */
3143                 rc = kiblnd_passive_connect(cmid,
3144                                             (void *)KIBLND_CONN_PARAM(event),
3145                                             KIBLND_CONN_PARAM_LEN(event));
3146                 CDEBUG(D_NET, "connreq: %d\n", rc);
3147                 return rc;
3148
3149         case RDMA_CM_EVENT_ADDR_ERROR:
3150                 peer_ni = cmid->context;
3151                 CNETERR("%s: ADDR ERROR %d\n",
3152                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3153                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3154                 kiblnd_peer_decref(peer_ni);
3155                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
3156
3157         case RDMA_CM_EVENT_ADDR_RESOLVED:
3158                 peer_ni = cmid->context;
3159
3160                 CDEBUG(D_NET, "%s Addr resolved: %d\n",
3161                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3162
3163                 if (event->status != 0) {
3164                         CNETERR("Can't resolve address for %s: %d\n",
3165                                 libcfs_nid2str(peer_ni->ibp_nid), event->status);
3166                         rc = event->status;
3167                 } else {
3168                         rc = rdma_resolve_route(
3169                                 cmid, kiblnd_timeout() * 1000);
3170                         if (rc == 0) {
3171                                 struct kib_net *net = peer_ni->ibp_ni->ni_data;
3172                                 struct kib_dev *dev = net->ibn_dev;
3173
3174                                 CDEBUG(D_NET, "%s: connection bound to "
3175                                        "%s:%pI4h:%s\n",
3176                                        libcfs_nid2str(peer_ni->ibp_nid),
3177                                        dev->ibd_ifname,
3178                                        &dev->ibd_ifip, cmid->device->name);
3179
3180                                 return 0;
3181                         }
3182
3183                         /* Can't initiate route resolution */
3184                         CERROR("Can't resolve route for %s: %d\n",
3185                                libcfs_nid2str(peer_ni->ibp_nid), rc);
3186                 }
3187                 kiblnd_peer_connect_failed(peer_ni, 1, rc);
3188                 kiblnd_peer_decref(peer_ni);
3189                 return rc;                      /* rc != 0 destroys cmid */
3190
3191         case RDMA_CM_EVENT_ROUTE_ERROR:
3192                 peer_ni = cmid->context;
3193                 CNETERR("%s: ROUTE ERROR %d\n",
3194                         libcfs_nid2str(peer_ni->ibp_nid), event->status);
3195                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3196                 kiblnd_peer_decref(peer_ni);
3197                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
3198
3199         case RDMA_CM_EVENT_ROUTE_RESOLVED:
3200                 peer_ni = cmid->context;
3201                 CDEBUG(D_NET, "%s Route resolved: %d\n",
3202                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3203
3204                 if (event->status == 0)
3205                         return kiblnd_active_connect(cmid);
3206
3207                 CNETERR("Can't resolve route for %s: %d\n",
3208                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3209                 kiblnd_peer_connect_failed(peer_ni, 1, event->status);
3210                 kiblnd_peer_decref(peer_ni);
3211                 return event->status;           /* rc != 0 destroys cmid */
3212
3213         case RDMA_CM_EVENT_UNREACHABLE:
3214                 conn = cmid->context;
3215                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3216                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3217                 CNETERR("%s: UNREACHABLE %d\n",
3218                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3219                 kiblnd_connreq_done(conn, -ENETDOWN);
3220                 kiblnd_conn_decref(conn);
3221                 return 0;
3222
3223         case RDMA_CM_EVENT_CONNECT_ERROR:
3224                 conn = cmid->context;
3225                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3226                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3227                 CNETERR("%s: CONNECT ERROR %d\n",
3228                         libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3229                 kiblnd_connreq_done(conn, -ENOTCONN);
3230                 kiblnd_conn_decref(conn);
3231                 return 0;
3232
3233         case RDMA_CM_EVENT_REJECTED:
3234                 conn = cmid->context;
3235                 switch (conn->ibc_state) {
3236                 default:
3237                         LBUG();
3238
3239                 case IBLND_CONN_PASSIVE_WAIT:
3240                         CERROR ("%s: REJECTED %d\n",
3241                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
3242                                 event->status);
3243                         kiblnd_connreq_done(conn, -ECONNRESET);
3244                         break;
3245
3246                 case IBLND_CONN_ACTIVE_CONNECT:
3247                         kiblnd_rejected(conn, event->status,
3248                                         (void *)KIBLND_CONN_PARAM(event),
3249                                         KIBLND_CONN_PARAM_LEN(event));
3250                         break;
3251                 }
3252                 kiblnd_conn_decref(conn);
3253                 return 0;
3254
3255         case RDMA_CM_EVENT_ESTABLISHED:
3256                 conn = cmid->context;
3257                 switch (conn->ibc_state) {
3258                 default:
3259                         LBUG();
3260
3261                 case IBLND_CONN_PASSIVE_WAIT:
3262                         CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
3263                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3264                         kiblnd_connreq_done(conn, 0);
3265                         break;
3266
3267                 case IBLND_CONN_ACTIVE_CONNECT:
3268                         CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
3269                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3270                         kiblnd_check_connreply(conn,
3271                                                (void *)KIBLND_CONN_PARAM(event),
3272                                                KIBLND_CONN_PARAM_LEN(event));
3273                         break;
3274                 }
3275                 /* net keeps its ref on conn! */
3276                 return 0;
3277
3278         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
3279                 CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
3280                 return 0;
3281
3282         case RDMA_CM_EVENT_DISCONNECTED:
3283                 conn = cmid->context;
3284                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
3285                         CERROR("%s DISCONNECTED\n",
3286                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3287                         kiblnd_connreq_done(conn, -ECONNRESET);
3288                 } else {
3289                         kiblnd_close_conn(conn, 0);
3290                 }
3291                 kiblnd_conn_decref(conn);
3292                 cmid->context = NULL;
3293                 return 0;
3294
3295         case RDMA_CM_EVENT_DEVICE_REMOVAL:
3296                 LCONSOLE_ERROR_MSG(0x131,
3297                                    "Received notification of device removal\n"
3298                                    "Please shut down LNET to allow this to proceed\n");
3299                 /* Can't remove network from underneath LNET for now, so I have
3300                  * to ignore this */
3301                 return 0;
3302
3303         case RDMA_CM_EVENT_ADDR_CHANGE:
3304                 LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
3305                 return 0;
3306         }
3307 }
3308
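     /* Return 1 if any tx on 'txs' has passed its deadline; called with
      * conn::ibc_lock held */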
3309 static int
3310 kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
3311 {
3312         struct kib_tx *tx;
3313         struct list_head *ttmp;
3314
3315         list_for_each(ttmp, txs) {
3316                 tx = list_entry(ttmp, struct kib_tx, tx_list);
3317
3318                 if (txs != &conn->ibc_active_txs) {
3319                         LASSERT(tx->tx_queued);
3320                 } else {
3321                         LASSERT(!tx->tx_queued);
3322                         LASSERT(tx->tx_waiting || tx->tx_sending != 0);
3323                 }
3324
3325                 if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
3326                         CERROR("Timed out tx: %s(WSQ:%d%d%d), %lld seconds\n",
3327                                kiblnd_queue2str(conn, txs),
3328                                tx->tx_waiting, tx->tx_sending, tx->tx_queued,
3329                                kiblnd_timeout() +
3330                                ktime_ms_delta(ktime_get(),
3331                                               tx->tx_deadline) / MSEC_PER_SEC);
3332                         return 1;
3333                 }
3334         }
3335
3336         return 0;
3337 }
3338
3339 static int
3340 kiblnd_conn_timed_out_locked(struct kib_conn *conn)
3341 {
3342         return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
3343                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
3344                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
3345                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
3346                 kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
3347 }
3348
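     /* Scan one bucket of the peer_ni hash table for timed-out txs, and for
      * connections that have timed out or owe the peer a NOOP */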
3349 static void
3350 kiblnd_check_conns (int idx)
3351 {
3352         LIST_HEAD(closes);
3353         LIST_HEAD(checksends);
3354         LIST_HEAD(timedout_txs);
3355         struct hlist_head *peers = &kiblnd_data.kib_peers[idx];
3356         struct kib_peer_ni *peer_ni;
3357         struct kib_conn *conn;
3358         struct kib_tx *tx, *tx_tmp;
3359         struct list_head *ctmp;
3360         unsigned long flags;
3361
3362         /* NB. We expect to look at all the peers and not find any RDMAs to
3363          * time out.  We need the write lock because timed-out txs are
3364          * removed from their peer_ni's queue below.
3365          */
3366         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3367
3368         hlist_for_each_entry(peer_ni, peers, ibp_list) {
3369                 /* Check tx_deadline */
3370                 list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
3371                         if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
3372                                 CWARN("Timed out tx for %s: %lld seconds\n",
3373                                       libcfs_nid2str(peer_ni->ibp_nid),
3374                                       ktime_ms_delta(ktime_get(),
3375                                                      tx->tx_deadline) / MSEC_PER_SEC);
3376                                 list_move(&tx->tx_list, &timedout_txs);
3377                         }
3378                 }
3379
3380                 list_for_each(ctmp, &peer_ni->ibp_conns) {
3381                         int timedout;
3382                         int sendnoop;
3383
3384                         conn = list_entry(ctmp, struct kib_conn, ibc_list);
3385
3386                         LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
3387
3388                         spin_lock(&conn->ibc_lock);
3389
3390                         sendnoop = kiblnd_need_noop(conn);
3391                         timedout = kiblnd_conn_timed_out_locked(conn);
3392                         if (!sendnoop && !timedout) {
3393                                 spin_unlock(&conn->ibc_lock);
3394                                 continue;
3395                         }
3396
3397                         if (timedout) {
3398                                 CERROR("Timed out RDMA with %s (%lld): c: %u, oc: %u, rc: %u\n",
3399                                        libcfs_nid2str(peer_ni->ibp_nid),
3400                                        ktime_get_seconds()
3401                                        - peer_ni->ibp_last_alive,
3402                                        conn->ibc_credits,
3403                                        conn->ibc_outstanding_credits,
3404                                        conn->ibc_reserved_credits);
3405                                 list_add(&conn->ibc_connd_list, &closes);
3406                         } else {
3407                                 list_add(&conn->ibc_connd_list, &checksends);
3408                         }
3409                         /* +ref for 'closes' or 'checksends' */
3410                         kiblnd_conn_addref(conn);
3411
3412                         spin_unlock(&conn->ibc_lock);
3413                 }
3414         }
3415
3416         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3417
3418         if (!list_empty(&timedout_txs))
3419                 kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT,
3420                                    LNET_MSG_STATUS_NETWORK_TIMEOUT);
3421
3422         /* Handle timeout by closing the whole
3423          * connection. We can only be sure RDMA activity
3424          * has ceased once the QP has been modified.
3425          */
3426         while (!list_empty(&closes)) {
3427                 conn = list_entry(closes.next,
3428                                   struct kib_conn, ibc_connd_list);
3429                 list_del(&conn->ibc_connd_list);
3430                 kiblnd_close_conn(conn, -ETIMEDOUT);
3431                 kiblnd_conn_decref(conn);
3432         }
3433
3434         /* In case we have enough credits to return via a
3435          * NOOP, but there were no non-blocking tx descs
3436          * free to do it last time...
3437          */
3438         while (!list_empty(&checksends)) {
3439                 conn = list_entry(checksends.next,
3440                                   struct kib_conn, ibc_connd_list);
3441                 list_del(&conn->ibc_connd_list);
3442
3443                 spin_lock(&conn->ibc_lock);
3444                 kiblnd_check_sends_locked(conn);
3445                 spin_unlock(&conn->ibc_lock);
3446
3447                 kiblnd_conn_decref(conn);
3448         }
3449 }
3450
3451 static void
3452 kiblnd_disconnect_conn(struct kib_conn *conn)
3453 {
3454         LASSERT (!in_interrupt());
3455         LASSERT (current == kiblnd_data.kib_connd);
3456         LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
3457
3458         rdma_disconnect(conn->ibc_cmid);
3459         kiblnd_finalise_conn(conn);
3460
3461         kiblnd_peer_notify(conn->ibc_peer);
3462 }
3463
3464 /*
3465  * High-water mark for reconnections to the same peer_ni; after more than
3466  * KIB_RECONN_HIGH_RACE attempts, further reconnects are delayed.
3467  */
3468 #define KIB_RECONN_HIGH_RACE    10
3469 /*
3470  * Let connd take a break and handle other things after this many
3471  * consecutive reconnection attempts.
3472  */
3473 #define KIB_RECONN_BREAK        100
3474
3475 int
3476 kiblnd_connd (void *arg)
3477 {
3478         spinlock_t *lock = &kiblnd_data.kib_connd_lock;
3479         wait_queue_entry_t wait;
3480         unsigned long flags;
3481         struct kib_conn *conn;
3482         int timeout;
3483         int i;
3484         bool dropped_lock;
3485         int peer_index = 0;
3486         unsigned long deadline = jiffies;
3487
3488         init_wait(&wait);
3489         kiblnd_data.kib_connd = current;
3490
3491         spin_lock_irqsave(lock, flags);
3492
3493         while (!kiblnd_data.kib_shutdown) {
3494                 int reconn = 0;
3495
3496                 dropped_lock = false;
3497
3498                 if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
3499                         struct kib_peer_ni *peer_ni = NULL;
3500
3501                         conn = list_entry(kiblnd_data.kib_connd_zombies.next,
3502                                           struct kib_conn, ibc_list);
3503                         list_del(&conn->ibc_list);
3504                         if (conn->ibc_reconnect) {
3505                                 peer_ni = conn->ibc_peer;
3506                                 kiblnd_peer_addref(peer_ni);
3507                         }
3508
3509                         spin_unlock_irqrestore(lock, flags);
3510                         dropped_lock = true;
3511
3512                         kiblnd_destroy_conn(conn);
3513
3514                         spin_lock_irqsave(lock, flags);
3515                         if (!peer_ni) {
3516                                 LIBCFS_FREE(conn, sizeof(*conn));
3517                                 continue;
3518                         }
3519
3520                         conn->ibc_peer = peer_ni;
3521                         if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
3522                                 list_add_tail(&conn->ibc_list,
3523                                               &kiblnd_data.kib_reconn_list);
3524                         else
3525                                 list_add_tail(&conn->ibc_list,
3526                                               &kiblnd_data.kib_reconn_wait);
3527                 }
3528
3529                 if (!list_empty(&kiblnd_data.kib_connd_conns)) {
3530                         int wait;

3531                         conn = list_entry(kiblnd_data.kib_connd_conns.next,
3532                                           struct kib_conn, ibc_list);
3533                         list_del(&conn->ibc_list);
3534
3535                         spin_unlock_irqrestore(lock, flags);
3536                         dropped_lock = true;
3537
3538                         kiblnd_disconnect_conn(conn);
3539                         wait = conn->ibc_waits;
3540                         if (wait == 0) /* else keep ref for kib_connd_waits below */
3541                                 kiblnd_conn_decref(conn);
3542
3543                         spin_lock_irqsave(lock, flags);
3544
3545                         if (wait)
3546                                 list_add_tail(&conn->ibc_list,
3547                                               &kiblnd_data.kib_connd_waits);
3548                 }
3549
3550                 while (reconn < KIB_RECONN_BREAK) {
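                             /* Once per second, move connections parked on the
                              * wait list (see KIB_RECONN_HIGH_RACE) back onto
                              * the active reconnect list */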
3551                         if (kiblnd_data.kib_reconn_sec !=
3552                             ktime_get_real_seconds()) {
3553                                 kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
3554                                 list_splice_init(&kiblnd_data.kib_reconn_wait,
3555                                                  &kiblnd_data.kib_reconn_list);
3556                         }
3557
3558                         if (list_empty(&kiblnd_data.kib_reconn_list))
3559                                 break;
3560
3561                         conn = list_entry(kiblnd_data.kib_reconn_list.next,
3562                                           struct kib_conn, ibc_list);
3563                         list_del(&conn->ibc_list);
3564
3565                         spin_unlock_irqrestore(lock, flags);
3566                         dropped_lock = true;
3567
3568                         reconn += kiblnd_reconnect_peer(conn->ibc_peer);
3569                         kiblnd_peer_decref(conn->ibc_peer);
3570                         LIBCFS_FREE(conn, sizeof(*conn));
3571
3572                         spin_lock_irqsave(lock, flags);
3573                 }
3574
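                     /* Poll conns parked on kib_connd_waits until their
                      * remaining txs can be discarded */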
3575                 if (!list_empty(&kiblnd_data.kib_connd_waits)) {
3576                         conn = list_entry(kiblnd_data.kib_connd_waits.next,
3577                                           struct kib_conn, ibc_list);
3578                         list_del(&conn->ibc_list);
3579                         spin_unlock_irqrestore(lock, flags);
3580
3581                         dropped_lock = kiblnd_tx_may_discard(conn);
3582                         if (dropped_lock)
3583                                 kiblnd_conn_decref(conn);
3584
3585                         spin_lock_irqsave(lock, flags);
3586                         if (!dropped_lock)
3587                                 list_add_tail(&conn->ibc_list,
3588                                               &kiblnd_data.kib_connd_waits);
3589                 }
3590
3591                 /* careful with the jiffy wrap... */
3592                 timeout = (int)(deadline - jiffies);
3593                 if (timeout <= 0) {
3594                         const int n = 4;
3595                         const int p = 1;
3596                         int chunk = HASH_SIZE(kiblnd_data.kib_peers);
3597                         unsigned int lnd_timeout;
3598
3599                         spin_unlock_irqrestore(lock, flags);
3600                         dropped_lock = true;
3601
3602                         /* Time to check for RDMA timeouts on a few more
3603                          * peers: I do checks every 'p' seconds on a
3604                          * proportion of the peer_ni table and I need to check
3605                          * every connection 'n' times within a timeout
3606                          * interval, to ensure I detect a timeout on any
3607                          * connection within (n+1)/n times the timeout
3608                          * interval.
3609                          */
3610
3611                         lnd_timeout = kiblnd_timeout();
3612                         if (lnd_timeout > n * p)
3613                                 chunk = (chunk * n * p) / lnd_timeout;
3614                         if (chunk == 0)
3615                                 chunk = 1;
3616
3617                         for (i = 0; i < chunk; i++) {
3618                                 kiblnd_check_conns(peer_index);
3619                                 peer_index = (peer_index + 1) %
3620                                         HASH_SIZE(kiblnd_data.kib_peers);
3621                         }
3622
3623                         deadline += cfs_time_seconds(p);
3624                         spin_lock_irqsave(lock, flags);
3625                 }
3626
3627                 if (dropped_lock)
3628                         continue;
3629
3630                 /* Nothing to do for 'timeout'  */
3631                 set_current_state(TASK_INTERRUPTIBLE);
3632                 add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3633                 spin_unlock_irqrestore(lock, flags);
3634
3635                 schedule_timeout(timeout);
3636
3637                 remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3638                 spin_lock_irqsave(lock, flags);
3639         }
3640
3641         spin_unlock_irqrestore(lock, flags);
3642
3643         kiblnd_thread_fini();
3644         return 0;
3645 }
3646
3647 void
3648 kiblnd_qp_event(struct ib_event *event, void *arg)
3649 {
3650         struct kib_conn *conn = arg;
3651
3652         switch (event->event) {
3653         case IB_EVENT_COMM_EST:
3654                 CDEBUG(D_NET, "%s established\n",
3655                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
3656                 /* We received a packet before the connection was marked
3657                  * established; the handshake packet was probably lost, so
3658                  * it's safe to force the connection into established state */
3659                 rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
3660                 return;
3661
3662         case IB_EVENT_PORT_ERR:
3663         case IB_EVENT_DEVICE_FATAL:
3664                 CERROR("Fatal device error for NI %s\n",
3665                        libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
3666                 atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
3667                 return;
3668
3669         case IB_EVENT_PORT_ACTIVE:
3670                 CERROR("Port reactivated for NI %s\n",
3671                        libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
3672                 atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
3673                 return;
3674
3675         default:
3676                 CERROR("%s: Async QP event type %d\n",
3677                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3678                 return;
3679         }
3680 }
3681
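     /* Dispatch one work completion to the handler for its work-request
      * type, encoded in the wr_id */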
3682 static void
3683 kiblnd_complete (struct ib_wc *wc)
3684 {
3685         switch (kiblnd_wreqid2type(wc->wr_id)) {
3686         default:
3687                 LBUG();
3688
3689         case IBLND_WID_MR:
3690                 if (wc->status != IB_WC_SUCCESS &&
3691                     wc->status != IB_WC_WR_FLUSH_ERR)
3692                         CNETERR("FastReg failed: %d\n", wc->status);
3693                 return;
3694
3695         case IBLND_WID_RDMA:
3696                 /* We only get RDMA completion notification if it fails.  All
3697                  * subsequent work items, including the final SEND will fail
3698                  * too.  However we can't print out any more info about the
3699                  * failing RDMA because 'tx' might be back on the idle list or
3700                  * even reused already if we didn't manage to post all our work
3701                  * items */
3702                 CNETERR("RDMA (tx: %p) failed: %d\n",
3703                         kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3704                 return;
3705
3706         case IBLND_WID_TX:
3707                 kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3708                 return;
3709
3710         case IBLND_WID_RX:
3711                 kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
3712                                    wc->byte_len);
3713                 return;
3714         }
3715 }
3716
3717 void
3718 kiblnd_cq_completion(struct ib_cq *cq, void *arg)
3719 {
3720         /* NB I'm not allowed to schedule this conn once its refcount has
3721          * reached 0.  Since fundamentally I'm racing with scheduler threads
3722          * consuming my CQ I could be called after all completions have
3723          * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
3724          * and this CQ is about to be destroyed so I NOOP. */
3725         struct kib_conn *conn = arg;
3726         struct kib_sched_info *sched = conn->ibc_sched;
3727         unsigned long flags;
3728
3729         LASSERT(cq == conn->ibc_cq);
3730
3731         spin_lock_irqsave(&sched->ibs_lock, flags);
3732
3733         conn->ibc_ready = 1;
3734
3735         if (!conn->ibc_scheduled &&
3736             (conn->ibc_nrx > 0 ||
3737              conn->ibc_nsends_posted > 0)) {
3738                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
3739                 conn->ibc_scheduled = 1;
3740                 list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
3741
3742                 if (waitqueue_active(&sched->ibs_waitq))
3743                         wake_up(&sched->ibs_waitq);
3744         }
3745
3746         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3747 }
3748
3749 void
3750 kiblnd_cq_event(struct ib_event *event, void *arg)
3751 {
3752         struct kib_conn *conn = arg;
3753
3754         CERROR("%s: async CQ event type %d\n",
3755                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3756 }
3757
3758 int
3759 kiblnd_scheduler(void *arg)
3760 {
3761         long id = (long)arg;
3762         struct kib_sched_info *sched;
3763         struct kib_conn *conn;
3764         wait_queue_entry_t wait;
3765         unsigned long flags;
3766         struct ib_wc wc;
3767         bool did_something;
3768         int rc;
3769
3770         init_wait(&wait);
3771
3772         sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
3773
3774         rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
3775         if (rc != 0) {
3776                 CWARN("Unable to bind on CPU partition %d. Please verify that all CPUs are healthy and reload modules if necessary; otherwise your system may be at risk of low performance\n", sched->ibs_cpt);
3777         }
3778
3779         spin_lock_irqsave(&sched->ibs_lock, flags);
3780
3781         while (!kiblnd_data.kib_shutdown) {
3782                 if (need_resched()) {
3783                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3784
3785                         cond_resched();
3786
3787                         spin_lock_irqsave(&sched->ibs_lock, flags);
3788                 }
3789
3790                 did_something = false;
3791
3792                 if (!list_empty(&sched->ibs_conns)) {
3793                         conn = list_entry(sched->ibs_conns.next,
3794                                           struct kib_conn, ibc_sched_list);
3795                         /* take over kib_sched_conns' ref on conn... */
3796                         LASSERT(conn->ibc_scheduled);
3797                         list_del(&conn->ibc_sched_list);
3798                         conn->ibc_ready = 0;
3799
3800                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3801
3802                         wc.wr_id = IBLND_WID_INVAL;
3803
3804                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3805                         if (rc == 0) {
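                                     /* CQ drained: re-arm it, then poll once
                                      * more to catch any completion that
                                      * arrived before the re-arm took effect */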
3806                                 rc = ib_req_notify_cq(conn->ibc_cq,
3807                                                       IB_CQ_NEXT_COMP);
3808                                 if (rc < 0) {
3809                                         CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
3810                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
3811                                         kiblnd_close_conn(conn, -EIO);
3812                                         kiblnd_conn_decref(conn);
3813                                         spin_lock_irqsave(&sched->ibs_lock,
3814                                                           flags);
3815                                         continue;
3816                                 }
3817
3818                                 rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3819                         }
3820
3821                         if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
3822                                 LCONSOLE_ERROR(
3823                                         "ib_poll_cq (rc: %d) returned invalid "
3824                                         "wr_id, opcode %d, status: %d, "
3825                                         "vendor_err: %d, conn: %s status: %d\n"
3826                                         "please upgrade firmware and OFED or "
3827                                         "contact vendor.\n", rc,
3828                                         wc.opcode, wc.status, wc.vendor_err,
3829                                         libcfs_nid2str(conn->ibc_peer->ibp_nid),
3830                                         conn->ibc_state);
3831                                 rc = -EINVAL;
3832                         }
3833
3834                         if (rc < 0) {
3835                                 CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
3836                                       libcfs_nid2str(conn->ibc_peer->ibp_nid),
3837                                       rc);
3838                                 kiblnd_close_conn(conn, -EIO);
3839                                 kiblnd_conn_decref(conn);
3840                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3841                                 continue;
3842                         }
3843
3844                         spin_lock_irqsave(&sched->ibs_lock, flags);
3845
3846                         if (rc != 0 || conn->ibc_ready) {
3847                                 /* There may be another completion waiting; get
3848                                  * another scheduler to check while I handle
3849                                  * this one... */
3850                                 /* +1 ref for sched_conns */
3851                                 kiblnd_conn_addref(conn);
3852                                 list_add_tail(&conn->ibc_sched_list,
3853                                               &sched->ibs_conns);
3854                                 if (waitqueue_active(&sched->ibs_waitq))
3855                                         wake_up(&sched->ibs_waitq);
3856                         } else {
3857                                 conn->ibc_scheduled = 0;
3858                         }
3859
3860                         if (rc != 0) {
3861                                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3862                                 kiblnd_complete(&wc);
3863
3864                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3865                         }
3866
3867                         kiblnd_conn_decref(conn); /* ..drop my ref from above */
3868                         did_something = true;
3869                 }
3870
3871                 if (did_something)
3872                         continue;
3873
3874                 set_current_state(TASK_INTERRUPTIBLE);
3875                 add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
3876                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3877
3878                 schedule();
3879
3880                 remove_wait_queue(&sched->ibs_waitq, &wait);
3881                 set_current_state(TASK_RUNNING);
3882                 spin_lock_irqsave(&sched->ibs_lock, flags);
3883         }
3884
3885         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3886
3887         kiblnd_thread_fini();
3888         return 0;
3889 }
3890
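     /* Watch kib_failed_devs and retry HCA failover for each device once its
      * ibd_next_failover time arrives */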
3891 int
3892 kiblnd_failover_thread(void *arg)
3893 {
3894         rwlock_t *glock = &kiblnd_data.kib_global_lock;
3895         struct kib_dev *dev;
3896         struct net *ns = arg;
3897         wait_queue_entry_t wait;
3898         unsigned long flags;
3899         int rc;
3900
3901         LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
3902
3903         init_wait(&wait);
3904         write_lock_irqsave(glock, flags);
3905
3906         while (!kiblnd_data.kib_shutdown) {
3907                 bool do_failover = false;
3908                 int long_sleep;
3909
3910                 list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
3911                                     ibd_fail_list) {
3912                         if (ktime_get_seconds() < dev->ibd_next_failover)
3913                                 continue;
3914                         do_failover = true;
3915                         break;
3916                 }
3917
3918                 if (do_failover) {
3919                         list_del_init(&dev->ibd_fail_list);
3920                         dev->ibd_failover = 1;
3921                         write_unlock_irqrestore(glock, flags);
3922
3923                         rc = kiblnd_dev_failover(dev, ns);
3924
3925                         write_lock_irqsave(glock, flags);
3926
3927                         LASSERT(dev->ibd_failover);
3928                         dev->ibd_failover = 0;
3929                         if (rc >= 0) { /* Device is OK or failover succeeded */
3930                                 dev->ibd_next_failover = ktime_get_seconds() + 3;
3931                                 continue;
3932                         }
3933
3934                         /* failed to failover, retry later */
3935                         dev->ibd_next_failover = ktime_get_seconds() +
3936                                 min(dev->ibd_failed_failover, 10);
3937                         if (kiblnd_dev_can_failover(dev)) {
3938                                 list_add_tail(&dev->ibd_fail_list,
3939                                               &kiblnd_data.kib_failed_devs);
3940                         }
3941
3942                         continue;
3943                 }
3944
3945                 /* long sleep if no more pending failover */
3946                 long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
3947
3948                 set_current_state(TASK_INTERRUPTIBLE);
3949                 add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3950                 write_unlock_irqrestore(glock, flags);
3951
3952                 rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
3953                                       cfs_time_seconds(1));
3954                 set_current_state(TASK_RUNNING);
3955                 remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3956                 write_lock_irqsave(glock, flags);
3957
3958                 if (!long_sleep || rc != 0)
3959                         continue;
3960
3961                 /* After a long sleep, routinely check all active devices.
3962                  * This is needed because if a dev has no active connections
3963                  * and no local SENDs, we may keep listening on the wrong HCA
3964                  * forever after a bonding failover
3965                  */
3966                 list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
3967                         if (kiblnd_dev_can_failover(dev)) {
3968                                 list_add_tail(&dev->ibd_fail_list,
3969                                               &kiblnd_data.kib_failed_devs);
3970                         }
3971                 }
3972         }
3973
3974         write_unlock_irqrestore(glock, flags);
3975
3976         kiblnd_thread_fini();
3977         return 0;
3978 }