/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

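/* Presumed from the name: the number of times connection setup may lose
 * the active/passive race with a peer before the attempt is aborted
 * (the macro is referenced by the connection handling elsewhere in this
 * file). */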
#define MAX_CONN_RACES_BEFORE_ABORT 20

static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni);
static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
                                       int error);
static struct ib_rdma_wr *
kiblnd_init_tx_msg_payload(struct lnet_ni *ni, struct kib_tx *tx,
                               int type, int body_nob, int payload_nob);
#define kiblnd_init_tx_msg(ni, tx, type, body) \
        kiblnd_init_tx_msg_payload(ni, tx, type, body, 0)
static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
                            int resid, struct kib_rdma_desc *dstrd, u64 dstcookie);
static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);

static void kiblnd_unmap_tx(struct kib_tx *tx);
static void kiblnd_check_sends_locked(struct kib_conn *conn);

void
kiblnd_tx_done(struct kib_tx *tx)
{
        struct lnet_msg *lntmsg[2];
        int         rc;
        int         i;

        LASSERT (!in_interrupt());
        LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
        LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
        LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer_ni response */
        LASSERT (tx->tx_pool != NULL);

        kiblnd_unmap_tx(tx);

        /* tx may have up to 2 lnet msgs to finalise */
        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
        rc = tx->tx_status;

        if (tx->tx_conn != NULL) {
                kiblnd_conn_decref(tx->tx_conn);
                tx->tx_conn = NULL;
        }

        tx->tx_nwrq = tx->tx_nsge = 0;
        tx->tx_status = 0;

        kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

        /* delay finalize until my descs have been freed */
        for (i = 0; i < 2; i++) {
                if (lntmsg[i] == NULL)
                        continue;

                /* propagate health status to LNet for requests */
                if (i == 0 && lntmsg[i])
                        lntmsg[i]->msg_health_status = tx->tx_hstatus;

                lnet_finalize(lntmsg[i], rc);
        }
}

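/* Fail every tx on 'txlist' with the given status. Typical usage (see
 * kiblnd_reconnect_peer below): splice a peer_ni's pending tx queue
 * onto a private list under the global lock, drop the lock, then fail
 * the whole batch with a single status such as -ECONNABORTED. */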
void
kiblnd_txlist_done(struct list_head *txlist, int status,
                   enum lnet_msg_hstatus hstatus)
{
        struct kib_tx *tx;

        while ((tx = list_first_entry_or_null(txlist,
                                              struct kib_tx,
                                              tx_list)) != NULL) {
                list_del(&tx->tx_list);
                /* complete now */
                tx->tx_waiting = 0;
                tx->tx_status = status;
                if (hstatus != LNET_MSG_STATUS_OK)
                        tx->tx_hstatus = hstatus;
                kiblnd_tx_done(tx);
        }
}

static struct kib_tx *
kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
{
        struct kib_net *net = ni->ni_data;
        struct list_head *node;
        struct kib_tx *tx;
        struct kib_tx_poolset *tps;

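        /* tx pools are per-CPT; pick the pool belonging to the CPT that
         * handles this target NID so descriptors stay local to that
         * partition. */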
        tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
        node = kiblnd_pool_alloc_node(&tps->tps_poolset);
        if (node == NULL)
                return NULL;
        tx = container_of(node, struct kib_tx, tx_list);

        LASSERT (tx->tx_nwrq == 0);
        LASSERT (!tx->tx_queued);
        LASSERT (tx->tx_sending == 0);
        LASSERT (!tx->tx_waiting);
        LASSERT (tx->tx_status == 0);
        LASSERT (tx->tx_conn == NULL);
        LASSERT (tx->tx_lntmsg[0] == NULL);
        LASSERT (tx->tx_lntmsg[1] == NULL);
        LASSERT (tx->tx_nfrags == 0);

        tx->tx_gaps = false;
        tx->tx_hstatus = LNET_MSG_STATUS_OK;

        return tx;
}

static void
kiblnd_drop_rx(struct kib_rx *rx)
{
        struct kib_conn *conn = rx->rx_conn;
        struct kib_sched_info *sched = conn->ibc_sched;
        unsigned long flags;

        spin_lock_irqsave(&sched->ibs_lock, flags);
        LASSERT(conn->ibc_nrx > 0);
        conn->ibc_nrx--;
        spin_unlock_irqrestore(&sched->ibs_lock, flags);

        kiblnd_conn_decref(conn);
}

int
kiblnd_post_rx(struct kib_rx *rx, int credit)
{
        struct kib_conn *conn = rx->rx_conn;
        struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr *bad_wrq = NULL;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
#endif
        int rc;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
                 credit == IBLND_POSTRX_PEER_CREDIT ||
                 credit == IBLND_POSTRX_RSRVD_CREDIT);
#ifdef HAVE_IB_GET_DMA_MR
        LASSERT(mr != NULL);

        rx->rx_sge.lkey   = mr->lkey;
#else
        rx->rx_sge.lkey   = conn->ibc_hdev->ibh_pd->local_dma_lkey;
#endif
        rx->rx_sge.addr   = rx->rx_msgaddr;
        rx->rx_sge.length = IBLND_MSG_SIZE;

        rx->rx_wrq.next = NULL;
        rx->rx_wrq.sg_list = &rx->rx_sge;
        rx->rx_wrq.num_sge = 1;
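        /* The work request id presumably packs the rx pointer together
         * with the IBLND_WID_RX tag (kiblnd_ptr2wreqid) so the
         * completion handler can recover both the object and its type
         * from the CQE. */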
        rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

        LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
        LASSERT (rx->rx_nob >= 0);              /* not posted */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return 0;
        }

        rx->rx_nob = -1;                        /* flag posted */

        /* NB: need an extra reference after ib_post_recv because we don't
         * own this rx (and rx::rx_conn) anymore, LU-5678.
         */
        kiblnd_conn_addref(conn);
#ifdef HAVE_IB_POST_SEND_RECV_CONST
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
                          (const struct ib_recv_wr **)&bad_wrq);
#else
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
#endif
        if (unlikely(rc != 0)) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        }

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                goto out;

        if (unlikely(rc != 0)) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);     /* No more posts for this rx */
                goto out;
        }

        if (credit == IBLND_POSTRX_NO_CREDIT)
                goto out;

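        /* Return the credit: a peer credit is piggybacked on our next
         * send via ibc_outstanding_credits, while a reserved credit
         * lets a tx waiting on the reserved-credit queue proceed (see
         * kiblnd_check_sends_locked). */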
        spin_lock(&conn->ibc_lock);
        if (credit == IBLND_POSTRX_PEER_CREDIT)
                conn->ibc_outstanding_credits++;
        else
                conn->ibc_reserved_credits++;
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

out:
        kiblnd_conn_decref(conn);
        return rc;
}

static struct kib_tx *
kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
        struct kib_tx *tx;

        list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) {
                LASSERT(!tx->tx_queued);
                LASSERT(tx->tx_sending != 0 || tx->tx_waiting);

                if (tx->tx_cookie != cookie)
                        continue;

                if (tx->tx_waiting &&
                    tx->tx_msg->ibm_type == txtype)
                        return tx;

                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
                      tx->tx_waiting ? "" : "NOT ",
                      tx->tx_msg->ibm_type, txtype);
        }
        return NULL;
}

static void
kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, u64 cookie)
{
        struct kib_tx *tx;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        int idle;

        spin_lock(&conn->ibc_lock);

        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
        if (tx == NULL) {
                spin_unlock(&conn->ibc_lock);

                CWARN("Unmatched completion type %x cookie %#llx from %s\n",
                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kiblnd_close_conn(conn, -EPROTO);
                return;
        }

        if (tx->tx_status == 0) {               /* success so far */
                if (status < 0) {               /* failed? */
                        tx->tx_status = status;
                        tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
                } else if (txtype == IBLND_MSG_GET_REQ) {
                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
                }
        }

        tx->tx_waiting = 0;

        idle = !tx->tx_queued && (tx->tx_sending == 0);
        if (idle)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(tx);
}

static void
kiblnd_send_completion(struct kib_conn *conn, int type, int status, u64 cookie)
{
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);

        if (tx == NULL) {
                CERROR("Can't get tx for completion %x for %s\n",
                       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
        kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));

        kiblnd_queue_tx(tx, conn);
}

static void
kiblnd_handle_rx(struct kib_rx *rx)
{
        struct kib_msg *msg = rx->rx_msg;
        struct kib_conn *conn = rx->rx_conn;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        int credits = msg->ibm_credits;
        struct kib_tx *tx;
        int rc = 0;
        int rc2;
        int post_credit;
        struct lnet_hdr hdr;
        struct lnet_nid srcnid;

        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        CDEBUG (D_NET, "Received %x[%d] from %s\n",
                msg->ibm_type, credits,
                libcfs_nid2str(conn->ibc_peer->ibp_nid));

        if (credits != 0) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);

                if (conn->ibc_credits + credits >
                    conn->ibc_queue_depth) {
                        rc2 = conn->ibc_credits;
                        spin_unlock(&conn->ibc_lock);

                        CERROR("Bad credits from %s: %d + %d > %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               rc2, credits,
                               conn->ibc_queue_depth);

                        kiblnd_close_conn(conn, -EPROTO);
                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
                        return;
                }

                conn->ibc_credits += credits;

                /* This ensures the credit taken by NOOP can be returned */
                if (msg->ibm_type == IBLND_MSG_NOOP &&
                    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
                        conn->ibc_outstanding_credits++;

                kiblnd_check_sends_locked(conn);
                spin_unlock(&conn->ibc_lock);
        }

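        /* Each message type decides how this rx buffer is recycled:
         * IBLND_POSTRX_DONT_POST means lnet_parse() now owns the rx and
         * it will be reposted later, while the *_CREDIT values repost
         * it below and account for the credit accordingly. */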
        switch (msg->ibm_type) {
        default:
                CERROR("Bad IBLND message type %x from %s\n",
                       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_NO_CREDIT;
                rc = -EPROTO;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                        break;
                }

                if (credits != 0) /* credit already posted */
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else              /* a keepalive NOOP */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_IMMEDIATE:
                post_credit = IBLND_POSTRX_DONT_POST;
                lnet_hdr_from_nid4(&hdr, &msg->ibm_u.immediate.ibim_hdr);
                lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
                rc = lnet_parse(ni, &hdr, &srcnid, rx, 0);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                lnet_hdr_from_nid4(&hdr, &msg->ibm_u.putreq.ibprm_hdr);
                lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
                rc = lnet_parse(ni, &hdr, &srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_NAK:
                CWARN ("PUT_NACK from %s\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_PUT_ACK:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;

                spin_lock(&conn->ibc_lock);
                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                        msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT (tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer_ni has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = tx->tx_nsge = 0;  /* overwrite PUT_REQ */

                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                tx->tx_waiting = 0;     /* clear waiting and queue atomically */
                kiblnd_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBLND_MSG_PUT_DONE:
                post_credit = IBLND_POSTRX_PEER_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_GET_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                lnet_hdr_from_nid4(&hdr, &msg->ibm_u.get.ibgm_hdr);
                lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
                rc = lnet_parse(ni, &hdr, &srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_GET_DONE:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kiblnd_close_conn(conn, rc);

        if (post_credit != IBLND_POSTRX_DONT_POST)
                kiblnd_post_rx(rx, post_credit);
}

static void
kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
        struct kib_msg *msg = rx->rx_msg;
        struct kib_conn   *conn = rx->rx_conn;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_net *net = ni->ni_data;
        int rc;
        int err = -EIO;

        LASSERT(net);
        LASSERT(rx->rx_nob < 0);        /* was posted */
        rx->rx_nob = 0;                 /* isn't now */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
                goto ignore;

        if (status != IB_WC_SUCCESS) {
                CNETERR("Rx from %s failed: %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                goto failed;
        }

        LASSERT(nob >= 0);
        rx->rx_nob = nob;

        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
        if (rc != 0) {
                CERROR("Error %d unpacking rx from %s\n",
                       rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
            msg->ibm_dstnid != lnet_nid_to_nid4(&ni->ni_nid) ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != net->ibn_incarnation) {
                CERROR("Stale rx from %s\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                err = -ESTALE;
                goto failed;
        }

        /* set time last known alive */
        kiblnd_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
                unsigned long  flags;

                write_lock_irqsave(g_lock, flags);
                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(g_lock, flags);
                        return;
                }
                write_unlock_irqrestore(g_lock, flags);
        }
        kiblnd_handle_rx(rx);
        return;

failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kiblnd_close_conn(conn, err);
ignore:
        kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
}

static int
kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
                  struct kib_rdma_desc *rd, u32 nob)
{
        struct kib_hca_dev *hdev;
        struct kib_dev *dev;
        struct kib_fmr_poolset *fps;
        int                     cpt;
        int                     rc;
        int i;

        LASSERT(tx->tx_pool != NULL);
        LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);

        dev = net->ibn_dev;
        hdev = tx->tx_pool->tpo_hdev;
        cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;

        /*
         * If we're dealing with FastReg, but the device doesn't
         * support GAPS and the tx has GAPS, then there is no real point
         * in trying to map the memory, because it'll just fail. So
         * preemptively fail with an appropriate message
         */
        if (IS_FAST_REG_DEV(dev) &&
            !(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT) &&
            tx->tx_gaps) {
                CERROR("Using FastReg with no GAPS support, but tx has gaps. "
                       "Try setting use_fastreg_gaps to 1\n");
                return -EPROTONOSUPPORT;
        }

#ifdef HAVE_FMR_POOL_API
        /*
         * FMR does not support gaps. If the tx has gaps, make sure the
         * number of fragments we'll be sending fits within the number of
         * fragments negotiated on the connection; otherwise we won't be
         * able to RDMA the data. We need to maintain the fragment-count
         * negotiation on the connection for backwards compatibility.
         */
        if (tx->tx_gaps && (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)) {
                if (tx->tx_conn &&
                    tx->tx_conn->ibc_max_frags <= rd->rd_nfrags) {
                        CERROR("TX number of frags (%d) exceeds the number"
                               " of frags negotiated on the connection (%d)."
                               " Consider setting peer's map_on_demand to"
                               " 256\n", tx->tx_nfrags,
                               tx->tx_conn->ibc_max_frags);
                        return -EFBIG;
                }
        }
#endif

        fps = net->ibn_fmr_ps[cpt];
        rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
        if (rc != 0) {
                CERROR("Can't map %u bytes (%u/%u fragments): %d\n", nob,
                       tx->tx_nfrags, rd->rd_nfrags, rc);
                return rc;
        }

        /*
         * If rd is not tx_rd, it's going to get sent to a peer_ni, who will
         * need the rkey
         */
        rd->rd_key = tx->tx_fmr.fmr_key;
        /*
         * for FastReg or FMR with no gaps we can accumulate all
         * the fragments in one FastReg or FMR fragment.
         */
        if (
#ifdef HAVE_FMR_POOL_API
            ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
             && !tx->tx_gaps) ||
#endif
            IS_FAST_REG_DEV(dev)) {
                /* FMR requires zero based address */
#ifdef HAVE_FMR_POOL_API
                if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
                        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
#endif
                rd->rd_frags[0].rf_nob = nob;
                rd->rd_nfrags = 1;
        } else {
                /*
                 * We're transmitting with gaps using FMR.
                 * We'll need to use multiple fragments and identify the
                 * zero based address of each fragment.
                 */
                for (i = 0; i < rd->rd_nfrags; i++) {
                        rd->rd_frags[i].rf_addr &= ~hdev->ibh_page_mask;
                        rd->rd_frags[i].rf_addr += i << hdev->ibh_page_shift;
                }
        }

        return 0;
}

static void
kiblnd_unmap_tx(struct kib_tx *tx)
{
        if (
#ifdef HAVE_FMR_POOL_API
                tx->tx_fmr.fmr_pfmr ||
#endif
                tx->tx_fmr.fmr_frd)
                kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);

        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev, tx);
                tx->tx_nfrags = 0;
        }
}

#ifdef HAVE_IB_GET_DMA_MR
static struct ib_mr *
kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
{
        struct kib_net *net = ni->ni_data;
        struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
        struct lnet_ioctl_config_o2iblnd_tunables *tunables;

        tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;

        /*
         * if map-on-demand is turned on and the device supports
         * either FMR or FastReg then use that. Otherwise use global
         * memory regions. If that's not available either, then you're
         * dead in the water and fail the operation.
         */
        if (tunables->lnd_map_on_demand && (IS_FAST_REG_DEV(net->ibn_dev)
#ifdef HAVE_FMR_POOL_API
             || net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED
#endif
        ))
                return NULL;

        /*
         * hdev->ibh_mrs can be NULL. This case is dealt with gracefully
         * in the call chain. The mapping will fail with appropriate error
         * message.
         */
        return hdev->ibh_mrs;
}
#endif

static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
                         struct kib_rdma_desc *rd, int nfrags)
{
        struct kib_net *net = ni->ni_data;
        struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = NULL;
#endif
        __u32 nob;
        int i;

        /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the
         * RDMA sink */
        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        tx->tx_nfrags = nfrags;

        rd->rd_nfrags = kiblnd_dma_map_sg(hdev, tx);
        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                nob += rd->rd_frags[i].rf_nob;
        }

#ifdef HAVE_IB_GET_DMA_MR
        mr = kiblnd_find_rd_dma_mr(ni, rd);
        if (mr != NULL) {
                /* found pre-mapping MR */
                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
                return 0;
        }
#endif

        if (net->ibn_fmr_ps != NULL)
                return kiblnd_fmr_map_tx(net, tx, rd, nob);

        return -EINVAL;
}

static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
                                struct kib_rdma_desc *rd, int nkiov,
                                struct bio_vec *kiov, int offset, int nob)
{
        struct kib_net *net = ni->ni_data;
        struct scatterlist *sg;
        int fragnob;
        int max_nkiov;
        int sg_count = 0;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT(nob > 0);
        LASSERT(nkiov > 0);
        LASSERT(net != NULL);

        while (offset >= kiov->bv_len) {
                offset -= kiov->bv_len;
                nkiov--;
                kiov++;
                LASSERT(nkiov > 0);
        }

        max_nkiov = nkiov;

        sg = tx->tx_frags;
        do {
                LASSERT(nkiov > 0);

                if (!sg) {
                        CERROR("lacking enough sg entries to map tx\n");
                        return -EFAULT;
                }
                sg_count++;

                fragnob = min((int)(kiov->bv_len - offset), nob);

                /*
                 * We're allowed to start at a non-aligned page offset in
                 * the first fragment and end at a non-aligned page offset
                 * in the last fragment.
                 */
                if ((fragnob < (int)(kiov->bv_len - offset)) &&
                    nkiov < max_nkiov && nob > fragnob) {
                        CDEBUG(D_NET, "fragnob %d < available page %d: with"
                                      " remaining %d kiovs with %d nob left\n",
                               fragnob, (int)(kiov->bv_len - offset),
                               nkiov, nob);
                        tx->tx_gaps = true;
                }

                sg_set_page(sg, kiov->bv_page, fragnob,
                            kiov->bv_offset + offset);
                sg = sg_next(sg);

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg_count);
}

static int
kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
__must_hold(&conn->ibc_lock)
{
        struct kib_msg *msg = tx->tx_msg;
        struct kib_peer_ni *peer_ni = conn->ibc_peer;
        struct lnet_ni *ni = peer_ni->ibp_ni;
        struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
        int ver = conn->ibc_version;
        int rc;
        int done;

        LASSERT(tx->tx_queued);
        /* We rely on this for QP sizing */
        LASSERT(tx->tx_nwrq > 0 && tx->tx_nsge >= 0);
        LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);

        LASSERT(credit == 0 || credit == 1);
        LASSERT(conn->ibc_outstanding_credits >= 0);
        LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
        LASSERT(conn->ibc_credits >= 0);
        LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);

        if (conn->ibc_nsends_posted ==
            kiblnd_concurrent_sends(ver, ni)) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&   /* last credit reserved */
            msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                CDEBUG(D_NET, "%s: not using last credit\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        /* NB don't drop ibc_lock before bumping tx_sending */
        list_del(&tx->tx_list);
        tx->tx_queued = 0;

        if (msg->ibm_type == IBLND_MSG_NOOP &&
            (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
             (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                /* OK to drop when posted enough NOOPs, since
                 * kiblnd_check_sends_locked will queue NOOP again when
                 * posted NOOPs complete */
                spin_unlock(&conn->ibc_lock);
                tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
                kiblnd_tx_done(tx);
                spin_lock(&conn->ibc_lock);
                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                       libcfs_nid2str(peer_ni->ibp_nid),
                       conn->ibc_noops_posted);
                return 0;
        }

        kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
                        peer_ni->ibp_nid, conn->ibc_incarnation);

        conn->ibc_credits -= credit;
        conn->ibc_outstanding_credits = 0;
        conn->ibc_nsends_posted++;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted++;

        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
         * and then re-queued here.  It's (just) possible that
         * tx_sending is non-zero if we've not done the tx_complete()
         * from the first send; hence the ++ rather than = below. */
        tx->tx_sending++;
        list_add(&tx->tx_list, &conn->ibc_active_txs);

        /* I'm still holding ibc_lock! */
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
                rc = -ECONNABORTED;
        } else if (tx->tx_pool->tpo_pool.po_failed ||
                 conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
                /* close_conn will launch failover */
                rc = -ENETDOWN;
        } else {
                struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
                struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;

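                /* If this tx maps memory through a FastReg descriptor
                 * that hasn't been posted yet, chain the (conditional)
                 * invalidate and fast-registration work requests ahead
                 * of the transmit so the region is valid before the
                 * sends execute. */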
                if (frd != NULL && !frd->frd_posted) {
                        if (!frd->frd_valid) {
                                wr = &frd->frd_inv_wr.wr;
                                wr->next = &frd->frd_fastreg_wr.wr;
                        } else {
                                wr = &frd->frd_fastreg_wr.wr;
                        }
                        frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
                }

                LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
                         "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n",
                         bad->wr_id, bad->opcode, bad->send_flags,
                         libcfs_nid2str(conn->ibc_peer->ibp_nid));

                bad = NULL;
                if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus))
                        rc = -EINVAL;
                else
#ifdef HAVE_IB_POST_SEND_RECV_CONST
                        rc = ib_post_send(conn->ibc_cmid->qp, wr,
                                          (const struct ib_send_wr **)&bad);
#else
                        rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
#endif
        }

        conn->ibc_last_send = ktime_get();

        if (rc == 0) {
                if (frd != NULL)
                        frd->frd_posted = true;
                return 0;
        }

        /* NB credits are transferred in the actual
         * message, which can only be the last work item */
        conn->ibc_credits += credit;
        conn->ibc_outstanding_credits += msg->ibm_credits;
        conn->ibc_nsends_posted--;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        tx->tx_status = rc;
        tx->tx_waiting = 0;
        tx->tx_sending--;

        done = (tx->tx_sending == 0);
        if (done)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                CERROR("Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));
        else
                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));

        kiblnd_close_conn(conn, rc);

        if (done)
                kiblnd_tx_done(tx);

        spin_lock(&conn->ibc_lock);

        return -EIO;
}

static void
kiblnd_check_sends_locked(struct kib_conn *conn)
{
        int ver = conn->ibc_version;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        struct kib_tx *tx;

        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                CDEBUG(D_NET, "%s too soon\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        LASSERT(conn->ibc_nsends_posted <=
                kiblnd_concurrent_sends(ver, ni));
        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT (conn->ibc_reserved_credits >= 0);

        while (conn->ibc_reserved_credits > 0 &&
               (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd,
                                              struct kib_tx, tx_list)) != NULL) {
                list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
        }

        if (kiblnd_need_noop(conn)) {
                spin_unlock(&conn->ibc_lock);

                tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
                if (tx != NULL)
                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

                spin_lock(&conn->ibc_lock);
                if (tx != NULL)
                        kiblnd_queue_tx_locked(tx, conn);
        }

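        /* Drain the queues in priority order: completions that consume
         * no credit first, then NOOPs (v1 connections only), then
         * normal sends; stop as soon as a post would block on credits
         * or the concurrent-sends limit. */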
        for (;;) {
                int credit;

                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_first_entry(&conn->ibc_tx_queue_nocred,
                                              struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_noops)) {
                        LASSERT (!IBLND_OOB_CAPABLE(ver));
                        credit = 1;
                        tx = list_first_entry(&conn->ibc_tx_noops,
                                              struct kib_tx, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_first_entry(&conn->ibc_tx_queue,
                                              struct kib_tx, tx_list);
                } else
                        break;

                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                        break;
        }
}

static void
kiblnd_tx_complete(struct kib_tx *tx, int status)
{
        int           failed = (status != IB_WC_SUCCESS);
        struct kib_conn   *conn = tx->tx_conn;
        int           idle;

        if (tx->tx_sending <= 0) {
                CERROR("Received an event on a freed tx: %p status %d\n",
                       tx, tx->tx_status);
                return;
        }

        if (failed) {
                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CNETERR("Tx -> %s cookie %#llx"
                                " sending %d waiting %d: failed %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
                                status);

                kiblnd_close_conn(conn, -EIO);
        } else {
                kiblnd_peer_alive(conn->ibc_peer);
        }

        spin_lock(&conn->ibc_lock);

        /* I could be racing with rdma completion.  Whoever makes 'tx' idle
         * gets to free it, which also drops its ref on 'conn'. */

        tx->tx_sending--;
        conn->ibc_nsends_posted--;
        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        if (failed) {
                tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
                tx->tx_waiting = 0;             /* don't wait for peer_ni */
                tx->tx_status = -EIO;
        }

        idle = (tx->tx_sending == 0) &&         /* This is the final callback */
               !tx->tx_waiting &&               /* Not waiting for peer_ni */
               !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
        if (idle)
                list_del(&tx->tx_list);

        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(tx);
}


static void
kiblnd_init_tx_sge(struct kib_tx *tx, u64 addr, unsigned int len)
{
        struct ib_sge *sge = &tx->tx_sge[tx->tx_nsge];
        struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
#ifdef HAVE_IB_GET_DMA_MR
        struct ib_mr *mr = hdev->ibh_mrs;
#endif

        *sge = (struct ib_sge) {
#ifdef HAVE_IB_GET_DMA_MR
                .lkey   = mr->lkey,
#else
                .lkey   = hdev->ibh_pd->local_dma_lkey,
#endif
                .addr   = addr,
                .length = len,
        };

        tx->tx_nsge++;
}

static struct ib_rdma_wr *
kiblnd_init_tx_msg_payload(struct lnet_ni *ni, struct kib_tx *tx, int type,
                   int body_nob, int payload)
{
        struct ib_rdma_wr *wrq;
        int nob = offsetof(struct kib_msg, ibm_u) + body_nob;

        LASSERT(tx->tx_nwrq >= 0);
        LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
        LASSERT(nob <= IBLND_MSG_SIZE);

        kiblnd_init_msg(tx->tx_msg, type, body_nob + payload);

        wrq = &tx->tx_wrq[tx->tx_nwrq];

        *wrq = (struct ib_rdma_wr) {
                .wr = {
                        .wr_id          = kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
                        .num_sge        = 1,
                        .sg_list        = &tx->tx_sge[tx->tx_nsge],
                        .opcode         = IB_WR_SEND,
                        .send_flags     = IB_SEND_SIGNALED,
                },
        };

        kiblnd_init_tx_sge(tx, tx->tx_msgaddr, nob);

        tx->tx_nwrq++;
        return wrq;
}

static int
kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
                 int resid, struct kib_rdma_desc *dstrd, u64 dstcookie)
{
        struct kib_msg *ibmsg = tx->tx_msg;
        struct kib_rdma_desc *srcrd = tx->tx_rd;
        struct ib_rdma_wr *wrq = NULL;
        struct ib_sge     *sge;
        int                rc  = resid;
        int                srcidx;
        int                dstidx;
        int                sge_nob;
        int                wrq_sge;

        LASSERT(!in_interrupt());
        LASSERT(tx->tx_nwrq == 0 && tx->tx_nsge == 0);
        LASSERT(type == IBLND_MSG_GET_DONE || type == IBLND_MSG_PUT_DONE);

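        /* Walk the source and destination descriptors in lock-step,
         * coalescing up to kib_wrq_sge scatter/gather entries per RDMA
         * write; a new work request is started whenever the destination
         * fragment changes, since each WR targets a single remote
         * address. */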
        for (srcidx = dstidx = wrq_sge = sge_nob = 0;
             resid > 0; resid -= sge_nob) {
                int     prev = dstidx;

                if (srcidx >= srcrd->rd_nfrags) {
                        CERROR("Src buffer exhausted: %d frags %px\n",
                                srcidx, tx);
                        rc = -EPROTO;
                        break;
                }

                if (dstidx >= dstrd->rd_nfrags) {
                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                        rc = -EPROTO;
                        break;
                }

                if (tx->tx_nwrq >= conn->ibc_max_frags) {
                        CERROR("RDMA has too many fragments for peer_ni %s (%d), "
                               "src idx/frags: %d/%d dst idx/frags: %d/%d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               conn->ibc_max_frags,
                               srcidx, srcrd->rd_nfrags,
                               dstidx, dstrd->rd_nfrags);
                        rc = -EMSGSIZE;
                        break;
                }

                sge_nob = min3(kiblnd_rd_frag_size(srcrd, srcidx),
                               kiblnd_rd_frag_size(dstrd, dstidx),
                               resid);

                sge = &tx->tx_sge[tx->tx_nsge];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                sge->length = sge_nob;

                if (wrq_sge == 0) {
                        wrq = &tx->tx_wrq[tx->tx_nwrq];

                        wrq->wr.next    = &(wrq + 1)->wr;
                        wrq->wr.wr_id   = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
                        wrq->wr.sg_list = sge;
                        wrq->wr.opcode  = IB_WR_RDMA_WRITE;
                        wrq->wr.send_flags = 0;

#ifdef HAVE_IB_RDMA_WR
                        wrq->remote_addr        = kiblnd_rd_frag_addr(dstrd,
                                                                      dstidx);
                        wrq->rkey               = kiblnd_rd_frag_key(dstrd,
                                                                     dstidx);
#else
                        wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd,
                                                                        dstidx);
                        wrq->wr.wr.rdma.rkey    = kiblnd_rd_frag_key(dstrd,
                                                                     dstidx);
#endif
                }

                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, sge_nob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, sge_nob);

                wrq_sge++;
                if (wrq_sge == *kiblnd_tunables.kib_wrq_sge || dstidx != prev) {
                        tx->tx_nwrq++;
                        wrq->wr.num_sge = wrq_sge;
                        wrq_sge = 0;
                }
                tx->tx_nsge++;
        }

        if (rc < 0)     /* no RDMA if completing with failure */
                tx->tx_nwrq = tx->tx_nsge = 0;

        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
                           type, sizeof(struct kib_completion_msg));

        return rc;
}

static void
kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
{
        struct list_head *q;
        s64 timeout_ns;

        LASSERT(tx->tx_nwrq > 0);       /* work items set up */
        LASSERT(!tx->tx_queued);        /* not queued for sending already */
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) {
                tx->tx_status = -ECONNABORTED;
                tx->tx_waiting = 0;
                if (tx->tx_conn != NULL) {
                        /* PUT_DONE first attached to conn as a PUT_REQ */
                        LASSERT(tx->tx_conn == conn);
                        LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
                        tx->tx_conn = NULL;
                        kiblnd_conn_decref(conn);
                }
                list_add(&tx->tx_list, &conn->ibc_zombie_txs);

                return;
        }

        timeout_ns = kiblnd_timeout() * NSEC_PER_SEC;
        tx->tx_queued = 1;
        tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);

        if (tx->tx_conn == NULL) {
                kiblnd_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
        } else {
                /* PUT_DONE first attached to conn as a PUT_REQ */
                LASSERT (tx->tx_conn == conn);
                LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
        }

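        /* Pick the queue by message type: requests wait for reserved
         * credits, completion replies go out without consuming a
         * credit, and NOOPs are kept apart on v1 connections so that
         * redundant ones can be dropped at post time. */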
1290         switch (tx->tx_msg->ibm_type) {
1291         default:
1292                 LBUG();
1293
1294         case IBLND_MSG_PUT_REQ:
1295         case IBLND_MSG_GET_REQ:
1296                 q = &conn->ibc_tx_queue_rsrvd;
1297                 break;
1298
1299         case IBLND_MSG_PUT_NAK:
1300         case IBLND_MSG_PUT_ACK:
1301         case IBLND_MSG_PUT_DONE:
1302         case IBLND_MSG_GET_DONE:
1303                 q = &conn->ibc_tx_queue_nocred;
1304                 break;
1305
1306         case IBLND_MSG_NOOP:
1307                 if (IBLND_OOB_CAPABLE(conn->ibc_version))
1308                         q = &conn->ibc_tx_queue_nocred;
1309                 else
1310                         q = &conn->ibc_tx_noops;
1311                 break;
1312
1313         case IBLND_MSG_IMMEDIATE:
1314                 q = &conn->ibc_tx_queue;
1315                 break;
1316         }
1317
1318         list_add_tail(&tx->tx_list, q);
1319 }
1320
1321 static void
1322 kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
1323 {
1324         spin_lock(&conn->ibc_lock);
1325         kiblnd_queue_tx_locked(tx, conn);
1326         kiblnd_check_sends_locked(conn);
1327         spin_unlock(&conn->ibc_lock);
1328 }
1329
1330 static int
1331 kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid,
1332                         struct sockaddr_in *srcaddr,
1333                         struct sockaddr_in *dstaddr,
1334                         int timeout_ms)
1335 {
1336         unsigned short port;
1337         int rc;
1338
1339         /* allow the port to be reused */
1340         rc = rdma_set_reuseaddr(cmid, 1);
1341         if (rc != 0) {
1342                 CERROR("Unable to set reuse on cmid: %d\n", rc);
1343                 return rc;
1344         }
1345
1346         /* look for a free privileged port */
1347         for (port = PROT_SOCK-1; port > 0; port--) {
1348                 srcaddr->sin_port = htons(port);
1349                 rc = rdma_resolve_addr(cmid,
1350                                        (struct sockaddr *)srcaddr,
1351                                        (struct sockaddr *)dstaddr,
1352                                        timeout_ms);
1353                 if (rc == 0) {
1354                         CDEBUG(D_NET, "bound to port %hu\n", port);
1355                         return 0;
1356                 } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
1357                         CDEBUG(D_NET, "bind to port %hu failed: %d\n",
1358                                port, rc);
1359                 } else {
1360                         return rc;
1361                 }
1362         }
1363
1364         CERROR("cannot bind to a free privileged port: rc = %d\n", rc);
1365
1366         return rc;
1367 }
1368
1369 static int
1370 kiblnd_resolve_addr(struct rdma_cm_id *cmid,
1371                     struct sockaddr_in *srcaddr,
1372                     struct sockaddr_in *dstaddr,
1373                     int timeout_ms)
1374 {
1375         const struct cred *old_creds = NULL;
1376         struct cred *new_creds;
1377         int rc;
1378
1379         if (!capable(CAP_NET_BIND_SERVICE)) {
1380                 new_creds = prepare_kernel_cred(NULL);
1381                 if (!new_creds)
1382                         return -ENOMEM;
1383
1384                 cap_raise(new_creds->cap_effective, CAP_NET_BIND_SERVICE);
1385                 old_creds = override_creds(new_creds);
1386         }
1387
1388         rc = kiblnd_resolve_addr_cap(cmid, srcaddr, dstaddr, timeout_ms);
1389
1390         if (old_creds)
1391                 revert_creds(old_creds);
1392
1393         return rc;
1394 }
1395
1396 static void
1397 kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
1398 {
1399         struct rdma_cm_id *cmid;
1400         struct kib_dev *dev;
1401         struct kib_net *net = peer_ni->ibp_ni->ni_data;
1402         struct sockaddr_in srcaddr;
1403         struct sockaddr_in dstaddr;
1404         int rc;
1405
1406         LASSERT (net != NULL);
1407         LASSERT (peer_ni->ibp_connecting > 0);
1408
1409         cmid = kiblnd_rdma_create_id(peer_ni->ibp_ni->ni_net_ns,
1410                                      kiblnd_cm_callback, peer_ni,
1411                                      RDMA_PS_TCP, IB_QPT_RC);
1412
1413         if (IS_ERR(cmid)) {
1414                 CERROR("Can't create CMID for %s: %ld\n",
1415                        libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid));
1416                 rc = PTR_ERR(cmid);
1417                 goto failed;
1418         }
1419
1420         dev = net->ibn_dev;
1421         memset(&srcaddr, 0, sizeof(srcaddr));
1422         srcaddr.sin_family = AF_INET;
1423         srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
1424
1425         memset(&dstaddr, 0, sizeof(dstaddr));
1426         dstaddr.sin_family = AF_INET;
1427         dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
1428         dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid));
1429
1430         kiblnd_peer_addref(peer_ni);               /* cmid's ref */
1431
1432         if (*kiblnd_tunables.kib_use_priv_port) {
1433                 rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
1434                                          kiblnd_timeout() * 1000);
1435         } else {
1436                 rc = rdma_resolve_addr(cmid,
1437                                        (struct sockaddr *)&srcaddr,
1438                                        (struct sockaddr *)&dstaddr,
1439                                        kiblnd_timeout() * 1000);
1440         }
1441         if (rc != 0) {
                /* Can't initiate address resolution */
1443                 CERROR("Can't resolve addr for %s: %d\n",
1444                        libcfs_nid2str(peer_ni->ibp_nid), rc);
1445                 goto failed2;
1446         }
1447
1448         return;
1449
1450  failed2:
1451         kiblnd_peer_connect_failed(peer_ni, 1, rc);
1452         kiblnd_peer_decref(peer_ni);               /* cmid's ref */
1453         rdma_destroy_id(cmid);
1454         return;
1455  failed:
1456         kiblnd_peer_connect_failed(peer_ni, 1, rc);
1457 }
1458
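/* Retry an active connect for a peer_ni whose previous attempt failed in
 * a recoverable way (e.g. a connection race or parameter renegotiation).
 * Returns true if a fresh attempt was launched; otherwise the blocked
 * txs are completed with -ECONNABORTED and false is returned. */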
1459 bool
1460 kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
1461 {
1462         rwlock_t *glock = &kiblnd_data.kib_global_lock;
1463         char *reason = NULL;
1464         LIST_HEAD(txs);
1465         unsigned long flags;
1466
1467         write_lock_irqsave(glock, flags);
1468         if (peer_ni->ibp_reconnecting == 0) {
1469                 if (peer_ni->ibp_accepting)
1470                         reason = "accepting";
1471                 else if (peer_ni->ibp_connecting)
1472                         reason = "connecting";
1473                 else if (!list_empty(&peer_ni->ibp_conns))
1474                         reason = "connected";
1475                 else /* connected then closed */
1476                         reason = "closed";
1477
1478                 goto no_reconnect;
1479         }
1480
1481         if (peer_ni->ibp_accepting)
                CNETERR("Detected race between accepting and reconnecting\n");
1483         peer_ni->ibp_reconnecting--;
1484
1485         if (!kiblnd_peer_active(peer_ni)) {
1486                 list_splice_init(&peer_ni->ibp_tx_queue, &txs);
1487                 reason = "unlinked";
1488                 goto no_reconnect;
1489         }
1490
1491         peer_ni->ibp_connecting++;
1492         peer_ni->ibp_reconnected++;
1493
1494         write_unlock_irqrestore(glock, flags);
1495
1496         kiblnd_connect_peer(peer_ni);
1497         return true;
1498
1499  no_reconnect:
1500         write_unlock_irqrestore(glock, flags);
1501
        CWARN("Aborting reconnection of %s: %s\n",
1503               libcfs_nid2str(peer_ni->ibp_nid), reason);
1504         kiblnd_txlist_done(&txs, -ECONNABORTED,
1505                            LNET_MSG_STATUS_LOCAL_ABORTED);
1506         return false;
1507 }
1508
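/* Queue 'tx' (which may be NULL when only a connection is wanted) for
 * 'nid', creating the peer_ni and connecting on demand.  The locking is
 * a three-step dance, cheapest case first:
 *
 *   1) read lock:  peer_ni exists and is connected - queue on a conn
 *   2) write lock: peer_ni exists but is still connecting - park the tx
 *      on ibp_tx_queue for kiblnd_connreq_done() to flush
 *   3) no peer_ni:  drop the lock, allocate one, retake the write lock
 *      and recheck in case another thread beat us to it
 */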
1509 void
1510 kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
1511 {
1512         struct kib_peer_ni *peer_ni;
1513         struct kib_peer_ni *peer2;
1514         struct kib_conn *conn;
1515         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
1516         unsigned long flags;
1517         int rc;
1518         int i;
1519         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
1520
1521         /* If I get here, I've committed to send, so I complete the tx with
1522          * failure on any problems
1523          */
1524
1525         LASSERT(!tx || !tx->tx_conn);     /* only set when assigned a conn */
1526         LASSERT(!tx || tx->tx_nwrq > 0);  /* work items have been set up */
1527
1528         /* First time, just use a read lock since I expect to find my peer_ni
1529          * connected
1530          */
1531         read_lock_irqsave(g_lock, flags);
1532
1533         peer_ni = kiblnd_find_peer_locked(ni, nid);
1534         if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) {
1535                 /* Found a peer_ni with an established connection */
1536                 conn = kiblnd_get_conn_locked(peer_ni);
1537                 kiblnd_conn_addref(conn); /* 1 ref for me... */
1538
1539                 read_unlock_irqrestore(g_lock, flags);
1540
1541                 if (tx != NULL)
1542                         kiblnd_queue_tx(tx, conn);
1543                 kiblnd_conn_decref(conn); /* ...to here */
1544                 return;
1545         }
1546
        read_unlock(g_lock);
        /* Re-try with a write lock.  NB interrupts stay disabled across
         * the lock upgrade: 'flags' saved by read_lock_irqsave() above is
         * restored by the write_unlock_irqrestore() calls below */
        write_lock(g_lock);
1550
1551         peer_ni = kiblnd_find_peer_locked(ni, nid);
1552         if (peer_ni != NULL) {
1553                 if (list_empty(&peer_ni->ibp_conns)) {
1554                         /* found a peer_ni, but it's still connecting... */
1555                         LASSERT(kiblnd_peer_connecting(peer_ni));
1556                         if (tx != NULL)
1557                                 list_add_tail(&tx->tx_list,
1558                                               &peer_ni->ibp_tx_queue);
1559                         write_unlock_irqrestore(g_lock, flags);
1560                 } else {
1561                         conn = kiblnd_get_conn_locked(peer_ni);
1562                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1563
1564                         write_unlock_irqrestore(g_lock, flags);
1565
1566                         if (tx != NULL)
1567                                 kiblnd_queue_tx(tx, conn);
1568                         kiblnd_conn_decref(conn); /* ...to here */
1569                 }
1570                 return;
1571         }
1572
1573         write_unlock_irqrestore(g_lock, flags);
1574
1575         /* Allocate a peer_ni ready to add to the peer_ni table and retry */
1576         rc = kiblnd_create_peer(ni, &peer_ni, nid);
1577         if (rc != 0) {
1578                 CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid));
1579                 if (tx != NULL) {
1580                         tx->tx_status = -EHOSTUNREACH;
1581                         tx->tx_waiting = 0;
1582                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1583                         kiblnd_tx_done(tx);
1584                 }
1585                 return;
1586         }
1587
1588         write_lock_irqsave(g_lock, flags);
1589
1590         peer2 = kiblnd_find_peer_locked(ni, nid);
1591         if (peer2 != NULL) {
1592                 if (list_empty(&peer2->ibp_conns)) {
1593                         /* found a peer_ni, but it's still connecting... */
1594                         LASSERT(kiblnd_peer_connecting(peer2));
1595                         if (tx != NULL)
1596                                 list_add_tail(&tx->tx_list,
1597                                               &peer2->ibp_tx_queue);
1598                         write_unlock_irqrestore(g_lock, flags);
1599                 } else {
1600                         conn = kiblnd_get_conn_locked(peer2);
1601                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1602
1603                         write_unlock_irqrestore(g_lock, flags);
1604
1605                         if (tx != NULL)
1606                                 kiblnd_queue_tx(tx, conn);
1607                         kiblnd_conn_decref(conn); /* ...to here */
1608                 }
1609
1610                 kiblnd_peer_decref(peer_ni);
1611                 return;
1612         }
1613
1614         /* Brand new peer_ni */
1615         LASSERT(peer_ni->ibp_connecting == 0);
1616         tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
1617         peer_ni->ibp_connecting = tunables->lnd_conns_per_peer;
1618
1619         /* always called with a ref on ni, which prevents ni being shutdown */
1620         LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0);
1621
1622         if (tx != NULL)
1623                 list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
1624
1625         kiblnd_peer_addref(peer_ni);
1626         hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
1627
1628         write_unlock_irqrestore(g_lock, flags);
1629
1630         for (i = 0; i < tunables->lnd_conns_per_peer; i++)
1631                 kiblnd_connect_peer(peer_ni);
1632         kiblnd_peer_decref(peer_ni);
1633 }
1634
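/* LND send entry point.  The LNet message type picks the wire protocol,
 * roughly (see the switch below):
 *
 *	ACK              -> IBLND_MSG_IMMEDIATE (no payload)
 *	GET (non-routed) -> IBLND_MSG_GET_REQ carrying the sink
 *	                    descriptor, unless the REPLY would fit inline
 *	PUT/REPLY        -> IBLND_MSG_PUT_REQ, unless the payload fits
 *	                    inline
 *	everything else  -> IBLND_MSG_IMMEDIATE, with the payload copied
 *	                    (or, on fast-reg devices, gathered by SGEs)
 *	                    into the message buffer
 *
 * "fits inline" means the payload plus message header is no more than
 * IBLND_MSG_SIZE and the MD isn't GPU memory. */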
1635 int
1636 kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
1637 {
1638         struct kib_dev *dev = ((struct kib_net *)ni->ni_data)->ibn_dev;
1639         struct lnet_hdr *hdr = &lntmsg->msg_hdr;
1640         int type = lntmsg->msg_type;
1641         struct lnet_processid *target = &lntmsg->msg_target;
1642         int target_is_router = lntmsg->msg_target_is_router;
1643         int routing = lntmsg->msg_routing;
1644         unsigned int payload_niov = lntmsg->msg_niov;
1645         struct bio_vec *payload_kiov = lntmsg->msg_kiov;
1646         unsigned int payload_offset = lntmsg->msg_offset;
1647         unsigned int payload_nob = lntmsg->msg_len;
1648         struct lnet_libmd *msg_md = lntmsg->msg_md;
1649         bool gpu;
1650         struct kib_msg *ibmsg;
1651         struct kib_rdma_desc *rd;
1652         struct kib_tx *tx;
1653         int nob;
1654         int rc;
1655
        /* NB 'private' is different depending on what we're sending... */
1657
1658         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
1659                payload_nob, payload_niov, libcfs_idstr(target));
1660
1661         LASSERT(payload_nob == 0 || payload_niov > 0);
1662         LASSERT(payload_niov <= LNET_MAX_IOV);
1663
1664         /* Thread context */
1665         LASSERT(!in_interrupt());
1666
1667         tx = kiblnd_get_idle_tx(ni, lnet_nid_to_nid4(&target->nid));
1668         if (tx == NULL) {
1669                 CERROR("Can't allocate %s txd for %s\n",
1670                         lnet_msgtyp2str(type),
1671                         libcfs_nidstr(&target->nid));
1672                 return -ENOMEM;
1673         }
1674         ibmsg = tx->tx_msg;
1675         gpu = msg_md ? (msg_md->md_flags & LNET_MD_FLAG_GPU) : false;
1676
1677         switch (type) {
1678         default:
1679                 LBUG();
                return -EIO;
1681
1682         case LNET_MSG_ACK:
1683                 LASSERT(payload_nob == 0);
1684                 break;
1685
1686         case LNET_MSG_GET:
1687                 if (routing || target_is_router)
1688                         break;                  /* send IMMEDIATE */
1689
1690                 /* is the REPLY message too small for RDMA? */
1691                 nob = offsetof(struct kib_msg,
1692                                ibm_u.immediate.ibim_payload[msg_md->md_length]);
1693                 if (nob <= IBLND_MSG_SIZE && !gpu)
1694                         break;                  /* send IMMEDIATE */
1695
1696                 rd = &ibmsg->ibm_u.get.ibgm_rd;
1697                 tx->tx_gpu = gpu;
1698                 rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1699                                           msg_md->md_niov,
1700                                           msg_md->md_kiov,
1701                                           0, msg_md->md_length);
1702                 if (rc != 0) {
1703                         CERROR("Can't setup GET sink for %s: %d\n",
1704                                libcfs_nidstr(&target->nid), rc);
1705                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1706                         kiblnd_tx_done(tx);
1707                         return -EIO;
1708                 }
1709
1710                 nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
1711                 ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
1712                 lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.get.ibgm_hdr);
1713
1714                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
1715
1716                 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
1717                 if (tx->tx_lntmsg[1] == NULL) {
1718                         CERROR("Can't create reply for GET -> %s\n",
1719                                libcfs_nidstr(&target->nid));
1720                         kiblnd_tx_done(tx);
1721                         return -EIO;
1722                 }
1723
1724                 /* finalise lntmsg[0,1] on completion */
1725                 tx->tx_lntmsg[0] = lntmsg;
1726                 tx->tx_waiting = 1;             /* waiting for GET_DONE */
1727                 kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
1728                 return 0;
1729
1730         case LNET_MSG_REPLY:
1731         case LNET_MSG_PUT:
1732                 /* Is the payload small enough not to need RDMA? */
1733                 nob = offsetof(struct kib_msg,
1734                                 ibm_u.immediate.ibim_payload[payload_nob]);
1735                 if (nob <= IBLND_MSG_SIZE && !gpu)
1736                         break;                  /* send IMMEDIATE */
1737
1738                 tx->tx_gpu = gpu;
1739
1740                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1741                                           payload_niov, payload_kiov,
1742                                           payload_offset, payload_nob);
1743                 if (rc != 0) {
1744                         CERROR("Can't setup PUT src for %s: %d\n",
1745                                libcfs_nidstr(&target->nid), rc);
1746                         kiblnd_tx_done(tx);
1747                         return -EIO;
1748                 }
1749
1750                 lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.putreq.ibprm_hdr);
1751                 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
1752                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
1753                                    sizeof(struct kib_putreq_msg));
1754
1755                 /* finalise lntmsg[0,1] on completion */
1756                 tx->tx_lntmsg[0] = lntmsg;
1757                 tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
1758                 kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
1759                 return 0;
1760         }
1761
1762         /* send IMMEDIATE */
1763         LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
1764                 <= IBLND_MSG_SIZE);
1765
1766         ibmsg = tx->tx_msg;
1767         lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.immediate.ibim_hdr);
1768
        if (IS_FAST_REG_DEV(dev) && payload_nob) {
1770                 struct ib_rdma_wr *wrq;
1771                 int i;
1772
1773                 nob = offsetof(struct kib_immediate_msg, ibim_payload[0]);
1774                 wrq = kiblnd_init_tx_msg_payload(ni, tx, IBLND_MSG_IMMEDIATE,
1775                                                  nob, payload_nob);
1776
1777                 rd = tx->tx_rd;
1778                 rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1779                                           payload_niov, payload_kiov,
1780                                           payload_offset, payload_nob);
1781                 if (rc != 0) {
1782                         CERROR("Can't setup IMMEDIATE src for %s: %d\n",
1783                                libcfs_nidstr(&target->nid), rc);
1784                         kiblnd_tx_done(tx);
1785                         return -EIO;
1786                 }
1787
                /* let's generate an SGE chain */
1789                 for (i = 0; i < rd->rd_nfrags; i++) {
1790                         kiblnd_init_tx_sge(tx, rd->rd_frags[i].rf_addr,
1791                                            rd->rd_frags[i].rf_nob);
1792                         wrq->wr.num_sge++;
1793                 }
1794         } else {
1795                 lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
1796                                     offsetof(struct kib_msg,
1797                                              ibm_u.immediate.ibim_payload),
1798                                     payload_niov, payload_kiov,
1799                                     payload_offset, payload_nob);
1800
1801                 nob = offsetof(struct kib_immediate_msg,
1802                                ibim_payload[payload_nob]);
1803
1804                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1805         }
1806
1807         /* finalise lntmsg on completion */
1808         tx->tx_lntmsg[0] = lntmsg;
1809
1810         kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
1811         return 0;
1812 }
1813
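/* Service an optimized GET: RDMA lntmsg's payload directly into the sink
 * descriptor the peer_ni sent in its GET_REQ, completing with an
 * IBLND_MSG_GET_DONE that carries the matching cookie.  A zero-length
 * GET needs no RDMA, so lntmsg can be finalised immediately. */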
1814 static void
1815 kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
1816 {
1817         struct lnet_processid *target = &lntmsg->msg_target;
1818         unsigned int niov = lntmsg->msg_niov;
1819         struct bio_vec *kiov = lntmsg->msg_kiov;
1820         unsigned int offset = lntmsg->msg_offset;
1821         unsigned int nob = lntmsg->msg_len;
1822         struct lnet_libmd *payload_md = lntmsg->msg_md;
1823         struct kib_tx *tx;
1824         int rc;
1825
1826         tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
1827         if (tx == NULL) {
1828                 CERROR("Can't get tx for REPLY to %s\n",
1829                        libcfs_nidstr(&target->nid));
1830                 goto failed_0;
1831         }
1832
1833         tx->tx_gpu = !!(payload_md->md_flags & LNET_MD_FLAG_GPU);
1834         if (nob == 0)
1835                 rc = 0;
1836         else
1837                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1838                                           niov, kiov, offset, nob);
1839
1840         if (rc != 0) {
1841                 CERROR("Can't setup GET src for %s: %d\n",
1842                        libcfs_nidstr(&target->nid), rc);
1843                 goto failed_1;
1844         }
1845
1846         rc = kiblnd_init_rdma(rx->rx_conn, tx,
1847                               IBLND_MSG_GET_DONE, nob,
1848                               &rx->rx_msg->ibm_u.get.ibgm_rd,
1849                               rx->rx_msg->ibm_u.get.ibgm_cookie);
1850         if (rc < 0) {
1851                 CERROR("Can't setup rdma for GET from %s: %d\n",
1852                        libcfs_nidstr(&target->nid), rc);
1853                 goto failed_1;
1854         }
1855
1856         if (nob == 0) {
1857                 /* No RDMA: local completion may happen now! */
1858                 lnet_finalize(lntmsg, 0);
1859         } else {
1860                 /* RDMA: lnet_finalize(lntmsg) when it
1861                  * completes */
1862                 tx->tx_lntmsg[0] = lntmsg;
1863         }
1864
1865         kiblnd_queue_tx(tx, rx->rx_conn);
1866         return;
1867
1869 failed_1:
1870         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1871         kiblnd_tx_done(tx);
1872 failed_0:
1873         lnet_finalize(lntmsg, -EIO);
1874 }
1875
1876 unsigned int
1877 kiblnd_get_dev_prio(struct lnet_ni *ni, unsigned int dev_idx)
1878 {
1879         struct kib_net *net = ni->ni_data;
1880         struct device *dev = NULL;
1881
1882         if (net)
1883                 dev = net->ibn_dev->ibd_hdev->ibh_ibdev->dma_device;
1884
        return lnet_get_dev_prio(dev, dev_idx);
}
1888
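/* LND receive entry point, called from LNet with 'private' pointing at
 * the rx that carried the message.  An IMMEDIATE payload is copied
 * straight into the receiver's buffers; a PUT_REQ is answered with a
 * PUT_ACK describing our sink buffers (or a PUT_NAK on failure); a
 * GET_REQ is handed to kiblnd_reply().  On the way out the rx buffer is
 * reposted, returning a credit to the peer_ni unless the buffer stays
 * reserved for the expected PUT_DONE. */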
1889 int
1890 kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
1891             int delayed, unsigned int niov, struct bio_vec *kiov,
1892             unsigned int offset, unsigned int mlen, unsigned int rlen)
1893 {
1894         struct kib_rx *rx = private;
1895         struct kib_msg *rxmsg = rx->rx_msg;
1896         struct kib_conn *conn = rx->rx_conn;
1897         struct kib_tx *tx;
1898         __u64        ibprm_cookie;
1899         int          nob;
1900         int          post_credit = IBLND_POSTRX_PEER_CREDIT;
1901         int          rc = 0;
1902
1903         LASSERT (mlen <= rlen);
1904         LASSERT (!in_interrupt());
1905
1906         switch (rxmsg->ibm_type) {
1907         default:
1908                 LBUG();
1909                 /* fallthrough */
1910         case IBLND_MSG_IMMEDIATE:
1911                 nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
1912                 if (nob > rx->rx_nob) {
1913                         CERROR("Immediate message from %s too big: %d(%d)\n",
1914                                libcfs_nidstr(&lntmsg->msg_hdr.src_nid),
1915                                nob, rx->rx_nob);
1916                         rc = -EPROTO;
1917                         break;
1918                 }
1919
1920                 lnet_copy_flat2kiov(niov, kiov, offset,
1921                                     IBLND_MSG_SIZE, rxmsg,
1922                                     offsetof(struct kib_msg,
1923                                              ibm_u.immediate.ibim_payload),
1924                                     mlen);
1925                 lnet_finalize(lntmsg, 0);
1926                 break;
1927
1928         case IBLND_MSG_PUT_REQ: {
1929                 struct kib_msg  *txmsg;
1930                 struct kib_rdma_desc *rd;
1931                 struct lnet_libmd *payload_md = lntmsg->msg_md;
1932
1933                 ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
1934                 if (mlen == 0) {
1935                         lnet_finalize(lntmsg, 0);
1936                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1937                                                0, ibprm_cookie);
1938                         break;
1939                 }
1940
1941                 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
1942                 if (tx == NULL) {
1943                         CERROR("Can't allocate tx for %s\n",
1944                                 libcfs_nid2str(conn->ibc_peer->ibp_nid));
1945                         /* Not replying will break the connection */
1946                         rc = -ENOMEM;
1947                         break;
1948                 }
1949
1950                 tx->tx_gpu = !!(payload_md->md_flags & LNET_MD_FLAG_GPU);
1951                 txmsg = tx->tx_msg;
1952                 rd = &txmsg->ibm_u.putack.ibpam_rd;
1953                 rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1954                                           niov, kiov, offset, mlen);
1955                 if (rc != 0) {
1956                         CERROR("Can't setup PUT sink for %s: %d\n",
1957                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
1958                         tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
1959                         kiblnd_tx_done(tx);
1960                         /* tell peer_ni it's over */
1961                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1962                                                rc, ibprm_cookie);
1963                         break;
1964                 }
1965
1966                 nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
1967                 txmsg->ibm_u.putack.ibpam_src_cookie = ibprm_cookie;
1968                 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
1969
1970                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
1971
1972                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1973                 tx->tx_waiting = 1;             /* waiting for PUT_DONE */
1974                 kiblnd_queue_tx(tx, conn);
1975
1976                 /* reposted buffer reserved for PUT_DONE */
1977                 post_credit = IBLND_POSTRX_NO_CREDIT;
1978                 break;
1979                 }
1980
1981         case IBLND_MSG_GET_REQ:
1982                 if (lntmsg != NULL) {
1983                         /* Optimized GET; RDMA lntmsg's payload */
1984                         kiblnd_reply(ni, rx, lntmsg);
1985                 } else {
1986                         /* GET didn't match anything */
1987                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
1988                                                -ENODATA,
1989                                                rxmsg->ibm_u.get.ibgm_cookie);
1990                 }
1991                 break;
1992         }
1993
1994         kiblnd_post_rx(rx, post_credit);
1995         return rc;
1996 }
1997
1998 static void
kiblnd_thread_fini(void)
{
        atomic_dec(&kiblnd_data.kib_nthreads);
2002 }
2003
2004 static void
2005 kiblnd_peer_alive(struct kib_peer_ni *peer_ni)
2006 {
2007         /* This is racy, but everyone's only writing ktime_get_seconds() */
2008         peer_ni->ibp_last_alive = ktime_get_seconds();
2009         smp_mb();
2010 }
2011
2012 static void
2013 kiblnd_peer_notify(struct kib_peer_ni *peer_ni)
2014 {
2015         int           error = 0;
2016         time64_t last_alive = 0;
2017         unsigned long flags;
2018
2019         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2020
2021         if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
2022                 error = peer_ni->ibp_error;
2023                 peer_ni->ibp_error = 0;
2024
2025                 last_alive = peer_ni->ibp_last_alive;
2026         }
2027
2028         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2029
2030         if (error != 0)
2031                 lnet_notify(peer_ni->ibp_ni,
2032                             peer_ni->ibp_nid, false, false, last_alive);
2033 }
2034
2035 void
2036 kiblnd_close_conn_locked(struct kib_conn *conn, int error)
2037 {
2038         /* This just does the immediate housekeeping.  'error' is zero for a
2039          * normal shutdown which can happen only after the connection has been
2040          * established.  If the connection is established, schedule the
2041          * connection to be finished off by the connd.  Otherwise the connd is
2042          * already dealing with it (either to set it up or tear it down).
2043          * Caller holds kib_global_lock exclusively in irq context */
2044         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2045         struct kib_dev *dev;
2046         unsigned long flags;
2047
2048         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
2049
2050         if (error != 0 && conn->ibc_comms_error == 0)
2051                 conn->ibc_comms_error = error;
2052
2053         if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
                return; /* already being handled */
2055
2056         if (error == 0 &&
2057             list_empty(&conn->ibc_tx_noops) &&
2058             list_empty(&conn->ibc_tx_queue) &&
2059             list_empty(&conn->ibc_tx_queue_rsrvd) &&
2060             list_empty(&conn->ibc_tx_queue_nocred) &&
2061             list_empty(&conn->ibc_active_txs)) {
                CDEBUG(D_NET, "closing conn to %s\n",
2063                        libcfs_nid2str(peer_ni->ibp_nid));
2064         } else {
2065                 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
2066                        libcfs_nid2str(peer_ni->ibp_nid), error,
2067                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
2068                        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
2069                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
2070                                                 "" : "(sending_rsrvd)",
2071                        list_empty(&conn->ibc_tx_queue_nocred) ?
2072                                                  "" : "(sending_nocred)",
2073                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
2074         }
2075
2076         dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev;
2077         if (peer_ni->ibp_next_conn == conn)
2078                 /* clear next_conn so it won't be used */
2079                 peer_ni->ibp_next_conn = NULL;
2080         list_del(&conn->ibc_list);
2081         /* connd (see below) takes over ibc_list's ref */
2082
2083         if (list_empty(&peer_ni->ibp_conns) &&    /* no more conns */
2084             kiblnd_peer_active(peer_ni)) {         /* still in peer_ni table */
2085                 kiblnd_unlink_peer_locked(peer_ni);
2086
2087                 /* set/clear error on last conn */
2088                 peer_ni->ibp_error = conn->ibc_comms_error;
2089         }
2090
2091         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
2092
2093         if (error != 0 &&
2094             kiblnd_dev_can_failover(dev)) {
2095                 list_add_tail(&dev->ibd_fail_list,
2096                               &kiblnd_data.kib_failed_devs);
2097                 wake_up(&kiblnd_data.kib_failover_waitq);
2098         }
2099
2100         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
2101
2102         list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
2103         wake_up(&kiblnd_data.kib_connd_waitq);
2104
2105         spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
2106 }
2107
2108 void
2109 kiblnd_close_conn(struct kib_conn *conn, int error)
2110 {
2111         unsigned long flags;
2112
2113         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2114
2115         kiblnd_close_conn_locked(conn, error);
2116
2117         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2118 }
2119
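/* Replay rxs that arrived before the connection reached ESTABLISHED;
 * they were parked on ibc_early_rxs until the conn became usable.  The
 * global lock is dropped around each kiblnd_handle_rx() call, which may
 * queue txs on the conn. */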
2120 static void
2121 kiblnd_handle_early_rxs(struct kib_conn *conn)
2122 {
2123         unsigned long flags;
2124         struct kib_rx *rx;
2125
2126         LASSERT(!in_interrupt());
2127         LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
2128
2129         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2130         while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs,
2131                                               struct kib_rx,
2132                                               rx_list)) != NULL) {
2133                 list_del(&rx->rx_list);
2134                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2135
2136                 kiblnd_handle_rx(rx);
2137
2138                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2139         }
2140         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2141 }
2142
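/* Complete every tx on 'txs' with -ECONNABORTED, mapping each to an LNet
 * health status: queued txs become LOCAL_TIMEOUT or LOCAL_ERROR, while
 * active txs on a timed-out conn become REMOTE_TIMEOUT (waiting on the
 * peer_ni) or NETWORK_TIMEOUT (send still in flight).  Txs with sends
 * outstanding are parked on ibc_zombie_txs until the CQ is done with
 * them. */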
2143 void
2144 kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
2145 {
2146         LIST_HEAD(zombies);
2147         struct kib_tx *nxt;
2148         struct kib_tx *tx;
2149
2150         spin_lock(&conn->ibc_lock);
2151
2152         list_for_each_entry_safe(tx, nxt, txs, tx_list) {
2153                 if (txs == &conn->ibc_active_txs) {
2154                         LASSERT(!tx->tx_queued);
2155                         LASSERT(tx->tx_waiting ||
2156                                 tx->tx_sending != 0);
2157                         if (conn->ibc_comms_error == -ETIMEDOUT) {
2158                                 if (tx->tx_waiting && !tx->tx_sending)
2159                                         tx->tx_hstatus =
2160                                           LNET_MSG_STATUS_REMOTE_TIMEOUT;
2161                                 else if (tx->tx_sending)
2162                                         tx->tx_hstatus =
2163                                           LNET_MSG_STATUS_NETWORK_TIMEOUT;
2164                         }
2165                 } else {
2166                         LASSERT(tx->tx_queued);
2167                         if (conn->ibc_comms_error == -ETIMEDOUT)
2168                                 tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
2169                         else
2170                                 tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
2171                 }
2172
2173                 tx->tx_status = -ECONNABORTED;
2174                 tx->tx_waiting = 0;
2175
2176                 /*
2177                  * TODO: This makes an assumption that
2178                  * kiblnd_tx_complete() will be called for each tx. If
2179                  * that event is dropped we could end up with stale
2180                  * connections floating around. We'd like to deal with
2181                  * that in a better way.
2182                  *
2183                  * Also that means we can exceed the timeout by many
2184                  * seconds.
2185                  */
2186                 if (tx->tx_sending == 0) {
2187                         tx->tx_queued = 0;
2188                         list_move(&tx->tx_list, &zombies);
2189                 } else {
2190                         /* keep tx until cq destroy */
2191                         list_move(&tx->tx_list, &conn->ibc_zombie_txs);
                        conn->ibc_waits++;
2193                 }
2194         }
2195
2196         spin_unlock(&conn->ibc_lock);
2197
2198         /*
2199          * aborting transmits occurs when finalizing the connection.
2200          * The connection is finalized on error.
2201          * Passing LNET_MSG_STATUS_OK to txlist_done() will not
2202          * override the value already set in tx->tx_hstatus above.
2203          */
2204         kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
2205 }
2206
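/* Check whether any zombie tx can give up its send reference because
 * LNet has already discarded its MD; returns true if that dropped a conn
 * ref, so the caller should re-examine the connection. */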
2207 static bool
2208 kiblnd_tx_may_discard(struct kib_conn *conn)
2209 {
2210         bool rc = false;
2211         struct kib_tx *nxt;
2212         struct kib_tx *tx;
2213
2214         spin_lock(&conn->ibc_lock);
2215
2216         list_for_each_entry_safe(tx, nxt, &conn->ibc_zombie_txs, tx_list) {
2217                 if (tx->tx_sending > 0 && tx->tx_lntmsg[0] &&
2218                     lnet_md_discarded(tx->tx_lntmsg[0]->msg_md)) {
                        tx->tx_sending--;
2220                         if (tx->tx_sending == 0) {
2221                                 kiblnd_conn_decref(tx->tx_conn);
2222                                 tx->tx_conn = NULL;
2223                                 rc = true;
2224                         }
2225                 }
2226         }
2227
2228         spin_unlock(&conn->ibc_lock);
2229         return rc;
2230 }
2231
2232 static void
2233 kiblnd_finalise_conn(struct kib_conn *conn)
2234 {
2235         LASSERT (!in_interrupt());
2236         LASSERT (conn->ibc_state > IBLND_CONN_INIT);
2237
2238         /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
2239          * for connections that didn't get as far as being connected, because
2240          * rdma_disconnect() does this for free. */
2241         kiblnd_abort_receives(conn);
2242
2243         kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
2244
2245         /* Complete all tx descs not waiting for sends to complete.
2246          * NB we should be safe from RDMA now that the QP has changed state */
2247
2248         kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
2249         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
2250         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
2251         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
2252         kiblnd_abort_txs(conn, &conn->ibc_active_txs);
2253
2254         kiblnd_handle_early_rxs(conn);
2255 }
2256
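/* A connection attempt (active or passive) to peer_ni has failed.  If no
 * other attempt is in flight, unlink the peer_ni when it has no conns
 * left and complete its blocked txs with 'error', mapped to a health
 * status: -EHOSTUNREACH/-ETIMEDOUT -> NETWORK_TIMEOUT, -ECONNREFUSED ->
 * REMOTE_DROPPED, anything else -> LOCAL_DROPPED. */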
2257 static void
2258 kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
2259                            int error)
2260 {
2261         LIST_HEAD(zombies);
2262         unsigned long flags;
2263         enum lnet_msg_hstatus hstatus;
2264
2265         LASSERT(error != 0);
2266         LASSERT(!in_interrupt());
2267
2268         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2269
2270         if (active) {
2271                 LASSERT(peer_ni->ibp_connecting > 0);
2272                 peer_ni->ibp_connecting--;
2273         } else {
2274                 LASSERT (peer_ni->ibp_accepting > 0);
2275                 peer_ni->ibp_accepting--;
2276         }
2277
2278         if (kiblnd_peer_connecting(peer_ni)) {
2279                 /* another connection attempt under way... */
2280                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2281                                         flags);
2282                 return;
2283         }
2284
2285         peer_ni->ibp_reconnected = 0;
2286         if (list_empty(&peer_ni->ibp_conns)) {
2287                 /* Take peer_ni's blocked transmits to complete with error */
2288                 list_splice_init(&peer_ni->ibp_tx_queue, &zombies);
2289
2290                 if (kiblnd_peer_active(peer_ni))
2291                         kiblnd_unlink_peer_locked(peer_ni);
2292
2293                 peer_ni->ibp_error = error;
2294         } else {
2295                 /* Can't have blocked transmits if there are connections */
2296                 LASSERT(list_empty(&peer_ni->ibp_tx_queue));
2297         }
2298
2299         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2300
2301         kiblnd_peer_notify(peer_ni);
2302
2303         if (list_empty(&zombies))
2304                 return;
2305
2306         CNETERR("Deleting messages for %s: connection failed\n",
2307                 libcfs_nid2str(peer_ni->ibp_nid));
2308
2309         switch (error) {
2310         case -EHOSTUNREACH:
2311         case -ETIMEDOUT:
2312                 hstatus = LNET_MSG_STATUS_NETWORK_TIMEOUT;
2313                 break;
2314         case -ECONNREFUSED:
2315                 hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
2316                 break;
2317         default:
2318                 hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
2319                 break;
2320         }
2321
2322         kiblnd_txlist_done(&zombies, error, hstatus);
2323 }
2324
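/* Complete a connection attempt, active or passive.  On failure notify
 * the peer_ni and finalise the conn.  On success add the conn to
 * ibp_conns, nuke stale conns from an older incarnation, and queue the
 * txs that were blocked on the peer_ni.  NB with conns_per_peer > 1 all
 * of those blocked txs land on this first conn; round-robin only applies
 * to later sends. */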
2325 static void
2326 kiblnd_connreq_done(struct kib_conn *conn, int status)
2327 {
2328         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2329         struct kib_tx *tx;
2330         LIST_HEAD(txs);
2331         unsigned long    flags;
2332         int              active;
2333
2334         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2335
        CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
2337                libcfs_nid2str(peer_ni->ibp_nid), active,
2338                conn->ibc_version, status);
2339
2340         LASSERT (!in_interrupt());
2341         LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
2342                   peer_ni->ibp_connecting > 0) ||
2343                  (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
2344                   peer_ni->ibp_accepting > 0));
2345
2346         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
2347         conn->ibc_connvars = NULL;
2348
2349         if (status != 0) {
2350                 /* failed to establish connection */
2351                 kiblnd_peer_connect_failed(peer_ni, active, status);
2352                 kiblnd_finalise_conn(conn);
2353                 return;
2354         }
2355
2356         /* connection established */
2357         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2358
2359         conn->ibc_last_send = ktime_get();
2360         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
2361         kiblnd_peer_alive(peer_ni);
2362
2363         /* Add conn to peer_ni's list and nuke any dangling conns from a different
2364          * peer_ni instance... */
2365         kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
2366         list_add(&conn->ibc_list, &peer_ni->ibp_conns);
2367         peer_ni->ibp_reconnected = 0;
2368         if (active)
2369                 peer_ni->ibp_connecting--;
2370         else
2371                 peer_ni->ibp_accepting--;
2372
2373         if (peer_ni->ibp_version == 0) {
2374                 peer_ni->ibp_version     = conn->ibc_version;
2375                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2376         }
2377
2378         if (peer_ni->ibp_version     != conn->ibc_version ||
2379             peer_ni->ibp_incarnation != conn->ibc_incarnation) {
2380                 kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version,
2381                                                 conn->ibc_incarnation);
2382                 peer_ni->ibp_version     = conn->ibc_version;
2383                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2384         }
2385
2386         /* grab pending txs while I have the lock */
2387         list_splice_init(&peer_ni->ibp_tx_queue, &txs);
2388
2389         if (!kiblnd_peer_active(peer_ni) ||        /* peer_ni has been deleted */
2390             conn->ibc_comms_error != 0) {       /* error has happened already */
2391
2392                 /* start to shut down connection */
2393                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
2394                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2395
2396                 kiblnd_txlist_done(&txs, -ECONNABORTED,
2397                                    LNET_MSG_STATUS_LOCAL_ERROR);
2398
2399                 return;
2400         }
2401
        /* +1 ref for myself: this connection is visible to other threads
         * now, so the ibp_conns ref could be released by a connection
         * close from either a different thread or by the call to
         * kiblnd_check_sends_locked() below.  See bz21911 for details.
         */
2407         kiblnd_conn_addref(conn);
2408         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2409
2410         /* Schedule blocked txs
2411          * Note: if we are running with conns_per_peer > 1, these blocked
2412          * txs will all get scheduled to the first connection which gets
2413          * scheduled.  We won't be using round robin on this first batch.
2414          */
2415         spin_lock(&conn->ibc_lock);
2416         while ((tx = list_first_entry_or_null(&txs, struct kib_tx,
2417                                               tx_list)) != NULL) {
2418                 list_del(&tx->tx_list);
2419
2420                 kiblnd_queue_tx_locked(tx, conn);
2421         }
2422         kiblnd_check_sends_locked(conn);
2423         spin_unlock(&conn->ibc_lock);
2424
2425         /* schedule blocked rxs */
2426         kiblnd_handle_early_rxs(conn);
2427         kiblnd_conn_decref(conn);
2428 }
2429
2430 static void
2431 kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
2432 {
2433         int          rc;
2434
2435 #ifdef HAVE_RDMA_REJECT_4ARGS
2436         rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED);
2437 #else
2438         rc = rdma_reject(cmid, rej, sizeof(*rej));
2439 #endif
2440
2441         if (rc != 0)
2442                 CWARN("Error %d sending reject\n", rc);
2443 }
2444
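/* Handle an incoming connection request.  It is rejected unless the
 * magic/version are compatible, the dst NID matches one of our NIs on
 * this device, the incarnation stamps are current, and the requested
 * queue depth and max frags are within our limits.  A race with our own
 * active connect is normally broken in favour of the higher NID (see
 * MAX_CONN_RACES_BEFORE_ABORT).  On success rdma_accept() sends a
 * CONNACK carrying our parameters and the cmid then belongs to the new
 * conn. */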
2445 static int
2446 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2447 {
2448         rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
2449         struct kib_msg *reqmsg = priv;
2450         struct kib_msg *ackmsg;
2451         struct kib_dev *ibdev;
2452         struct kib_peer_ni *peer_ni;
2453         struct kib_peer_ni *peer2;
2454         struct kib_conn *conn;
2455         struct lnet_ni *ni = NULL;
2456         struct kib_net *net = NULL;
2457         lnet_nid_t nid;
2458         struct rdma_conn_param cp;
2459         struct kib_rej rej;
2460         int version = IBLND_MSG_VERSION;
2461         unsigned long flags;
2462         int rc;
2463         struct sockaddr_in *peer_addr;
2464
2465         LASSERT(!in_interrupt());
2466         /* cmid inherits 'context' from the corresponding listener id */
2467         ibdev = cmid->context;
2468         LASSERT(ibdev);
2469
2470         memset(&rej, 0, sizeof(rej));
2471         rej.ibr_magic                = IBLND_MSG_MAGIC;
2472         rej.ibr_why                  = IBLND_REJECT_FATAL;
2473         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
2474
2475         peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
2476         if (*kiblnd_tunables.kib_require_priv_port &&
2477             ntohs(peer_addr->sin_port) >= PROT_SOCK) {
2478                 __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
2479                 CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
2480                        &ip, ntohs(peer_addr->sin_port));
2481                 goto failed;
2482         }
2483
2484         if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
2485                 CERROR("Short connection request\n");
2486                 goto failed;
2487         }
2488
2489         /* Future protocol version compatibility support!  If the
2490          * o2iblnd-specific protocol changes, or when LNET unifies
2491          * protocols over all LNDs, the initial connection will
2492          * negotiate a protocol version.  I trap this here to avoid
2493          * console errors; the reject tells the peer_ni which protocol I
2494          * speak. */
2495         if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
2496             reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
2497                 goto failed;
2498         if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
2499             reqmsg->ibm_version != IBLND_MSG_VERSION &&
2500             reqmsg->ibm_version != IBLND_MSG_VERSION_1)
2501                 goto failed;
2502         if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
2503             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
2504             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
2505                 goto failed;
2506
2507         rc = kiblnd_unpack_msg(reqmsg, priv_nob);
2508         if (rc != 0) {
2509                 CERROR("Can't parse connection request: %d\n", rc);
2510                 goto failed;
2511         }
2512
2513         nid = reqmsg->ibm_srcnid;
2514         ni  = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
2515
2516         if (ni != NULL) {
2517                 net = (struct kib_net *)ni->ni_data;
2518                 rej.ibr_incarnation = net->ibn_incarnation;
2519         }
2520
2521         if (ni == NULL ||                       /* no matching net */
2522             lnet_nid_to_nid4(&ni->ni_nid) !=
2523             reqmsg->ibm_dstnid ||               /* right NET, wrong NID! */
2524             net->ibn_dev != ibdev) {            /* wrong device */
                CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
                       libcfs_nid2str(nid),
                       ni ? libcfs_nidstr(&ni->ni_nid) : "NA",
                       ibdev->ibd_ifname, ibdev->ibd_nnets,
                       &ibdev->ibd_ifip,
                       libcfs_nid2str(reqmsg->ibm_dstnid));
2530
2531                 goto failed;
2532         }
2533
        /* check the timestamp as soon as possible */
2535         if (reqmsg->ibm_dststamp != 0 &&
2536             reqmsg->ibm_dststamp != net->ibn_incarnation) {
2537                 CWARN("Stale connection request\n");
2538                 rej.ibr_why = IBLND_REJECT_CONN_STALE;
2539                 goto failed;
2540         }
2541
2542         /* I can accept peer_ni's version */
2543         version = reqmsg->ibm_version;
2544
2545         if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
2546                 CERROR("Unexpected connreq msg type: %x from %s\n",
2547                        reqmsg->ibm_type, libcfs_nid2str(nid));
2548                 goto failed;
2549         }
2550
2551         if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
2552             kiblnd_msg_queue_size(version, ni)) {
                CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
2554                        libcfs_nid2str(nid),
2555                        reqmsg->ibm_u.connparams.ibcp_queue_depth,
2556                        kiblnd_msg_queue_size(version, ni));
2557
2558                 if (version == IBLND_MSG_VERSION)
2559                         rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
2560
2561                 goto failed;
2562         }
2563
2564         if (reqmsg->ibm_u.connparams.ibcp_max_frags >
2565             IBLND_MAX_RDMA_FRAGS) {
2566                 CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
2567                       libcfs_nid2str(nid), version,
2568                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2569                       IBLND_MAX_RDMA_FRAGS);
2570
2571                 if (version >= IBLND_MSG_VERSION)
2572                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2573
2574                 goto failed;
2575         } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
2576                    IBLND_MAX_RDMA_FRAGS &&
2577                    net->ibn_fmr_ps == NULL) {
2578                 CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
2579                       libcfs_nid2str(nid), version,
2580                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2581                       IBLND_MAX_RDMA_FRAGS);
2582
2583                 if (version == IBLND_MSG_VERSION)
2584                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2585
2586                 goto failed;
2587         }
2588
2589         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2590                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
2591                        libcfs_nid2str(nid),
2592                        reqmsg->ibm_u.connparams.ibcp_max_msg_size,
2593                        IBLND_MSG_SIZE);
2594                 goto failed;
2595         }
2596
        /* assume 'nid' is a new peer_ni; create one */
2598         rc = kiblnd_create_peer(ni, &peer_ni, nid);
2599         if (rc != 0) {
2600                 CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid));
2601                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2602                 goto failed;
2603         }
2604
2605         /* We have validated the peer's parameters so use those */
2606         peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
2607         peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
2608
2609         write_lock_irqsave(g_lock, flags);
2610
2611         peer2 = kiblnd_find_peer_locked(ni, nid);
2612         if (peer2 != NULL) {
2613                 if (peer2->ibp_version == 0) {
2614                         peer2->ibp_version     = version;
2615                         peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2616                 }
2617
2618                 /* not the guy I've talked with */
2619                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
2620                     peer2->ibp_version     != version) {
2621                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
2622
2623                         if (kiblnd_peer_active(peer2)) {
2624                                 peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2625                                 peer2->ibp_version = version;
2626                         }
2627                         write_unlock_irqrestore(g_lock, flags);
2628
2629                         CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
2630                               libcfs_nid2str(nid), peer2->ibp_version, version,
2631                               peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
2632
2633                         kiblnd_peer_decref(peer_ni);
2634                         rej.ibr_why = IBLND_REJECT_CONN_STALE;
2635                         goto failed;
2636                 }
2637
2638                 /* Tie-break connection race in favour of the higher NID.
2639                  * If we keep running into a race condition multiple times,
2640                  * we have to assume that the connection attempt with the
2641                  * higher NID is stuck in a connecting state and will never
2642                  * recover.  As such, we pass through this if-block and let
2643                  * the lower NID connection win so we can move forward.
2644                  */
2645                 if (peer2->ibp_connecting != 0 &&
2646                     nid < lnet_nid_to_nid4(&ni->ni_nid) &&
2647                     peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) {
2648                         peer2->ibp_races++;
2649                         write_unlock_irqrestore(g_lock, flags);
2650
2651                         CDEBUG(D_NET, "Conn race %s\n",
2652                                libcfs_nid2str(peer2->ibp_nid));
2653
2654                         kiblnd_peer_decref(peer_ni);
2655                         rej.ibr_why = IBLND_REJECT_CONN_RACE;
2656                         goto failed;
2657                 }
2658                 if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
2659                         CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
2660                                 libcfs_nid2str(peer2->ibp_nid),
2661                                 MAX_CONN_RACES_BEFORE_ABORT);
                /* a passive connection is allowed even while this peer_ni
                 * is waiting for reconnection */
2666                 peer2->ibp_reconnecting = 0;
2667                 peer2->ibp_races = 0;
2668                 peer2->ibp_accepting++;
2669                 kiblnd_peer_addref(peer2);
2670
                /* We raced with kiblnd_launch_tx (active connect) to create
                 * this peer_ni, so copy the validated parameters now that we
                 * know the peer_ni's limits */
2674                 peer2->ibp_max_frags = peer_ni->ibp_max_frags;
2675                 peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
2676
2677                 write_unlock_irqrestore(g_lock, flags);
2678                 kiblnd_peer_decref(peer_ni);
2679                 peer_ni = peer2;
2680         } else {
2681                 /* Brand new peer_ni */
2682                 LASSERT(peer_ni->ibp_accepting == 0);
2683                 LASSERT(peer_ni->ibp_version == 0 &&
2684                         peer_ni->ibp_incarnation == 0);
2685
2686                 peer_ni->ibp_accepting   = 1;
2687                 peer_ni->ibp_version     = version;
2688                 peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
2689
2690                 /* I have a ref on ni that prevents it being shutdown */
2691                 LASSERT(net->ibn_shutdown == 0);
2692
2693                 kiblnd_peer_addref(peer_ni);
2694                 hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
2695
2696                 write_unlock_irqrestore(g_lock, flags);
2697         }
2698
2699         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT,
2700                                   version);
2701         if (!conn) {
2702                 kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
2703                 kiblnd_peer_decref(peer_ni);
2704                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2705                 goto failed;
2706         }
2707
2708         /* conn now "owns" cmid, so I return success from here on to ensure the
2709          * CM callback doesn't destroy cmid.
2710          */
2711         conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
2712         conn->ibc_credits          = conn->ibc_queue_depth;
2713         conn->ibc_reserved_credits = conn->ibc_queue_depth;
2714         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
2715                 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
2716
2717         ackmsg = &conn->ibc_connvars->cv_msg;
2718         memset(ackmsg, 0, sizeof(*ackmsg));
2719
2720         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
2721                         sizeof(ackmsg->ibm_u.connparams));
2722         ackmsg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
2723         ackmsg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
2724         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2725
2726         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
2727
2728         memset(&cp, 0, sizeof(cp));
2729         cp.private_data        = ackmsg;
2730         cp.private_data_len    = ackmsg->ibm_nob;
2731         cp.responder_resources = 0;            /* No atomic ops or RDMA reads */
2732         cp.initiator_depth     = 0;
2733         cp.flow_control        = 1;
2734         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2735         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2736
2737         CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
2738
2739         rc = rdma_accept(cmid, &cp);
2740         if (rc != 0) {
2741                 CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
2742                 rej.ibr_version = version;
2743                 rej.ibr_why     = IBLND_REJECT_FATAL;
2744
2745                 kiblnd_reject(cmid, &rej);
2746                 kiblnd_connreq_done(conn, rc);
2747                 kiblnd_conn_decref(conn);
2748         }
2749
2750         lnet_ni_decref(ni);
2751         return 0;
2752
2753  failed:
2754         if (ni != NULL) {
2755                 rej.ibr_cp.ibcp_queue_depth =
2756                         kiblnd_msg_queue_size(version, ni);
2757                 rej.ibr_cp.ibcp_max_frags   = IBLND_MAX_RDMA_FRAGS;
2758                 lnet_ni_decref(ni);
2759         }
2760
2761         rej.ibr_version = version;
2762         kiblnd_reject(cmid, &rej);
2763
2764         return -ECONNREFUSED;
2765 }
2766
2767 static void
2768 kiblnd_check_reconnect(struct kib_conn *conn, int version,
2769                        u64 incarnation, int why, struct kib_connparams *cp)
2770 {
2771         rwlock_t        *glock = &kiblnd_data.kib_global_lock;
2772         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2773         char            *reason;
2774         int              msg_size = IBLND_MSG_SIZE;
2775         int              frag_num = -1;
2776         int              queue_dep = -1;
2777         bool             reconnect;
2778         unsigned long    flags;
2779
2780         LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2781         LASSERT(peer_ni->ibp_connecting > 0);   /* 'conn' at least */
2782
2783         if (cp) {
2784                 msg_size        = cp->ibcp_max_msg_size;
2785                 frag_num        = cp->ibcp_max_frags;
2786                 queue_dep       = cp->ibcp_queue_depth;
2787         }
2788
2789         write_lock_irqsave(glock, flags);
2790         /* Retry the connection if it's still needed and no other
2791          * connection attempt (active or passive) is in progress.
2792          * NB: a reconnect is still needed even when ibp_tx_queue is
2793          * empty if ibp_version != version, since the reconnect may have
2794          * been initiated to renegotiate the protocol version.
2795          */
2796         reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
2797                      peer_ni->ibp_version != version) &&
2798                     peer_ni->ibp_connecting &&
2799                     peer_ni->ibp_accepting == 0;
2800         if (!reconnect) {
2801                 reason = "no need";
2802                 goto out;
2803         }
2804
2805         switch (why) {
2806         default:
2807                 reason = "Unknown";
2808                 break;
2809
2810         case IBLND_REJECT_RDMA_FRAGS: {
2811                 if (!cp) {
2812                         reason = "can't negotiate max frags";
2813                         goto out;
2814                 }
2815
2816                 if (conn->ibc_max_frags <= frag_num) {
2817                         reason = "unsupported max frags";
2818                         goto out;
2819                 }
2820
2821                 peer_ni->ibp_max_frags = frag_num;
2822                 reason = "rdma fragments";
2823                 break;
2824         }
2825         case IBLND_REJECT_MSG_QUEUE_SIZE:
2826                 if (!cp) {
2827                         reason = "can't negotiate queue depth";
2828                         goto out;
2829                 }
2830                 if (conn->ibc_queue_depth <= queue_dep) {
2831                         reason = "unsupported queue depth";
2832                         goto out;
2833                 }
2834
2835                 peer_ni->ibp_queue_depth = queue_dep;
2836                 reason = "queue depth";
2837                 break;
2838
2839         case IBLND_REJECT_CONN_STALE:
2840                 reason = "stale";
2841                 break;
2842
2843         case IBLND_REJECT_CONN_RACE:
2844                 reason = "conn race";
2845                 break;
2846
2847         case IBLND_REJECT_CONN_UNCOMPAT:
2848                 reason = "version negotiation";
2849                 break;
2850         }
2851
2852         conn->ibc_reconnect = 1;
2853         peer_ni->ibp_reconnecting++;
2854         peer_ni->ibp_version = version;
2855         if (incarnation != 0)
2856                 peer_ni->ibp_incarnation = incarnation;
2857  out:
2858         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2859
2860         CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
2861                 libcfs_nid2str(peer_ni->ibp_nid),
2862                 reconnect ? "reconnect" : "don't reconnect",
2863                 reason, IBLND_MSG_VERSION, version, msg_size,
2864                 conn->ibc_queue_depth, queue_dep,
2865                 conn->ibc_max_frags, frag_num);
2866         /*
2867          * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer_ni
2868          * while destroying the zombie
2869          */
2870 }
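/*
 * Illustrative sketch (editor's example, not compiled): the step-down rule
 * kiblnd_check_reconnect() applies above.  A parameter is renegotiated only
 * when the peer_ni advertises a strictly smaller value than we asked for;
 * otherwise the rejection is treated as fatal for that parameter.
 * "example_step_down" is a hypothetical helper.
 */
#if 0
static bool
example_step_down(int asked, int offered, int *renegotiated)
{
        if (offered >= asked)           /* peer_ni offers nothing smaller */
                return false;           /* "unsupported", give up */

        *renegotiated = offered;        /* adopt it for the next CONNREQ */
        return true;
}
#endif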
2871
2872 static void
2873 kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
2874 {
2875         struct kib_peer_ni *peer_ni = conn->ibc_peer;
2876         int status = -ECONNREFUSED;
2877
2878         LASSERT (!in_interrupt());
2879         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2880
2881         switch (reason) {
2882         case IB_CM_REJ_STALE_CONN:
2883                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2884                                        IBLND_REJECT_CONN_STALE, NULL);
2885                 break;
2886
2887         case IB_CM_REJ_INVALID_SERVICE_ID:
2888                 status = -EHOSTUNREACH;
2889                 CNETERR("%s rejected: no listener at %d\n",
2890                         libcfs_nid2str(peer_ni->ibp_nid),
2891                         *kiblnd_tunables.kib_service);
2892                 break;
2893
2894         case IB_CM_REJ_CONSUMER_DEFINED:
2895                 if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
2896                         struct kib_rej *rej = priv;
2897                         struct kib_connparams *cp = NULL;
2898                         bool flip = false;
2899                         __u64 incarnation = -1;
2900
2901                         /* NB. the default incarnation is -1 because:
2902                          * a) V1 ignores the dst incarnation in the connreq.
2903                          * b) V2 provides its incarnation when rejecting me,
2904                          *    so the -1 will be overwritten.
2905                          *
2906                          * If I try to connect to a V1 peer_ni with the V2
2907                          * protocol, it may reject me and then upgrade to V2.
2908                          * Knowing nothing of the upgrade, I reconnect with V1;
2909                          * the upgraded peer_ni can then tell I'm talking to the
2910                          * old version and reject me (incarnation is -1).
2911                          */
2912
2913                         if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
2914                             rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
2915                                 __swab32s(&rej->ibr_magic);
2916                                 __swab16s(&rej->ibr_version);
2917                                 flip = true;
2918                         }
2919
2920                         if (priv_nob >= sizeof(struct kib_rej) &&
2921                             rej->ibr_version > IBLND_MSG_VERSION_1) {
2922                                 /* priv_nob is always 148 in current versions
2923                                  * of OFED (see IB_CM_REJ_PRIVATE_DATA_SIZE),
2924                                  * so we still need to check the version.
2925                                  */
2926                                 cp = &rej->ibr_cp;
2927
2928                                 if (flip) {
2929                                         __swab64s(&rej->ibr_incarnation);
2930                                         __swab16s(&cp->ibcp_queue_depth);
2931                                         __swab16s(&cp->ibcp_max_frags);
2932                                         __swab32s(&cp->ibcp_max_msg_size);
2933                                 }
2934
2935                                 incarnation = rej->ibr_incarnation;
2936                         }
2937
2938                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
2939                             rej->ibr_magic != LNET_PROTO_MAGIC) {
2940                                 CERROR("%s rejected: consumer defined fatal error\n",
2941                                        libcfs_nid2str(peer_ni->ibp_nid));
2942                                 break;
2943                         }
2944
2945                         if (rej->ibr_version != IBLND_MSG_VERSION &&
2946                             rej->ibr_version != IBLND_MSG_VERSION_1) {
2947                                 CERROR("%s rejected: o2iblnd version %x error\n",
2948                                        libcfs_nid2str(peer_ni->ibp_nid),
2949                                        rej->ibr_version);
2950                                 break;
2951                         }
2952
2953                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
2954                             rej->ibr_version == IBLND_MSG_VERSION_1) {
2955                                 CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
2956                                        libcfs_nid2str(peer_ni->ibp_nid),
2957                                        rej->ibr_version);
2958
2959                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
2960                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
2961                         }
2962
2963                         switch (rej->ibr_why) {
2964                         case IBLND_REJECT_CONN_RACE:
2965                         case IBLND_REJECT_CONN_STALE:
2966                         case IBLND_REJECT_CONN_UNCOMPAT:
2967                         case IBLND_REJECT_MSG_QUEUE_SIZE:
2968                         case IBLND_REJECT_RDMA_FRAGS:
2969                                 kiblnd_check_reconnect(conn, rej->ibr_version,
2970                                                        incarnation,
2971                                                        rej->ibr_why, cp);
2972                                 break;
2973
2974                         case IBLND_REJECT_NO_RESOURCES:
2975                                 CERROR("%s rejected: o2iblnd no resources\n",
2976                                        libcfs_nid2str(peer_ni->ibp_nid));
2977                                 break;
2978
2979                         case IBLND_REJECT_FATAL:
2980                                 CERROR("%s rejected: o2iblnd fatal error\n",
2981                                        libcfs_nid2str(peer_ni->ibp_nid));
2982                                 break;
2983
2984                         default:
2985                                 CERROR("%s rejected: o2iblnd reason %d\n",
2986                                        libcfs_nid2str(peer_ni->ibp_nid),
2987                                        rej->ibr_why);
2988                                 break;
2989                         }
2990                         break;
2991                 }
2992                 fallthrough;
2993         default:
2994                 CNETERR("%s rejected: reason %d, size %d\n",
2995                         libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
2996                 break;
2997         }
2998
2999         kiblnd_connreq_done(conn, status);
3000 }
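/*
 * Illustrative sketch (editor's example, not compiled): how
 * kiblnd_rejected() decides whether the private reject data came from an
 * opposite-endian peer_ni.  A recognised magic in swabbed byte order means
 * every multi-byte field must be byte-swapped before use.
 * "example_rej_needs_swab" is a hypothetical helper.
 */
#if 0
static bool
example_rej_needs_swab(__u32 magic)
{
        if (magic == IBLND_MSG_MAGIC || magic == LNET_PROTO_MAGIC)
                return false;   /* same byte order as mine */

        return magic == __swab32(IBLND_MSG_MAGIC) ||
               magic == __swab32(LNET_PROTO_MAGIC);
}
#endif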
3001
3002 static void
3003 kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
3004 {
3005         struct kib_peer_ni *peer_ni = conn->ibc_peer;
3006         struct lnet_ni *ni = peer_ni->ibp_ni;
3007         struct kib_net *net = ni->ni_data;
3008         struct kib_msg *msg = priv;
3009         int            ver  = conn->ibc_version;
3010         int            rc   = kiblnd_unpack_msg(msg, priv_nob);
3011         unsigned long  flags;
3012
3013         LASSERT (net != NULL);
3014
3015         if (rc != 0) {
3016                 CERROR("Can't unpack connack from %s: %d\n",
3017                        libcfs_nid2str(peer_ni->ibp_nid), rc);
3018                 goto failed;
3019         }
3020
3021         if (msg->ibm_type != IBLND_MSG_CONNACK) {
3022                 CERROR("Unexpected message %d from %s\n",
3023                        msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid));
3024                 rc = -EPROTO;
3025                 goto failed;
3026         }
3027
3028         if (ver != msg->ibm_version) {
3029                 CERROR("%s replied version %x, different from the "
3030                        "requested version %x\n",
3031                        libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver);
3032                 rc = -EPROTO;
3033                 goto failed;
3034         }
3035
3036         if (msg->ibm_u.connparams.ibcp_queue_depth >
3037             conn->ibc_queue_depth) {
3038                 CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
3039                        libcfs_nid2str(peer_ni->ibp_nid),
3040                        msg->ibm_u.connparams.ibcp_queue_depth,
3041                        conn->ibc_queue_depth);
3042                 rc = -EPROTO;
3043                 goto failed;
3044         }
3045
3046         if (msg->ibm_u.connparams.ibcp_max_frags >
3047             conn->ibc_max_frags) {
3048                 CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
3049                        libcfs_nid2str(peer_ni->ibp_nid),
3050                        msg->ibm_u.connparams.ibcp_max_frags,
3051                        conn->ibc_max_frags);
3052                 rc = -EPROTO;
3053                 goto failed;
3054         }
3055
3056         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
3057                 CERROR("%s max message size %d too big (%d max)\n",
3058                        libcfs_nid2str(peer_ni->ibp_nid),
3059                        msg->ibm_u.connparams.ibcp_max_msg_size,
3060                        IBLND_MSG_SIZE);
3061                 rc = -EPROTO;
3062                 goto failed;
3063         }
3064
3065         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3066         if (msg->ibm_dstnid == lnet_nid_to_nid4(&ni->ni_nid) &&
3067             msg->ibm_dststamp == net->ibn_incarnation)
3068                 rc = 0;
3069         else
3070                 rc = -ESTALE;
3071         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3072
3073         if (rc != 0) {
3074                 CERROR("Bad connection reply from %s, rc = %d, "
3075                        "version: %x max_frags: %d\n",
3076                        libcfs_nid2str(peer_ni->ibp_nid), rc,
3077                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
3078                 goto failed;
3079         }
3080
3081         conn->ibc_incarnation      = msg->ibm_srcstamp;
3082         conn->ibc_credits          = msg->ibm_u.connparams.ibcp_queue_depth;
3083         conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
3084         conn->ibc_queue_depth      = msg->ibm_u.connparams.ibcp_queue_depth;
3085         conn->ibc_max_frags        = msg->ibm_u.connparams.ibcp_max_frags;
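        /* Every credit must map to a posted receive buffer: IBLND_RX_MSGS()
         * sizes the RX pool as twice the queue depth plus the version's
         * out-of-band messages, so the sum checked below always fits. */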
3086         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
3087                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
3088
3089         kiblnd_connreq_done(conn, 0);
3090         return;
3091
3092  failed:
3093         /* NB My QP has already established itself, so I handle anything going
3094          * wrong here by setting ibc_comms_error.
3095          * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
3096          * immediately tears it down. */
3097
3098         LASSERT (rc != 0);
3099         conn->ibc_comms_error = rc;
3100         kiblnd_connreq_done(conn, 0);
3101 }
3102
3103 static int
3104 kiblnd_active_connect(struct rdma_cm_id *cmid)
3105 {
3106         struct kib_peer_ni *peer_ni = cmid->context;
3107         struct kib_conn *conn;
3108         struct kib_msg *msg;
3109         struct rdma_conn_param cp;
3110         int                      version;
3111         __u64                    incarnation;
3112         unsigned long            flags;
3113         int                      rc;
3114
3115         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3116
3117         incarnation = peer_ni->ibp_incarnation;
3118         version     = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION :
3119                                                  peer_ni->ibp_version;
3120
3121         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3122
3123         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT,
3124                                   version);
3125         if (conn == NULL) {
3126                 kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM);
3127                 kiblnd_peer_decref(peer_ni); /* lose cmid's ref */
3128                 return -ENOMEM;
3129         }
3130
3131         /* conn "owns" cmid now, so I return success from here on to ensure the
3132          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
3133          * on peer_ni */
3134
3135         msg = &conn->ibc_connvars->cv_msg;
3136
3137         memset(msg, 0, sizeof(*msg));
3138         kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
3139         msg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
3140         msg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
3141         msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
3142
3143         kiblnd_pack_msg(peer_ni->ibp_ni, msg, version,
3144                         0, peer_ni->ibp_nid, incarnation);
3145
3146         memset(&cp, 0, sizeof(cp));
3147         cp.private_data        = msg;
3148         cp.private_data_len    = msg->ibm_nob;
3149         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
3150         cp.initiator_depth     = 0;
3151         cp.flow_control        = 1;
3152         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
3153         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
3154
3155         LASSERT(cmid->context == (void *)conn);
3156         LASSERT(conn->ibc_cmid == cmid);
3157         rc = rdma_connect_locked(cmid, &cp);
3158         if (rc != 0) {
3159                 CERROR("Can't connect to %s: %d\n",
3160                        libcfs_nid2str(peer_ni->ibp_nid), rc);
3161                 kiblnd_connreq_done(conn, rc);
3162                 kiblnd_conn_decref(conn);
3163         }
3164
3165         return 0;
3166 }
3167
3168 int
3169 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
3170 {
3171         struct kib_peer_ni *peer_ni;
3172         struct kib_conn *conn;
3173         int rc;
3174
3175         switch (event->event) {
3176         default:
3177                 CERROR("Unexpected event: %d, status: %d\n",
3178                        event->event, event->status);
3179                 LBUG();
3180
3181         case RDMA_CM_EVENT_CONNECT_REQUEST:
3182                 /* destroy cmid on failure */
3183                 rc = kiblnd_passive_connect(cmid,
3184                                             (void *)KIBLND_CONN_PARAM(event),
3185                                             KIBLND_CONN_PARAM_LEN(event));
3186                 CDEBUG(D_NET, "connreq: %d\n", rc);
3187                 return rc;
3188
3189         case RDMA_CM_EVENT_ADDR_ERROR:
3190                 peer_ni = cmid->context;
3191                 CNETERR("%s: ADDR ERROR %d\n",
3192                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3193                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3194                 kiblnd_peer_decref(peer_ni);
3195                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
3196
3197         case RDMA_CM_EVENT_ADDR_RESOLVED:
3198                 peer_ni = cmid->context;
3199
3200                 CDEBUG(D_NET, "%s Addr resolved: %d\n",
3201                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3202
3203                 if (event->status != 0) {
3204                         CNETERR("Can't resolve address for %s: %d\n",
3205                                 libcfs_nid2str(peer_ni->ibp_nid), event->status);
3206                         rc = event->status;
3207                 } else {
3208                         rc = rdma_resolve_route(
3209                                 cmid, kiblnd_timeout() * 1000);
3210                         if (rc == 0) {
3211                                 struct kib_net *net = peer_ni->ibp_ni->ni_data;
3212                                 struct kib_dev *dev = net->ibn_dev;
3213
3214                                 CDEBUG(D_NET, "%s: connection bound to "
3215                                        "%s:%pI4h:%s\n",
3216                                        libcfs_nid2str(peer_ni->ibp_nid),
3217                                        dev->ibd_ifname,
3218                                        &dev->ibd_ifip, cmid->device->name);
3219
3220                                 return 0;
3221                         }
3222
3223                         /* Can't initiate route resolution */
3224                         CERROR("Can't resolve route for %s: %d\n",
3225                                libcfs_nid2str(peer_ni->ibp_nid), rc);
3226                 }
3227                 kiblnd_peer_connect_failed(peer_ni, 1, rc);
3228                 kiblnd_peer_decref(peer_ni);
3229                 return rc;                      /* rc != 0 destroys cmid */
3230
3231         case RDMA_CM_EVENT_ROUTE_ERROR:
3232                 peer_ni = cmid->context;
3233                 CNETERR("%s: ROUTE ERROR %d\n",
3234                         libcfs_nid2str(peer_ni->ibp_nid), event->status);
3235                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3236                 kiblnd_peer_decref(peer_ni);
3237                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
3238
3239         case RDMA_CM_EVENT_ROUTE_RESOLVED:
3240                 peer_ni = cmid->context;
3241                 CDEBUG(D_NET, "%s Route resolved: %d\n",
3242                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3243
3244                 if (event->status == 0)
3245                         return kiblnd_active_connect(cmid);
3246
3247                 CNETERR("Can't resolve route for %s: %d\n",
3248                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3249                 kiblnd_peer_connect_failed(peer_ni, 1, event->status);
3250                 kiblnd_peer_decref(peer_ni);
3251                 return event->status;           /* rc != 0 destroys cmid */
3252
3253         case RDMA_CM_EVENT_UNREACHABLE:
3254                 conn = cmid->context;
3255                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3256                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3257                 CNETERR("%s: UNREACHABLE %d\n",
3258                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3259                 kiblnd_connreq_done(conn, -ENETDOWN);
3260                 kiblnd_conn_decref(conn);
3261                 return 0;
3262
3263         case RDMA_CM_EVENT_CONNECT_ERROR:
3264                 conn = cmid->context;
3265                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3266                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3267                 CNETERR("%s: CONNECT ERROR %d\n",
3268                         libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3269                 kiblnd_connreq_done(conn, -ENOTCONN);
3270                 kiblnd_conn_decref(conn);
3271                 return 0;
3272
3273         case RDMA_CM_EVENT_REJECTED:
3274                 conn = cmid->context;
3275                 switch (conn->ibc_state) {
3276                 default:
3277                         LBUG();
3278
3279                 case IBLND_CONN_PASSIVE_WAIT:
3280                         CERROR ("%s: REJECTED %d\n",
3281                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
3282                                 event->status);
3283                         kiblnd_connreq_done(conn, -ECONNRESET);
3284                         break;
3285
3286                 case IBLND_CONN_ACTIVE_CONNECT:
3287                         kiblnd_rejected(conn, event->status,
3288                                         (void *)KIBLND_CONN_PARAM(event),
3289                                         KIBLND_CONN_PARAM_LEN(event));
3290                         break;
3291                 }
3292                 kiblnd_conn_decref(conn);
3293                 return 0;
3294
3295         case RDMA_CM_EVENT_ESTABLISHED:
3296                 conn = cmid->context;
3297                 switch (conn->ibc_state) {
3298                 default:
3299                         LBUG();
3300
3301                 case IBLND_CONN_PASSIVE_WAIT:
3302                         CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
3303                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3304                         kiblnd_connreq_done(conn, 0);
3305                         break;
3306
3307                 case IBLND_CONN_ACTIVE_CONNECT:
3308                         CDEBUG(D_NET, "ESTABLISHED (active): %s\n",
3309                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3310                         kiblnd_check_connreply(conn,
3311                                                (void *)KIBLND_CONN_PARAM(event),
3312                                                KIBLND_CONN_PARAM_LEN(event));
3313                         break;
3314                 }
3315                 /* net keeps its ref on conn! */
3316                 return 0;
3317
3318         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
3319                 CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
3320                 return 0;
3321
3322         case RDMA_CM_EVENT_DISCONNECTED:
3323                 conn = cmid->context;
3324                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
3325                         CERROR("%s DISCONNECTED\n",
3326                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3327                         kiblnd_connreq_done(conn, -ECONNRESET);
3328                 } else {
3329                         kiblnd_close_conn(conn, 0);
3330                 }
3331                 kiblnd_conn_decref(conn);
3332                 cmid->context = NULL;
3333                 return 0;
3334
3335         case RDMA_CM_EVENT_DEVICE_REMOVAL:
3336                 LCONSOLE_ERROR_MSG(0x131,
3337                                    "Received notification of device removal\n"
3338                                    "Please shut down LNET to allow this to proceed\n");
3339                 /* Can't remove network from underneath LNET for now, so I have
3340                  * to ignore this */
3341                 return 0;
3342
3343         case RDMA_CM_EVENT_ADDR_CHANGE:
3344                 LCONSOLE_INFO("Physical link changed (e.g. HCA/port)\n");
3345                 return 0;
3346         }
3347 }
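/*
 * For reference, the active-connect event flow implied by the cases above
 * (rdma_resolve_addr() is issued elsewhere in this file):
 *
 *   rdma_resolve_addr()   -> ADDR_RESOLVED  -> rdma_resolve_route()
 *   rdma_resolve_route()  -> ROUTE_RESOLVED -> kiblnd_active_connect()
 *   rdma_connect_locked() -> ESTABLISHED    -> kiblnd_check_connreply()
 *                          | REJECTED       -> kiblnd_rejected()
 *                          | UNREACHABLE / CONNECT_ERROR -> connreq fails
 */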
3348
3349 static int
3350 kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
3351 {
3352         struct kib_tx *tx;
3353
3354         list_for_each_entry(tx, txs, tx_list) {
3355                 if (txs != &conn->ibc_active_txs) {
3356                         LASSERT(tx->tx_queued);
3357                 } else {
3358                         LASSERT(!tx->tx_queued);
3359                         LASSERT(tx->tx_waiting || tx->tx_sending != 0);
3360                 }
3361
3362                 if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
3363                         CERROR("Timed out tx: %s(WSQ:%d%d%d), %lld seconds\n",
3364                                kiblnd_queue2str(conn, txs),
3365                                tx->tx_waiting, tx->tx_sending, tx->tx_queued,
3366                                kiblnd_timeout() +
3367                                ktime_ms_delta(ktime_get(),
3368                                               tx->tx_deadline) / MSEC_PER_SEC);
3369                         return 1;
3370                 }
3371         }
3372
3373         return 0;
3374 }
3375
3376 static int
3377 kiblnd_conn_timed_out_locked(struct kib_conn *conn)
3378 {
3379         return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
3380                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
3381                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
3382                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
3383                 kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
3384 }
3385
3386 static void
3387 kiblnd_check_conns (int idx)
3388 {
3389         LIST_HEAD(closes);
3390         LIST_HEAD(checksends);
3391         LIST_HEAD(timedout_txs);
3392         struct hlist_head *peers = &kiblnd_data.kib_peers[idx];
3393         struct kib_peer_ni *peer_ni;
3394         struct kib_conn *conn;
3395         struct kib_tx *tx, *tx_tmp;
3396         unsigned long flags;
3397
3398         /* NB. We expect to look at all the peers and not find any RDMAs
3399          * to time out, but timed-out txs are moved off their queues here,
3400          * so we must hold the global lock exclusively while we look...
3401          */
3402         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3403
3404         hlist_for_each_entry(peer_ni, peers, ibp_list) {
3405                 /* Check tx_deadline */
3406                 list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
3407                         if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
3408                                 CWARN("Timed out tx for %s: %lld seconds\n",
3409                                       libcfs_nid2str(peer_ni->ibp_nid),
3410                                       ktime_ms_delta(ktime_get(),
3411                                                      tx->tx_deadline) / MSEC_PER_SEC);
3412                                 list_move(&tx->tx_list, &timedout_txs);
3413                         }
3414                 }
3415
3416                 list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) {
3417                         int timedout;
3418                         int sendnoop;
3419
3420                         LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
3421
3422                         spin_lock(&conn->ibc_lock);
3423
3424                         sendnoop = kiblnd_need_noop(conn);
3425                         timedout = kiblnd_conn_timed_out_locked(conn);
3426                         if (!sendnoop && !timedout) {
3427                                 spin_unlock(&conn->ibc_lock);
3428                                 continue;
3429                         }
3430
3431                         if (timedout) {
3432                                 CERROR("Timed out RDMA with %s (%lld): c: %u, oc: %u, rc: %u\n",
3433                                        libcfs_nid2str(peer_ni->ibp_nid),
3434                                        ktime_get_seconds()
3435                                        - peer_ni->ibp_last_alive,
3436                                        conn->ibc_credits,
3437                                        conn->ibc_outstanding_credits,
3438                                        conn->ibc_reserved_credits);
3439                                 list_add(&conn->ibc_connd_list, &closes);
3440                         } else {
3441                                 list_add(&conn->ibc_connd_list, &checksends);
3442                         }
3443                         /* +ref for 'closes' or 'checksends' */
3444                         kiblnd_conn_addref(conn);
3445
3446                         spin_unlock(&conn->ibc_lock);
3447                 }
3448         }
3449
3450         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3451
3452         if (!list_empty(&timedout_txs))
3453                 kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT,
3454                                    LNET_MSG_STATUS_NETWORK_TIMEOUT);
3455
3456         /* Handle timeout by closing the whole
3457          * connection. We can only be sure RDMA activity
3458          * has ceased once the QP has been modified.
3459          */
3460         while ((conn = list_first_entry_or_null(&closes,
3461                                                 struct kib_conn,
3462                                                 ibc_connd_list)) != NULL) {
3463                 list_del(&conn->ibc_connd_list);
3464                 kiblnd_close_conn(conn, -ETIMEDOUT);
3465                 kiblnd_conn_decref(conn);
3466         }
3467
3468         /* In case we have enough credits to return via a
3469          * NOOP, but there were no non-blocking tx descs
3470          * free to do it last time...
3471          */
3472         while ((conn = list_first_entry_or_null(&checksends,
3473                                                 struct kib_conn,
3474                                                 ibc_connd_list)) != NULL) {
3475                 list_del(&conn->ibc_connd_list);
3476
3477                 spin_lock(&conn->ibc_lock);
3478                 kiblnd_check_sends_locked(conn);
3479                 spin_unlock(&conn->ibc_lock);
3480
3481                 kiblnd_conn_decref(conn);
3482         }
3483 }
3484
3485 static void
3486 kiblnd_disconnect_conn(struct kib_conn *conn)
3487 {
3488         LASSERT (!in_interrupt());
3489         LASSERT (current == kiblnd_data.kib_connd);
3490         LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
3491
3492         rdma_disconnect(conn->ibc_cmid);
3493         kiblnd_finalise_conn(conn);
3494
3495         kiblnd_peer_notify(conn->ibc_peer);
3496 }
3497
3498 /*
3499  * High-water mark for reconnection to the same peer_ni; reconnection
3500  * attempts are delayed after more than KIB_RECONN_HIGH_RACE tries.
3501  */
3502 #define KIB_RECONN_HIGH_RACE    10
3503 /*
3504  * Allow connd to take a break and handle other things after consecutive
3505  * reconnection attempts.
3506  */
3507 #define KIB_RECONN_BREAK        100
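/*
 * Together these give connd its pacing: zombies whose peer_ni has already
 * raced KIB_RECONN_HIGH_RACE times park on kib_reconn_wait and are spliced
 * back onto kib_reconn_list at most once per wall-clock second, and each
 * pass of the main loop below retries at most KIB_RECONN_BREAK
 * reconnections before servicing other work.
 */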
3508
3509 int
3510 kiblnd_connd (void *arg)
3511 {
3512         spinlock_t *lock = &kiblnd_data.kib_connd_lock;
3513         wait_queue_entry_t wait;
3514         unsigned long flags;
3515         struct kib_conn *conn;
3516         int timeout;
3517         int i;
3518         bool dropped_lock;
3519         int peer_index = 0;
3520         unsigned long deadline = jiffies;
3521
3522         init_wait(&wait);
3523         kiblnd_data.kib_connd = current;
3524
3525         spin_lock_irqsave(lock, flags);
3526
3527         while (!kiblnd_data.kib_shutdown) {
3528                 int reconn = 0;
3529
3530                 dropped_lock = false;
3531
3532                 conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies,
3533                                                 struct kib_conn, ibc_list);
3534                 if (conn) {
3535                         struct kib_peer_ni *peer_ni = NULL;
3536
3537                         list_del(&conn->ibc_list);
3538                         if (conn->ibc_reconnect) {
3539                                 peer_ni = conn->ibc_peer;
3540                                 kiblnd_peer_addref(peer_ni);
3541                         }
3542
3543                         spin_unlock_irqrestore(lock, flags);
3544                         dropped_lock = true;
3545
3546                         kiblnd_destroy_conn(conn);
3547
3548                         spin_lock_irqsave(lock, flags);
3549                         if (!peer_ni) {
3550                                 LIBCFS_FREE(conn, sizeof(*conn));
3551                                 continue;
3552                         }
3553
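                        /* The conn is already torn down, but its memory is
                         * recycled as a carrier for the peer_ni ref on the
                         * reconnection lists; it is finally freed after
                         * kiblnd_reconnect_peer() runs below. */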
3554                         conn->ibc_peer = peer_ni;
3555                         if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
3556                                 list_add_tail(&conn->ibc_list,
3557                                               &kiblnd_data.kib_reconn_list);
3558                         else
3559                                 list_add_tail(&conn->ibc_list,
3560                                               &kiblnd_data.kib_reconn_wait);
3561                 }
3562
3563                 conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns,
3564                                                 struct kib_conn, ibc_list);
3565                 if (conn) {
3566                         int wait;
3567
3568                         list_del(&conn->ibc_list);
3569
3570                         spin_unlock_irqrestore(lock, flags);
3571                         dropped_lock = true;
3572
3573                         kiblnd_disconnect_conn(conn);
3574                         wait = conn->ibc_waits;
3575                         if (wait == 0) /* keep ref for connd_wait, see below */
3576                                 kiblnd_conn_decref(conn);
3577
3578                         spin_lock_irqsave(lock, flags);
3579
3580                         if (wait)
3581                                 list_add_tail(&conn->ibc_list,
3582                                               &kiblnd_data.kib_connd_waits);
3583                 }
3584
3585                 while (reconn < KIB_RECONN_BREAK) {
3586                         if (kiblnd_data.kib_reconn_sec !=
3587                             ktime_get_real_seconds()) {
3588                                 kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
3589                                 list_splice_init(&kiblnd_data.kib_reconn_wait,
3590                                                  &kiblnd_data.kib_reconn_list);
3591                         }
3592
3593                         conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list,
3594                                                         struct kib_conn, ibc_list);
3595                         if (!conn)
3596                                 break;
3597
3598                         list_del(&conn->ibc_list);
3599
3600                         spin_unlock_irqrestore(lock, flags);
3601                         dropped_lock = true;
3602
3603                         reconn += kiblnd_reconnect_peer(conn->ibc_peer);
3604                         kiblnd_peer_decref(conn->ibc_peer);
3605                         LIBCFS_FREE(conn, sizeof(*conn));
3606
3607                         spin_lock_irqsave(lock, flags);
3608                 }
3609
3610                 conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits,
3611                                                 struct kib_conn, ibc_list);
3612                 if (conn) {
3613                         list_del(&conn->ibc_list);
3614                         spin_unlock_irqrestore(lock, flags);
3615
3616                         dropped_lock = kiblnd_tx_may_discard(conn);
3617                         if (dropped_lock)
3618                                 kiblnd_conn_decref(conn);
3619
3620                         spin_lock_irqsave(lock, flags);
3621                         if (!dropped_lock)
3622                                 list_add_tail(&conn->ibc_list,
3623                                               &kiblnd_data.kib_connd_waits);
3624                 }
3625
3626                 /* careful with the jiffy wrap... */
3627                 timeout = (int)(deadline - jiffies);
3628                 if (timeout <= 0) {
3629                         const int n = 4;
3630                         const int p = 1;
3631                         int chunk = HASH_SIZE(kiblnd_data.kib_peers);
3632                         unsigned int lnd_timeout;
3633
3634                         spin_unlock_irqrestore(lock, flags);
3635                         dropped_lock = true;
3636
3637                         /* Time to check for RDMA timeouts on a few more
3638                          * peers: I do checks every 'p' seconds on a
3639                          * proportion of the peer_ni table and I need to check
3640                          * every connection 'n' times within a timeout
3641                          * interval, to ensure I detect a timeout on any
3642                          * connection within (n+1)/n times the timeout
3643                          * interval.
3644                          */
3645
3646                         lnd_timeout = kiblnd_timeout();
3647                         if (lnd_timeout > n * p)
3648                                 chunk = (chunk * n * p) / lnd_timeout;
3649                         if (chunk == 0)
3650                                 chunk = 1;
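                        /* e.g. with a (hypothetical) 50s lnd_timeout and a
                         * 512-bucket peer_ni table, chunk = 512 * 4 * 1 / 50
                         * = 40 buckets per second, so every bucket is
                         * visited ~4 times per timeout interval. */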
3651
3652                         for (i = 0; i < chunk; i++) {
3653                                 kiblnd_check_conns(peer_index);
3654                                 peer_index = (peer_index + 1) %
3655                                         HASH_SIZE(kiblnd_data.kib_peers);
3656                         }
3657
3658                         deadline += cfs_time_seconds(p);
3659                         spin_lock_irqsave(lock, flags);
3660                 }
3661
3662                 if (dropped_lock)
3663                         continue;
3664
3665                 /* Nothing to do for 'timeout'  */
3666                 set_current_state(TASK_INTERRUPTIBLE);
3667                 add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3668                 spin_unlock_irqrestore(lock, flags);
3669
3670                 schedule_timeout(timeout);
3671
3672                 remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3673                 spin_lock_irqsave(lock, flags);
3674         }
3675
3676         spin_unlock_irqrestore(lock, flags);
3677
3678         kiblnd_thread_fini();
3679         return 0;
3680 }
3681
3682 void
3683 kiblnd_qp_event(struct ib_event *event, void *arg)
3684 {
3685         struct kib_conn *conn = arg;
3686
3687         switch (event->event) {
3688         case IB_EVENT_COMM_EST:
3689                 CDEBUG(D_NET, "%s established\n",
3690                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
3691                 /* We received a packet but the connection isn't established
3692                  * yet; the handshake packet was probably lost, so we are
3693                  * free to force the connection into the established state */
3694                 rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
3695                 return;
3696
3697         case IB_EVENT_PORT_ERR:
3698         case IB_EVENT_DEVICE_FATAL:
3699                 CERROR("Fatal device error for NI %s\n",
3700                        libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
3701                 atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
3702                 return;
3703
3704         case IB_EVENT_PORT_ACTIVE:
3705                 CERROR("Port reactivated for NI %s\n",
3706                        libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
3707                 atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
3708                 return;
3709
3710         default:
3711                 CERROR("%s: Async QP event type %d\n",
3712                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3713                 return;
3714         }
3715 }
3716
3717 static void
3718 kiblnd_complete (struct ib_wc *wc)
3719 {
3720         switch (kiblnd_wreqid2type(wc->wr_id)) {
3721         default:
3722                 LBUG();
3723
3724         case IBLND_WID_MR:
3725                 if (wc->status != IB_WC_SUCCESS &&
3726                     wc->status != IB_WC_WR_FLUSH_ERR)
3727                         CNETERR("FastReg failed: %d\n", wc->status);
3728                 return;
3729
3730         case IBLND_WID_RDMA:
3731                 /* We only get RDMA completion notification if it fails.  All
3732                  * subsequent work items, including the final SEND will fail
3733                  * too.  However we can't print out any more info about the
3734                  * failing RDMA because 'tx' might be back on the idle list or
3735                  * even reused already if we didn't manage to post all our work
3736                  * items */
3737                 CNETERR("RDMA (tx: %p) failed: %d\n",
3738                         kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3739                 return;
3740
3741         case IBLND_WID_TX:
3742                 kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3743                 return;
3744
3745         case IBLND_WID_RX:
3746                 kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
3747                                    wc->byte_len);
3748                 return;
3749         }
3750 }
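/*
 * Illustrative sketch (editor's example, not compiled): the dispatch above
 * works because a work-request id is a pointer with an IBLND_WID_* type tag
 * packed into its low bits, which are free since the pointed-to structures
 * are word-aligned.  The real helpers live in o2iblnd.h; the mask below is
 * a hypothetical stand-in.
 */
#if 0
#define EXAMPLE_WID_MASK        7ULL

static inline __u64
example_ptr2wreqid(void *ptr, int type)
{
        return (__u64)((unsigned long)ptr | type);
}

static inline int
example_wreqid2type(__u64 wr_id)
{
        return (int)(wr_id & EXAMPLE_WID_MASK);
}

static inline void *
example_wreqid2ptr(__u64 wr_id)
{
        return (void *)(unsigned long)(wr_id & ~EXAMPLE_WID_MASK);
}
#endif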
3751
3752 void
3753 kiblnd_cq_completion(struct ib_cq *cq, void *arg)
3754 {
3755         /* NB I'm not allowed to schedule this conn once its refcount has
3756          * reached 0.  Since fundamentally I'm racing with scheduler threads
3757          * consuming my CQ I could be called after all completions have
3758          * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
3759          * and this CQ is about to be destroyed so I NOOP. */
3760         struct kib_conn *conn = arg;
3761         struct kib_sched_info *sched = conn->ibc_sched;
3762         unsigned long flags;
3763
3764         LASSERT(cq == conn->ibc_cq);
3765
3766         spin_lock_irqsave(&sched->ibs_lock, flags);
3767
3768         conn->ibc_ready = 1;
3769
3770         if (!conn->ibc_scheduled &&
3771             (conn->ibc_nrx > 0 ||
3772              conn->ibc_nsends_posted > 0)) {
3773                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
3774                 conn->ibc_scheduled = 1;
3775                 list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
3776
3777                 if (waitqueue_active(&sched->ibs_waitq))
3778                         wake_up(&sched->ibs_waitq);
3779         }
3780
3781         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3782 }
3783
3784 void
3785 kiblnd_cq_event(struct ib_event *event, void *arg)
3786 {
3787         struct kib_conn *conn = arg;
3788
3789         CERROR("%s: async CQ event type %d\n",
3790                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3791 }
3792
3793 int
3794 kiblnd_scheduler(void *arg)
3795 {
3796         long id = (long)arg;
3797         struct kib_sched_info *sched;
3798         struct kib_conn *conn;
3799         wait_queue_entry_t wait;
3800         unsigned long flags;
3801         struct ib_wc wc;
3802         bool did_something;
3803         int rc;
3804
3805         init_wait(&wait);
3806
3807         sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
3808
3809         rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
3810         if (rc != 0) {
3811                 CWARN("Unable to bind on CPU partition %d; please verify that all CPUs are healthy and reload modules if necessary, otherwise your system might be at risk of low performance\n", sched->ibs_cpt);
3812         }
3813
3814         spin_lock_irqsave(&sched->ibs_lock, flags);
3815
3816         while (!kiblnd_data.kib_shutdown) {
3817                 if (need_resched()) {
3818                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3819
3820                         cond_resched();
3821
3822                         spin_lock_irqsave(&sched->ibs_lock, flags);
3823                 }
3824
3825                 did_something = false;
3826
3827                 conn = list_first_entry_or_null(&sched->ibs_conns,
3828                                                 struct kib_conn,
3829                                                 ibc_sched_list);
3830                 if (conn) {
3831                         /* take over kib_sched_conns' ref on conn... */
3832                         LASSERT(conn->ibc_scheduled);
3833                         list_del(&conn->ibc_sched_list);
3834                         conn->ibc_ready = 0;
3835
3836                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3837
3838                         wc.wr_id = IBLND_WID_INVAL;
3839
3840                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3841                         if (rc == 0) {
3842                                 rc = ib_req_notify_cq(conn->ibc_cq,
3843                                                       IB_CQ_NEXT_COMP);
3844                                 if (rc < 0) {
3845                                         CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
3846                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
3847                                         kiblnd_close_conn(conn, -EIO);
3848                                         kiblnd_conn_decref(conn);
3849                                         spin_lock_irqsave(&sched->ibs_lock,
3850                                                           flags);
3851                                         continue;
3852                                 }
3853
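                                /* NB: re-poll after arming the CQ to catch
                                 * any completion that arrived between the
                                 * empty poll above and ib_req_notify_cq();
                                 * otherwise it would go unnoticed until the
                                 * next CQ event */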
3854                                 rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3855                         }
3856
3857                         if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
3858                                 LCONSOLE_ERROR(
3859                                         "ib_poll_cq (rc: %d) returned invalid "
3860                                         "wr_id, opcode %d, status: %d, "
3861                                         "vendor_err: %d, conn: %s status: %d\n"
3862                                         "please upgrade firmware and OFED or "
3863                                         "contact vendor.\n", rc,
3864                                         wc.opcode, wc.status, wc.vendor_err,
3865                                         libcfs_nid2str(conn->ibc_peer->ibp_nid),
3866                                         conn->ibc_state);
3867                                 rc = -EINVAL;
3868                         }
3869
3870                         if (rc < 0) {
3871                                 CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
3872                                       libcfs_nid2str(conn->ibc_peer->ibp_nid),
3873                                       rc);
3874                                 kiblnd_close_conn(conn, -EIO);
3875                                 kiblnd_conn_decref(conn);
3876                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3877                                 continue;
3878                         }
3879
3880                         spin_lock_irqsave(&sched->ibs_lock, flags);
3881
3882                         if (rc != 0 || conn->ibc_ready) {
3883                                 /* There may be another completion waiting; get
3884                                  * another scheduler to check while I handle
3885                                  * this one... */
3886                                 /* +1 ref for sched_conns */
3887                                 kiblnd_conn_addref(conn);
3888                                 list_add_tail(&conn->ibc_sched_list,
3889                                               &sched->ibs_conns);
3890                                 if (waitqueue_active(&sched->ibs_waitq))
3891                                         wake_up(&sched->ibs_waitq);
3892                         } else {
3893                                 conn->ibc_scheduled = 0;
3894                         }
3895
3896                         if (rc != 0) {
3897                                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3898                                 kiblnd_complete(&wc);
3899
3900                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3901                         }
3902
3903                         kiblnd_conn_decref(conn); /* ..drop my ref from above */
3904                         did_something = true;
3905                 }
3906
3907                 if (did_something)
3908                         continue;
3909
3910                 set_current_state(TASK_INTERRUPTIBLE);
3911                 add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
3912                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3913
3914                 schedule();
3915
3916                 remove_wait_queue(&sched->ibs_waitq, &wait);
3917                 set_current_state(TASK_RUNNING);
3918                 spin_lock_irqsave(&sched->ibs_lock, flags);
3919         }
3920
3921         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3922
3923         kiblnd_thread_fini();
3924         return 0;
3925 }
3926
3927 int
3928 kiblnd_failover_thread(void *arg)
3929 {
3930         rwlock_t *glock = &kiblnd_data.kib_global_lock;
3931         struct kib_dev *dev;
3932         struct net *ns = arg;
3933         wait_queue_entry_t wait;
3934         unsigned long flags;
3935         int rc;
3936
3937         LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
3938
3939         init_wait(&wait);
3940         write_lock_irqsave(glock, flags);
3941
3942         while (!kiblnd_data.kib_shutdown) {
3943                 bool do_failover = false;
3944                 int long_sleep;
3945
3946                 list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
3947                                     ibd_fail_list) {
3948                         if (ktime_get_seconds() < dev->ibd_next_failover)
3949                                 continue;
3950                         do_failover = true;
3951                         break;
3952                 }
3953
3954                 if (do_failover) {
3955                         list_del_init(&dev->ibd_fail_list);
3956                         dev->ibd_failover = 1;
3957                         write_unlock_irqrestore(glock, flags);
3958
3959                         rc = kiblnd_dev_failover(dev, ns);
3960
3961                         write_lock_irqsave(glock, flags);
3962
3963                         LASSERT(dev->ibd_failover);
3964                         dev->ibd_failover = 0;
3965                         if (rc >= 0) { /* Device is OK or failover succeeded */
3966                                 dev->ibd_next_failover = ktime_get_seconds() + 3;
3967                                 continue;
3968                         }
3969
3970                         /* failed to failover, retry later */
3971                         dev->ibd_next_failover = ktime_get_seconds() +
3972                                 min(dev->ibd_failed_failover, 10);
3973                         if (kiblnd_dev_can_failover(dev)) {
3974                                 list_add_tail(&dev->ibd_fail_list,
3975                                               &kiblnd_data.kib_failed_devs);
3976                         }
3977
3978                         continue;
3979                 }
3980
3981                 /* long sleep if no more pending failover */
3982                 long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
3983
3984                 set_current_state(TASK_INTERRUPTIBLE);
3985                 add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3986                 write_unlock_irqrestore(glock, flags);
3987
3988                 rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
3989                                       cfs_time_seconds(1));
3990                 set_current_state(TASK_RUNNING);
3991                 remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3992                 write_lock_irqsave(glock, flags);
3993
3994                 if (!long_sleep || rc != 0)
3995                         continue;
3996
3997                 /* After a long sleep, routinely check all active devices;
3998                  * we need this because if a dev has no active connection
3999                  * and no local SENDs, we may listen on the wrong HCA
4000                  * forever after a bonding failover.
4001                  */
4002                 list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
4003                         if (kiblnd_dev_can_failover(dev)) {
4004                                 list_add_tail(&dev->ibd_fail_list,
4005                                               &kiblnd_data.kib_failed_devs);
4006                         }
4007                 }
4008         }
4009
4010         write_unlock_irqrestore(glock, flags);
4011
4012         kiblnd_thread_fini();
4013         return 0;
4014 }