1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lnet/klnds/o2iblnd/o2iblnd_cb.c
33  *
34  * Author: Eric Barton <eric@bartonsoftware.com>
35  */
36
37 #include "o2iblnd.h"
38
39 #define MAX_CONN_RACES_BEFORE_ABORT 20
40
41 static void kiblnd_peer_alive(kib_peer_ni_t *peer_ni);
42 static void kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error);
43 static void kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx,
44                                int type, int body_nob);
45 static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
46                             int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
47 static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
48 static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
49
50 static void kiblnd_unmap_tx(kib_tx_t *tx);
51 static void kiblnd_check_sends_locked(kib_conn_t *conn);
52
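/*
 * Free a tx descriptor once it is completely done: unmap its DMA buffers,
 * drop its connection ref, return it to its tx pool, and only then finalise
 * the (up to two) LNet messages it carried with the tx status.
 */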
53 void
54 kiblnd_tx_done(kib_tx_t *tx)
55 {
56         struct lnet_msg *lntmsg[2];
57         int         rc;
58         int         i;
59
60         LASSERT (!in_interrupt());
61         LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
62         LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
63         LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer_ni response */
64         LASSERT (tx->tx_pool != NULL);
65
66         kiblnd_unmap_tx(tx);
67
68         /* tx may have up to 2 lnet msgs to finalise */
69         lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
70         lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
71         rc = tx->tx_status;
72
73         if (tx->tx_conn != NULL) {
74                 kiblnd_conn_decref(tx->tx_conn);
75                 tx->tx_conn = NULL;
76         }
77
78         tx->tx_nwrq = tx->tx_nsge = 0;
79         tx->tx_status = 0;
80
81         kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
82
83         /* delay finalize until my descs have been freed */
84         for (i = 0; i < 2; i++) {
85                 if (lntmsg[i] == NULL)
86                         continue;
87
88                 lnet_finalize(lntmsg[i], rc);
89         }
90 }
91
92 void
93 kiblnd_txlist_done(struct list_head *txlist, int status)
94 {
95         kib_tx_t *tx;
96
97         while (!list_empty(txlist)) {
98                 tx = list_entry(txlist->next, kib_tx_t, tx_list);
99
100                 list_del(&tx->tx_list);
101                 /* complete now */
102                 tx->tx_waiting = 0;
103                 tx->tx_status = status;
104                 kiblnd_tx_done(tx);
105         }
106 }
107
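/*
 * Allocate an idle tx descriptor from the tx pool of the CPT that owns
 * 'target'.  Returns NULL if the pool set cannot supply one.
 */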
108 static kib_tx_t *
109 kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
110 {
111         kib_net_t               *net = (kib_net_t *)ni->ni_data;
112         struct list_head        *node;
113         kib_tx_t                *tx;
114         kib_tx_poolset_t        *tps;
115
116         tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
117         node = kiblnd_pool_alloc_node(&tps->tps_poolset);
118         if (node == NULL)
119                 return NULL;
120         tx = container_of(node, kib_tx_t, tx_list);
121
122         LASSERT (tx->tx_nwrq == 0);
123         LASSERT (!tx->tx_queued);
124         LASSERT (tx->tx_sending == 0);
125         LASSERT (!tx->tx_waiting);
126         LASSERT (tx->tx_status == 0);
127         LASSERT (tx->tx_conn == NULL);
128         LASSERT (tx->tx_lntmsg[0] == NULL);
129         LASSERT (tx->tx_lntmsg[1] == NULL);
130         LASSERT (tx->tx_nfrags == 0);
131
132         return tx;
133 }
134
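/*
 * Retire an rx that will not be re-posted: decrement the connection's rx
 * count under the scheduler lock and drop the rx's ref on the connection.
 */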
135 static void
136 kiblnd_drop_rx(kib_rx_t *rx)
137 {
138         kib_conn_t              *conn   = rx->rx_conn;
139         struct kib_sched_info   *sched  = conn->ibc_sched;
140         unsigned long           flags;
141
142         spin_lock_irqsave(&sched->ibs_lock, flags);
143         LASSERT(conn->ibc_nrx > 0);
144         conn->ibc_nrx--;
145         spin_unlock_irqrestore(&sched->ibs_lock, flags);
146
147         kiblnd_conn_decref(conn);
148 }
149
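/*
 * Post a receive buffer on the connection's QP.  'credit' says whether this
 * buffer returns a credit to the peer_ni (peer credit, reserved credit, or
 * none); if it does, bump the matching counter and kick the send path.
 */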
150 int
151 kiblnd_post_rx (kib_rx_t *rx, int credit)
152 {
153         kib_conn_t         *conn = rx->rx_conn;
154         kib_net_t          *net = conn->ibc_peer->ibp_ni->ni_data;
155         struct ib_recv_wr  *bad_wrq = NULL;
156 #ifdef HAVE_IB_GET_DMA_MR
157         struct ib_mr       *mr = conn->ibc_hdev->ibh_mrs;
158 #endif
159         int                 rc;
160
161         LASSERT (net != NULL);
162         LASSERT (!in_interrupt());
163         LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
164                  credit == IBLND_POSTRX_PEER_CREDIT ||
165                  credit == IBLND_POSTRX_RSRVD_CREDIT);
166 #ifdef HAVE_IB_GET_DMA_MR
167         LASSERT(mr != NULL);
168
169         rx->rx_sge.lkey   = mr->lkey;
170 #else
171         rx->rx_sge.lkey   = conn->ibc_hdev->ibh_pd->local_dma_lkey;
172 #endif
173         rx->rx_sge.addr   = rx->rx_msgaddr;
174         rx->rx_sge.length = IBLND_MSG_SIZE;
175
176         rx->rx_wrq.next = NULL;
177         rx->rx_wrq.sg_list = &rx->rx_sge;
178         rx->rx_wrq.num_sge = 1;
179         rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);
180
181         LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
182         LASSERT (rx->rx_nob >= 0);              /* not posted */
183
184         if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
185                 kiblnd_drop_rx(rx);             /* No more posts for this rx */
186                 return 0;
187         }
188
189         rx->rx_nob = -1;                        /* flag posted */
190
191         /* NB: need an extra reference after ib_post_recv because we don't
192          * own this rx (and rx::rx_conn) anymore, LU-5678.
193          */
194         kiblnd_conn_addref(conn);
195         rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
196         if (unlikely(rc != 0)) {
197                 CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
198                        libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
199                 rx->rx_nob = 0;
200         }
201
202         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
203                 goto out;
204
205         if (unlikely(rc != 0)) {
206                 kiblnd_close_conn(conn, rc);
207                 kiblnd_drop_rx(rx);     /* No more posts for this rx */
208                 goto out;
209         }
210
211         if (credit == IBLND_POSTRX_NO_CREDIT)
212                 goto out;
213
214         spin_lock(&conn->ibc_lock);
215         if (credit == IBLND_POSTRX_PEER_CREDIT)
216                 conn->ibc_outstanding_credits++;
217         else
218                 conn->ibc_reserved_credits++;
219         kiblnd_check_sends_locked(conn);
220         spin_unlock(&conn->ibc_lock);
221
222 out:
223         kiblnd_conn_decref(conn);
224         return rc;
225 }
226
227 static kib_tx_t *
228 kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
229 {
230         struct list_head *tmp;
231
232         list_for_each(tmp, &conn->ibc_active_txs) {
233                 kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
234
235                 LASSERT(!tx->tx_queued);
236                 LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
237
238                 if (tx->tx_cookie != cookie)
239                         continue;
240
241                 if (tx->tx_waiting &&
242                     tx->tx_msg->ibm_type == txtype)
243                         return tx;
244
245                 CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
246                       tx->tx_waiting ? "" : "NOT ",
247                       tx->tx_msg->ibm_type, txtype);
248         }
249         return NULL;
250 }
251
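/*
 * Handle a completion received from the peer_ni: find the tx of type
 * 'txtype' waiting on 'cookie', record the status, and finalise the tx if
 * it is now idle.  An unmatched completion closes the connection (-EPROTO).
 */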
252 static void
253 kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
254 {
255         kib_tx_t    *tx;
256         struct lnet_ni   *ni = conn->ibc_peer->ibp_ni;
257         int          idle;
258
259         spin_lock(&conn->ibc_lock);
260
261         tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
262         if (tx == NULL) {
263                 spin_unlock(&conn->ibc_lock);
264
265                 CWARN("Unmatched completion type %x cookie %#llx from %s\n",
266                       txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
267                 kiblnd_close_conn(conn, -EPROTO);
268                 return;
269         }
270
271         if (tx->tx_status == 0) {               /* success so far */
272                 if (status < 0) {               /* failed? */
273                         tx->tx_status = status;
274                 } else if (txtype == IBLND_MSG_GET_REQ) {
275                         lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
276                 }
277         }
278
279         tx->tx_waiting = 0;
280
281         idle = !tx->tx_queued && (tx->tx_sending == 0);
282         if (idle)
283                 list_del(&tx->tx_list);
284
285         spin_unlock(&conn->ibc_lock);
286
287         if (idle)
288                 kiblnd_tx_done(tx);
289 }
290
291 static void
292 kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
293 {
294         struct lnet_ni   *ni = conn->ibc_peer->ibp_ni;
295         kib_tx_t    *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
296
297         if (tx == NULL) {
298                 CERROR("Can't get tx for completion %x for %s\n",
299                        type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
300                 return;
301         }
302
303         tx->tx_msg->ibm_u.completion.ibcm_status = status;
304         tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
305         kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
306
307         kiblnd_queue_tx(tx, conn);
308 }
309
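/*
 * Dispatch a received message on an established connection: absorb any
 * returned send credits, hand IMMEDIATE/PUT_REQ/GET_REQ payloads to
 * lnet_parse(), start the RDMA for a PUT_ACK, and process completion
 * messages.  Finally re-post the rx unless lnet_parse() took ownership.
 */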
310 static void
311 kiblnd_handle_rx (kib_rx_t *rx)
312 {
313         kib_msg_t    *msg = rx->rx_msg;
314         kib_conn_t   *conn = rx->rx_conn;
315         struct lnet_ni    *ni = conn->ibc_peer->ibp_ni;
316         int           credits = msg->ibm_credits;
317         kib_tx_t     *tx;
318         int           rc = 0;
319         int           rc2;
320         int           post_credit;
321
322         LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
323
324         CDEBUG (D_NET, "Received %x[%d] from %s\n",
325                 msg->ibm_type, credits,
326                 libcfs_nid2str(conn->ibc_peer->ibp_nid));
327
328         if (credits != 0) {
329                 /* Have I received credits that will let me send? */
330                 spin_lock(&conn->ibc_lock);
331
332                 if (conn->ibc_credits + credits >
333                     conn->ibc_queue_depth) {
334                         rc2 = conn->ibc_credits;
335                         spin_unlock(&conn->ibc_lock);
336
337                         CERROR("Bad credits from %s: %d + %d > %d\n",
338                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
339                                rc2, credits,
340                                conn->ibc_queue_depth);
341
342                         kiblnd_close_conn(conn, -EPROTO);
343                         kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
344                         return;
345                 }
346
347                 conn->ibc_credits += credits;
348
349                 /* This ensures the credit taken by NOOP can be returned */
350                 if (msg->ibm_type == IBLND_MSG_NOOP &&
351                     !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
352                         conn->ibc_outstanding_credits++;
353
354                 kiblnd_check_sends_locked(conn);
355                 spin_unlock(&conn->ibc_lock);
356         }
357
358         switch (msg->ibm_type) {
359         default:
360                 CERROR("Bad IBLND message type %x from %s\n",
361                        msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
362                 post_credit = IBLND_POSTRX_NO_CREDIT;
363                 rc = -EPROTO;
364                 break;
365
366         case IBLND_MSG_NOOP:
367                 if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
368                         post_credit = IBLND_POSTRX_NO_CREDIT;
369                         break;
370                 }
371
372                 if (credits != 0) /* credit already posted */
373                         post_credit = IBLND_POSTRX_NO_CREDIT;
374                 else              /* a keepalive NOOP */
375                         post_credit = IBLND_POSTRX_PEER_CREDIT;
376                 break;
377
378         case IBLND_MSG_IMMEDIATE:
379                 post_credit = IBLND_POSTRX_DONT_POST;
380                 rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
381                                 msg->ibm_srcnid, rx, 0);
382                 if (rc < 0)                     /* repost on error */
383                         post_credit = IBLND_POSTRX_PEER_CREDIT;
384                 break;
385
386         case IBLND_MSG_PUT_REQ:
387                 post_credit = IBLND_POSTRX_DONT_POST;
388                 rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
389                                 msg->ibm_srcnid, rx, 1);
390                 if (rc < 0)                     /* repost on error */
391                         post_credit = IBLND_POSTRX_PEER_CREDIT;
392                 break;
393
394         case IBLND_MSG_PUT_NAK:
395                 CWARN ("PUT_NACK from %s\n",
396                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
397                 post_credit = IBLND_POSTRX_RSRVD_CREDIT;
398                 kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
399                                          msg->ibm_u.completion.ibcm_status,
400                                          msg->ibm_u.completion.ibcm_cookie);
401                 break;
402
403         case IBLND_MSG_PUT_ACK:
404                 post_credit = IBLND_POSTRX_RSRVD_CREDIT;
405
406                 spin_lock(&conn->ibc_lock);
407                 tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
408                                         msg->ibm_u.putack.ibpam_src_cookie);
409                 if (tx != NULL)
410                         list_del(&tx->tx_list);
411                 spin_unlock(&conn->ibc_lock);
412
413                 if (tx == NULL) {
414                         CERROR("Unmatched PUT_ACK from %s\n",
415                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
416                         rc = -EPROTO;
417                         break;
418                 }
419
420                 LASSERT (tx->tx_waiting);
421                 /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
422                  * (a) I can overwrite tx_msg since my peer_ni has received it!
423                  * (b) tx_waiting set tells tx_complete() it's not done. */
424
425                 tx->tx_nwrq = tx->tx_nsge = 0;  /* overwrite PUT_REQ */
426
427                 rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
428                                        kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
429                                        &msg->ibm_u.putack.ibpam_rd,
430                                        msg->ibm_u.putack.ibpam_dst_cookie);
431                 if (rc2 < 0)
432                         CERROR("Can't setup rdma for PUT to %s: %d\n",
433                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
434
435                 spin_lock(&conn->ibc_lock);
436                 tx->tx_waiting = 0;     /* clear waiting and queue atomically */
437                 kiblnd_queue_tx_locked(tx, conn);
438                 spin_unlock(&conn->ibc_lock);
439                 break;
440
441         case IBLND_MSG_PUT_DONE:
442                 post_credit = IBLND_POSTRX_PEER_CREDIT;
443                 kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
444                                          msg->ibm_u.completion.ibcm_status,
445                                          msg->ibm_u.completion.ibcm_cookie);
446                 break;
447
448         case IBLND_MSG_GET_REQ:
449                 post_credit = IBLND_POSTRX_DONT_POST;
450                 rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
451                                 msg->ibm_srcnid, rx, 1);
452                 if (rc < 0)                     /* repost on error */
453                         post_credit = IBLND_POSTRX_PEER_CREDIT;
454                 break;
455
456         case IBLND_MSG_GET_DONE:
457                 post_credit = IBLND_POSTRX_RSRVD_CREDIT;
458                 kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
459                                          msg->ibm_u.completion.ibcm_status,
460                                          msg->ibm_u.completion.ibcm_cookie);
461                 break;
462         }
463
464         if (rc < 0)                             /* protocol error */
465                 kiblnd_close_conn(conn, rc);
466
467         if (post_credit != IBLND_POSTRX_DONT_POST)
468                 kiblnd_post_rx(rx, post_credit);
469 }
470
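/*
 * Receive completion handler.  Validate and unpack the message, reject
 * stale senders, and either park the rx on ibc_early_rxs (connection not
 * yet established) or process it immediately via kiblnd_handle_rx().
 */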
471 static void
472 kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
473 {
474         kib_msg_t    *msg = rx->rx_msg;
475         kib_conn_t   *conn = rx->rx_conn;
476         struct lnet_ni    *ni = conn->ibc_peer->ibp_ni;
477         kib_net_t    *net = ni->ni_data;
478         int           rc;
479         int           err = -EIO;
480
481         LASSERT (net != NULL);
482         LASSERT (rx->rx_nob < 0);               /* was posted */
483         rx->rx_nob = 0;                         /* isn't now */
484
485         if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
486                 goto ignore;
487
488         if (status != IB_WC_SUCCESS) {
489                 CNETERR("Rx from %s failed: %d\n",
490                         libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
491                 goto failed;
492         }
493
494         LASSERT (nob >= 0);
495         rx->rx_nob = nob;
496
497         rc = kiblnd_unpack_msg(msg, rx->rx_nob);
498         if (rc != 0) {
499                 CERROR ("Error %d unpacking rx from %s\n",
500                         rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
501                 goto failed;
502         }
503
504         if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
505             msg->ibm_dstnid != ni->ni_nid ||
506             msg->ibm_srcstamp != conn->ibc_incarnation ||
507             msg->ibm_dststamp != net->ibn_incarnation) {
508                 CERROR ("Stale rx from %s\n",
509                         libcfs_nid2str(conn->ibc_peer->ibp_nid));
510                 err = -ESTALE;
511                 goto failed;
512         }
513
514         /* set time last known alive */
515         kiblnd_peer_alive(conn->ibc_peer);
516
517         /* racing with connection establishment/teardown! */
518
519         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
520                 rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
521                 unsigned long  flags;
522
523                 write_lock_irqsave(g_lock, flags);
524                 /* must check holding global lock to eliminate race */
525                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
526                         list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
527                         write_unlock_irqrestore(g_lock, flags);
528                         return;
529                 }
530                 write_unlock_irqrestore(g_lock, flags);
531         }
532         kiblnd_handle_rx(rx);
533         return;
534
535  failed:
536         CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
537         kiblnd_close_conn(conn, err);
538  ignore:
539         kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
540 }
541
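/*
 * Map the tx's fragments through the CPT's FMR/FastReg pool and collapse
 * the rdma descriptor to a single virtually-contiguous fragment keyed by
 * the pool's rkey.
 */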
542 static int
543 kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
544 {
545         kib_hca_dev_t           *hdev;
546         kib_fmr_poolset_t       *fps;
547         int                     cpt;
548         int                     rc;
549         bool                    is_fastreg = false;
550
551         LASSERT(tx->tx_pool != NULL);
552         LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
553
554         hdev = tx->tx_pool->tpo_hdev;
555         cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
556
557         fps = net->ibn_fmr_ps[cpt];
558         rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr, &is_fastreg);
559         if (rc != 0) {
560                 CERROR("Can't map %u bytes: %d\n", nob, rc);
561                 return rc;
562         }
563
564         /* If rd is not tx_rd, it's going to get sent to a peer_ni, who will need
565          * the rkey */
566         rd->rd_key = tx->fmr.fmr_key;
567         if (!is_fastreg)
568                 rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
569         rd->rd_frags[0].rf_nob   = nob;
570         rd->rd_nfrags = 1;
571
572         return 0;
573 }
574
575 static void
576 kiblnd_unmap_tx(kib_tx_t *tx)
577 {
578         if (tx->fmr.fmr_pfmr || tx->fmr.fmr_frd)
579                 kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
580
581         if (tx->tx_nfrags != 0) {
582                 kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
583                                     tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
584                 tx->tx_nfrags = 0;
585         }
586 }
587
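/*
 * DMA-map the tx's scatterlist and fill in the rdma descriptor.  Use the
 * global DMA MR when the device provides one (HAVE_IB_GET_DMA_MR),
 * otherwise fall back to FMR/FastReg mapping.
 */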
588 static int
589 kiblnd_map_tx(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
590 {
591         kib_net_t     *net   = ni->ni_data;
592         kib_hca_dev_t *hdev  = net->ibn_dev->ibd_hdev;
593 #ifdef HAVE_IB_GET_DMA_MR
594         struct ib_mr  *mr    = NULL;
595 #endif
596         __u32 nob;
597         int i;
598
599         /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the
600          * RDMA sink */
601         tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
602         tx->tx_nfrags = nfrags;
603
604         rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
605                                           tx->tx_nfrags, tx->tx_dmadir);
606
607         for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
608                 rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
609                         hdev->ibh_ibdev, &tx->tx_frags[i]);
610                 rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
611                         hdev->ibh_ibdev, &tx->tx_frags[i]);
612                 nob += rd->rd_frags[i].rf_nob;
613         }
614
615 #ifdef HAVE_IB_GET_DMA_MR
616         mr = kiblnd_find_rd_dma_mr(ni, rd,
617                                    (tx->tx_conn != NULL) ?
618                                    tx->tx_conn->ibc_max_frags : -1);
619         if (mr != NULL) {
620                 /* found pre-mapping MR */
621                 rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
622                 return 0;
623         }
624 #endif
625
626         if (net->ibn_fmr_ps != NULL)
627                 return kiblnd_fmr_map_tx(net, tx, rd, nob);
628
629         return -EINVAL;
630 }
631
632
633 static int
634 kiblnd_setup_rd_iov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
635                     unsigned int niov, struct kvec *iov, int offset, int nob)
636 {
637         kib_net_t          *net = ni->ni_data;
638         struct page        *page;
639         struct scatterlist *sg;
640         unsigned long       vaddr;
641         int                 fragnob;
642         int                 page_offset;
643
644         LASSERT (nob > 0);
645         LASSERT (niov > 0);
646         LASSERT (net != NULL);
647
648         while (offset >= iov->iov_len) {
649                 offset -= iov->iov_len;
650                 niov--;
651                 iov++;
652                 LASSERT (niov > 0);
653         }
654
655         sg = tx->tx_frags;
656         do {
657                 LASSERT(niov > 0);
658
659                 vaddr = ((unsigned long)iov->iov_base) + offset;
660                 page_offset = vaddr & (PAGE_SIZE - 1);
661                 page = lnet_kvaddr_to_page(vaddr);
662                 if (page == NULL) {
663                         CERROR("Can't find page\n");
664                         return -EFAULT;
665                 }
666
667                 fragnob = min((int)(iov->iov_len - offset), nob);
668                 fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
669
670                 sg_set_page(sg, page, fragnob, page_offset);
671                 sg = sg_next(sg);
672                 if (!sg) {
673                         CERROR("lacking enough sg entries to map tx\n");
674                         return -EFAULT;
675                 }
676
677                 if (offset + fragnob < iov->iov_len) {
678                         offset += fragnob;
679                 } else {
680                         offset = 0;
681                         iov++;
682                         niov--;
683                 }
684                 nob -= fragnob;
685         } while (nob > 0);
686
687         return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
688 }
689
690 static int
691 kiblnd_setup_rd_kiov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
692                      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
693 {
694         kib_net_t          *net = ni->ni_data;
695         struct scatterlist *sg;
696         int                 fragnob;
697
698         CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
699
700         LASSERT (nob > 0);
701         LASSERT (nkiov > 0);
702         LASSERT (net != NULL);
703
704         while (offset >= kiov->kiov_len) {
705                 offset -= kiov->kiov_len;
706                 nkiov--;
707                 kiov++;
708                 LASSERT (nkiov > 0);
709         }
710
711         sg = tx->tx_frags;
712         do {
713                 LASSERT (nkiov > 0);
714
715                 fragnob = min((int)(kiov->kiov_len - offset), nob);
716
717                 sg_set_page(sg, kiov->kiov_page, fragnob,
718                             kiov->kiov_offset + offset);
719                 sg = sg_next(sg);
720                 if (!sg) {
721                         CERROR("lacking enough sg entries to map tx\n");
722                         return -EFAULT;
723                 }
724
725                 offset = 0;
726                 kiov++;
727                 nkiov--;
728                 nob -= fragnob;
729         } while (nob > 0);
730
731         return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
732 }
733
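/*
 * Try to post one queued tx on the connection, consuming 'credit' credits.
 * Returns -EAGAIN (tx stays queued) when the send queue is full or credits
 * are exhausted, 0 on success or when a redundant NOOP is dropped, and
 * -EIO after a failed ib_post_send(), which also closes the connection.
 * Called and returns with ibc_lock held, though it may drop it internally.
 */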
734 static int
735 kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
736 __must_hold(&conn->ibc_lock)
737 {
738         kib_msg_t *msg = tx->tx_msg;
739         kib_peer_ni_t *peer_ni = conn->ibc_peer;
740         struct lnet_ni *ni = peer_ni->ibp_ni;
741         int ver = conn->ibc_version;
742         int rc;
743         int done;
744
745         LASSERT(tx->tx_queued);
746         /* We rely on this for QP sizing */
747         LASSERT(tx->tx_nwrq > 0 && tx->tx_nsge >= 0);
748         LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);
749
750         LASSERT(credit == 0 || credit == 1);
751         LASSERT(conn->ibc_outstanding_credits >= 0);
752         LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
753         LASSERT(conn->ibc_credits >= 0);
754         LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
755
756         if (conn->ibc_nsends_posted ==
757             kiblnd_concurrent_sends(ver, ni)) {
758                 /* tx completions outstanding... */
759                 CDEBUG(D_NET, "%s: posted enough\n",
760                        libcfs_nid2str(peer_ni->ibp_nid));
761                 return -EAGAIN;
762         }
763
764         if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
765                 CDEBUG(D_NET, "%s: no credits\n",
766                        libcfs_nid2str(peer_ni->ibp_nid));
767                 return -EAGAIN;
768         }
769
770         if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
771             conn->ibc_credits == 1 &&   /* last credit reserved */
772             msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
773                 CDEBUG(D_NET, "%s: not using last credit\n",
774                        libcfs_nid2str(peer_ni->ibp_nid));
775                 return -EAGAIN;
776         }
777
778         /* NB don't drop ibc_lock before bumping tx_sending */
779         list_del(&tx->tx_list);
780         tx->tx_queued = 0;
781
782         if (msg->ibm_type == IBLND_MSG_NOOP &&
783             (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
784              (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
785               conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
786                 /* OK to drop when posted enough NOOPs, since
787                  * kiblnd_check_sends_locked will queue NOOP again when
788                  * posted NOOPs complete */
789                 spin_unlock(&conn->ibc_lock);
790                 kiblnd_tx_done(tx);
791                 spin_lock(&conn->ibc_lock);
792                 CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
793                        libcfs_nid2str(peer_ni->ibp_nid),
794                        conn->ibc_noops_posted);
795                 return 0;
796         }
797
798         kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
799                         peer_ni->ibp_nid, conn->ibc_incarnation);
800
801         conn->ibc_credits -= credit;
802         conn->ibc_outstanding_credits = 0;
803         conn->ibc_nsends_posted++;
804         if (msg->ibm_type == IBLND_MSG_NOOP)
805                 conn->ibc_noops_posted++;
806
807         /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
808          * PUT.  If so, it was first queued here as a PUT_REQ, sent and
809          * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
810          * and then re-queued here.  It's (just) possible that
811          * tx_sending is non-zero if we've not done the tx_complete()
812          * from the first send; hence the ++ rather than = below. */
813         tx->tx_sending++;
814         list_add(&tx->tx_list, &conn->ibc_active_txs);
815
816         /* I'm still holding ibc_lock! */
817         if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
818                 rc = -ECONNABORTED;
819         } else if (tx->tx_pool->tpo_pool.po_failed ||
820                  conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
821                 /* close_conn will launch failover */
822                 rc = -ENETDOWN;
823         } else {
824                 struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
825                 struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
826                 struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;
827
828                 if (frd != NULL) {
829                         if (!frd->frd_valid) {
830                                 wr = &frd->frd_inv_wr.wr;
831                                 wr->next = &frd->frd_fastreg_wr.wr;
832                         } else {
833                                 wr = &frd->frd_fastreg_wr.wr;
834                         }
835                         frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
836                 }
837
838                 LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
839                          "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n",
840                          bad->wr_id, bad->opcode, bad->send_flags,
841                          libcfs_nid2str(conn->ibc_peer->ibp_nid));
842
843                 bad = NULL;
844                 rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
845         }
846
847         conn->ibc_last_send = jiffies;
848
849         if (rc == 0)
850                 return 0;
851
852         /* NB credits are transferred in the actual
853          * message, which can only be the last work item */
854         conn->ibc_credits += credit;
855         conn->ibc_outstanding_credits += msg->ibm_credits;
856         conn->ibc_nsends_posted--;
857         if (msg->ibm_type == IBLND_MSG_NOOP)
858                 conn->ibc_noops_posted--;
859
860         tx->tx_status = rc;
861         tx->tx_waiting = 0;
862         tx->tx_sending--;
863
864         done = (tx->tx_sending == 0);
865         if (done)
866                 list_del(&tx->tx_list);
867
868         spin_unlock(&conn->ibc_lock);
869
870         if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
871                 CERROR("Error %d posting transmit to %s\n",
872                        rc, libcfs_nid2str(peer_ni->ibp_nid));
873         else
874                 CDEBUG(D_NET, "Error %d posting transmit to %s\n",
875                        rc, libcfs_nid2str(peer_ni->ibp_nid));
876
877         kiblnd_close_conn(conn, rc);
878
879         if (done)
880                 kiblnd_tx_done(tx);
881
882         spin_lock(&conn->ibc_lock);
883
884         return -EIO;
885 }
886
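/*
 * Push out as much queued work as credits and the send queue allow:
 * promote reserved-credit txs, queue a NOOP if one is needed to return
 * credits, then keep posting from the nocred, noop and normal queues until
 * kiblnd_post_tx_locked() says to stop.  Caller holds ibc_lock.
 */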
887 static void
888 kiblnd_check_sends_locked(kib_conn_t *conn)
889 {
890         int        ver = conn->ibc_version;
891         struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
892         kib_tx_t  *tx;
893
894         /* Don't send anything until after the connection is established */
895         if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
896                 CDEBUG(D_NET, "%s too soon\n",
897                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
898                 return;
899         }
900
901         LASSERT(conn->ibc_nsends_posted <=
902                 kiblnd_concurrent_sends(ver, ni));
903         LASSERT (!IBLND_OOB_CAPABLE(ver) ||
904                  conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
905         LASSERT (conn->ibc_reserved_credits >= 0);
906
907         while (conn->ibc_reserved_credits > 0 &&
908                !list_empty(&conn->ibc_tx_queue_rsrvd)) {
909                 tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
910                                     kib_tx_t, tx_list);
911                 list_del(&tx->tx_list);
912                 list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
913                 conn->ibc_reserved_credits--;
914         }
915
916         if (kiblnd_need_noop(conn)) {
917                 spin_unlock(&conn->ibc_lock);
918
919                 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
920                 if (tx != NULL)
921                         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
922
923                 spin_lock(&conn->ibc_lock);
924                 if (tx != NULL)
925                         kiblnd_queue_tx_locked(tx, conn);
926         }
927
928         for (;;) {
929                 int credit;
930
931                 if (!list_empty(&conn->ibc_tx_queue_nocred)) {
932                         credit = 0;
933                         tx = list_entry(conn->ibc_tx_queue_nocred.next,
934                                             kib_tx_t, tx_list);
935                 } else if (!list_empty(&conn->ibc_tx_noops)) {
936                         LASSERT (!IBLND_OOB_CAPABLE(ver));
937                         credit = 1;
938                         tx = list_entry(conn->ibc_tx_noops.next,
939                                         kib_tx_t, tx_list);
940                 } else if (!list_empty(&conn->ibc_tx_queue)) {
941                         credit = 1;
942                         tx = list_entry(conn->ibc_tx_queue.next,
943                                             kib_tx_t, tx_list);
944                 } else
945                         break;
946
947                 if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
948                         break;
949         }
950 }
951
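/*
 * Send completion handler.  Drop the tx's sending count and the
 * connection's posted-send counters, mark the tx failed on error, and
 * finalise it once it is neither sending, waiting nor queued.
 */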
952 static void
953 kiblnd_tx_complete (kib_tx_t *tx, int status)
954 {
955         int           failed = (status != IB_WC_SUCCESS);
956         kib_conn_t   *conn = tx->tx_conn;
957         int           idle;
958
959         LASSERT (tx->tx_sending > 0);
960
961         if (failed) {
962                 if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
963                         CNETERR("Tx -> %s cookie %#llx"
964                                 " sending %d waiting %d: failed %d\n",
965                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
966                                 tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
967                                 status);
968
969                 kiblnd_close_conn(conn, -EIO);
970         } else {
971                 kiblnd_peer_alive(conn->ibc_peer);
972         }
973
974         spin_lock(&conn->ibc_lock);
975
976         /* I could be racing with rdma completion.  Whoever makes 'tx' idle
977          * gets to free it, which also drops its ref on 'conn'. */
978
979         tx->tx_sending--;
980         conn->ibc_nsends_posted--;
981         if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
982                 conn->ibc_noops_posted--;
983
984         if (failed) {
985                 tx->tx_waiting = 0;             /* don't wait for peer_ni */
986                 tx->tx_status = -EIO;
987         }
988
989         idle = (tx->tx_sending == 0) &&         /* This is the final callback */
990                !tx->tx_waiting &&               /* Not waiting for peer_ni */
991                !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
992         if (idle)
993                 list_del(&tx->tx_list);
994
995         kiblnd_check_sends_locked(conn);
996         spin_unlock(&conn->ibc_lock);
997
998         if (idle)
999                 kiblnd_tx_done(tx);
1000 }
1001
1002 static void
1003 kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx, int type, int body_nob)
1004 {
1005         kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
1006         struct ib_sge *sge = &tx->tx_msgsge;
1007         struct ib_rdma_wr *wrq;
1008         int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
1009 #ifdef HAVE_IB_GET_DMA_MR
1010         struct ib_mr *mr = hdev->ibh_mrs;
1011 #endif
1012
1013         LASSERT(tx->tx_nwrq >= 0);
1014         LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
1015         LASSERT(nob <= IBLND_MSG_SIZE);
1016 #ifdef HAVE_IB_GET_DMA_MR
1017         LASSERT(mr != NULL);
1018 #endif
1019
1020         kiblnd_init_msg(tx->tx_msg, type, body_nob);
1021
1022 #ifdef HAVE_IB_GET_DMA_MR
1023         sge->lkey   = mr->lkey;
1024 #else
1025         sge->lkey   = hdev->ibh_pd->local_dma_lkey;
1026 #endif
1027         sge->addr   = tx->tx_msgaddr;
1028         sge->length = nob;
1029
1030         wrq = &tx->tx_wrq[tx->tx_nwrq];
1031         memset(wrq, 0, sizeof(*wrq));
1032
1033         wrq->wr.next            = NULL;
1034         wrq->wr.wr_id           = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
1035         wrq->wr.sg_list         = sge;
1036         wrq->wr.num_sge         = 1;
1037         wrq->wr.opcode          = IB_WR_SEND;
1038         wrq->wr.send_flags      = IB_SEND_SIGNALED;
1039
1040         tx->tx_nwrq++;
1041 }
1042
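/*
 * Build the RDMA WRITE work requests that move 'resid' bytes from the
 * local source descriptor to the peer_ni's descriptor 'dstrd', then append
 * the GET_DONE/PUT_DONE completion message carrying 'dstcookie'.  Returns
 * the number of bytes to transfer, or a negative errno if either
 * descriptor or the fragment limit is exceeded.
 */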
1043 static int
1044 kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
1045                  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
1046 {
1047         kib_msg_t         *ibmsg = tx->tx_msg;
1048         kib_rdma_desc_t   *srcrd = tx->tx_rd;
1049         struct ib_rdma_wr *wrq = NULL;
1050         struct ib_sge     *sge;
1051         int                rc  = resid;
1052         int                srcidx;
1053         int                dstidx;
1054         int                sge_nob;
1055         int                wrq_sge;
1056
1057         LASSERT(!in_interrupt());
1058         LASSERT(tx->tx_nwrq == 0 && tx->tx_nsge == 0);
1059         LASSERT(type == IBLND_MSG_GET_DONE || type == IBLND_MSG_PUT_DONE);
1060
1061         for (srcidx = dstidx = wrq_sge = sge_nob = 0;
1062              resid > 0; resid -= sge_nob) {
1063                 int     prev = dstidx;
1064
1065                 if (srcidx >= srcrd->rd_nfrags) {
1066                         CERROR("Src buffer exhausted: %d frags\n", srcidx);
1067                         rc = -EPROTO;
1068                         break;
1069                 }
1070
1071                 if (dstidx >= dstrd->rd_nfrags) {
1072                         CERROR("Dst buffer exhausted: %d frags\n", dstidx);
1073                         rc = -EPROTO;
1074                         break;
1075                 }
1076
1077                 if (tx->tx_nwrq >= conn->ibc_max_frags) {
1078                         CERROR("RDMA has too many fragments for peer_ni %s (%d), "
1079                                "src idx/frags: %d/%d dst idx/frags: %d/%d\n",
1080                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
1081                                conn->ibc_max_frags,
1082                                srcidx, srcrd->rd_nfrags,
1083                                dstidx, dstrd->rd_nfrags);
1084                         rc = -EMSGSIZE;
1085                         break;
1086                 }
1087
1088                 sge_nob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
1089                                   kiblnd_rd_frag_size(dstrd, dstidx)), resid);
1090
1091                 sge = &tx->tx_sge[tx->tx_nsge];
1092                 sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
1093                 sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
1094                 sge->length = sge_nob;
1095
1096                 if (wrq_sge == 0) {
1097                         wrq = &tx->tx_wrq[tx->tx_nwrq];
1098
1099                         wrq->wr.next    = &(wrq + 1)->wr;
1100                         wrq->wr.wr_id   = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
1101                         wrq->wr.sg_list = sge;
1102                         wrq->wr.opcode  = IB_WR_RDMA_WRITE;
1103                         wrq->wr.send_flags = 0;
1104
1105 #ifdef HAVE_IB_RDMA_WR
1106                         wrq->remote_addr        = kiblnd_rd_frag_addr(dstrd,
1107                                                                       dstidx);
1108                         wrq->rkey               = kiblnd_rd_frag_key(dstrd,
1109                                                                      dstidx);
1110 #else
1111                         wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd,
1112                                                                         dstidx);
1113                         wrq->wr.wr.rdma.rkey    = kiblnd_rd_frag_key(dstrd,
1114                                                                      dstidx);
1115 #endif
1116                 }
1117
1118                 srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, sge_nob);
1119                 dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, sge_nob);
1120
1121                 wrq_sge++;
1122                 if (wrq_sge == *kiblnd_tunables.kib_wrq_sge || dstidx != prev) {
1123                         tx->tx_nwrq++;
1124                         wrq->wr.num_sge = wrq_sge;
1125                         wrq_sge = 0;
1126                 }
1127                 tx->tx_nsge++;
1128         }
1129
1130         if (rc < 0)     /* no RDMA if completing with failure */
1131                 tx->tx_nwrq = tx->tx_nsge = 0;
1132
1133         ibmsg->ibm_u.completion.ibcm_status = rc;
1134         ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
1135         kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
1136                            type, sizeof (kib_completion_msg_t));
1137
1138         return rc;
1139 }
1140
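/*
 * Attach the tx to the connection (taking a conn ref for first-time txs),
 * stamp its deadline, and append it to the send queue that matches its
 * message type.  Caller holds ibc_lock.
 */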
1141 static void
1142 kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
1143 {
1144         struct list_head *q;
1145
1146         LASSERT(tx->tx_nwrq > 0);       /* work items set up */
1147         LASSERT(!tx->tx_queued);        /* not queued for sending already */
1148         LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1149
1150         tx->tx_queued = 1;
1151         tx->tx_deadline = jiffies +
1152                           msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
1153                                            MSEC_PER_SEC);
1154
1155         if (tx->tx_conn == NULL) {
1156                 kiblnd_conn_addref(conn);
1157                 tx->tx_conn = conn;
1158                 LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
1159         } else {
1160                 /* PUT_DONE first attached to conn as a PUT_REQ */
1161                 LASSERT (tx->tx_conn == conn);
1162                 LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
1163         }
1164
1165         switch (tx->tx_msg->ibm_type) {
1166         default:
1167                 LBUG();
1168
1169         case IBLND_MSG_PUT_REQ:
1170         case IBLND_MSG_GET_REQ:
1171                 q = &conn->ibc_tx_queue_rsrvd;
1172                 break;
1173
1174         case IBLND_MSG_PUT_NAK:
1175         case IBLND_MSG_PUT_ACK:
1176         case IBLND_MSG_PUT_DONE:
1177         case IBLND_MSG_GET_DONE:
1178                 q = &conn->ibc_tx_queue_nocred;
1179                 break;
1180
1181         case IBLND_MSG_NOOP:
1182                 if (IBLND_OOB_CAPABLE(conn->ibc_version))
1183                         q = &conn->ibc_tx_queue_nocred;
1184                 else
1185                         q = &conn->ibc_tx_noops;
1186                 break;
1187
1188         case IBLND_MSG_IMMEDIATE:
1189                 q = &conn->ibc_tx_queue;
1190                 break;
1191         }
1192
1193         list_add_tail(&tx->tx_list, q);
1194 }
1195
1196 static void
1197 kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
1198 {
1199         spin_lock(&conn->ibc_lock);
1200         kiblnd_queue_tx_locked(tx, conn);
1201         kiblnd_check_sends_locked(conn);
1202         spin_unlock(&conn->ibc_lock);
1203 }
1204
1205 static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
1206                                struct sockaddr_in *srcaddr,
1207                                struct sockaddr_in *dstaddr,
1208                                int timeout_ms)
1209 {
1210         unsigned short port;
1211         int rc;
1212
1213         /* allow the port to be reused */
1214         rc = rdma_set_reuseaddr(cmid, 1);
1215         if (rc != 0) {
1216                 CERROR("Unable to set reuse on cmid: %d\n", rc);
1217                 return rc;
1218         }
1219
1220         /* look for a free privileged port */
1221         for (port = PROT_SOCK-1; port > 0; port--) {
1222                 srcaddr->sin_port = htons(port);
1223                 rc = rdma_resolve_addr(cmid,
1224                                        (struct sockaddr *)srcaddr,
1225                                        (struct sockaddr *)dstaddr,
1226                                        timeout_ms);
1227                 if (rc == 0) {
1228                         CDEBUG(D_NET, "bound to port %hu\n", port);
1229                         return 0;
1230                 } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
1231                         CDEBUG(D_NET, "bind to port %hu failed: %d\n",
1232                                port, rc);
1233                 } else {
1234                         return rc;
1235                 }
1236         }
1237
1238         CERROR("Failed to bind to a free privileged port\n");
1239         return rc;
1240 }
1241
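/*
 * Start active connection establishment: create a CM ID bound to the
 * local interface and kick off address resolution towards the peer_ni,
 * optionally from a privileged source port.
 */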
1242 static void
1243 kiblnd_connect_peer (kib_peer_ni_t *peer_ni)
1244 {
1245         struct rdma_cm_id *cmid;
1246         kib_dev_t         *dev;
1247         kib_net_t         *net = peer_ni->ibp_ni->ni_data;
1248         struct sockaddr_in srcaddr;
1249         struct sockaddr_in dstaddr;
1250         int                rc;
1251
1252         LASSERT (net != NULL);
1253         LASSERT (peer_ni->ibp_connecting > 0);
1254
1255         cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP,
1256                                      IB_QPT_RC);
1257
1258         if (IS_ERR(cmid)) {
1259                 CERROR("Can't create CMID for %s: %ld\n",
1260                        libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid));
1261                 rc = PTR_ERR(cmid);
1262                 goto failed;
1263         }
1264
1265         dev = net->ibn_dev;
1266         memset(&srcaddr, 0, sizeof(srcaddr));
1267         srcaddr.sin_family = AF_INET;
1268         srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);
1269
1270         memset(&dstaddr, 0, sizeof(dstaddr));
1271         dstaddr.sin_family = AF_INET;
1272         dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
1273         dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid));
1274
1275         kiblnd_peer_addref(peer_ni);               /* cmid's ref */
1276
1277         if (*kiblnd_tunables.kib_use_priv_port) {
1278                 rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
1279                                          *kiblnd_tunables.kib_timeout * 1000);
1280         } else {
1281                 rc = rdma_resolve_addr(cmid,
1282                                        (struct sockaddr *)&srcaddr,
1283                                        (struct sockaddr *)&dstaddr,
1284                                        *kiblnd_tunables.kib_timeout * 1000);
1285         }
1286         if (rc != 0) {
1287                 /* Can't initiate address resolution:  */
1288                 CERROR("Can't resolve addr for %s: %d\n",
1289                        libcfs_nid2str(peer_ni->ibp_nid), rc);
1290                 goto failed2;
1291         }
1292
1293         LASSERT (cmid->device != NULL);
1294         CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
1295                libcfs_nid2str(peer_ni->ibp_nid), dev->ibd_ifname,
1296                &dev->ibd_ifip, cmid->device->name);
1297
1298         return;
1299
1300  failed2:
1301         kiblnd_peer_connect_failed(peer_ni, 1, rc);
1302         kiblnd_peer_decref(peer_ni);               /* cmid's ref */
1303         rdma_destroy_id(cmid);
1304         return;
1305  failed:
1306         kiblnd_peer_connect_failed(peer_ni, 1, rc);
1307 }
1308
1309 bool
1310 kiblnd_reconnect_peer(kib_peer_ni_t *peer_ni)
1311 {
1312         rwlock_t         *glock = &kiblnd_data.kib_global_lock;
1313         char             *reason = NULL;
1314         struct list_head  txs;
1315         unsigned long     flags;
1316
1317         INIT_LIST_HEAD(&txs);
1318
1319         write_lock_irqsave(glock, flags);
1320         if (peer_ni->ibp_reconnecting == 0) {
1321                 if (peer_ni->ibp_accepting)
1322                         reason = "accepting";
1323                 else if (peer_ni->ibp_connecting)
1324                         reason = "connecting";
1325                 else if (!list_empty(&peer_ni->ibp_conns))
1326                         reason = "connected";
1327                 else /* connected then closed */
1328                         reason = "closed";
1329
1330                 goto no_reconnect;
1331         }
1332
1333         if (peer_ni->ibp_accepting)
1334                 CNETERR("Detecting race between accepting and reconnecting\n");
1335         peer_ni->ibp_reconnecting--;
1336
1337         if (!kiblnd_peer_active(peer_ni)) {
1338                 list_splice_init(&peer_ni->ibp_tx_queue, &txs);
1339                 reason = "unlinked";
1340                 goto no_reconnect;
1341         }
1342
1343         peer_ni->ibp_connecting++;
1344         peer_ni->ibp_reconnected++;
1345
1346         write_unlock_irqrestore(glock, flags);
1347
1348         kiblnd_connect_peer(peer_ni);
1349         return true;
1350
1351  no_reconnect:
1352         write_unlock_irqrestore(glock, flags);
1353
1354         CWARN("Abort reconnection of %s: %s\n",
1355               libcfs_nid2str(peer_ni->ibp_nid), reason);
1356         kiblnd_txlist_done(&txs, -ECONNABORTED);
1357         return false;
1358 }
1359
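/*
 * Send 'tx' to 'nid' (or just provoke a connection if tx is NULL): use an
 * existing connection when there is one, otherwise queue the tx on the
 * peer_ni and initiate connection attempts, creating the peer_ni first if
 * necessary.
 */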
1360 void
1361 kiblnd_launch_tx(struct lnet_ni *ni, kib_tx_t *tx, lnet_nid_t nid)
1362 {
1363         kib_peer_ni_t        *peer_ni;
1364         kib_peer_ni_t        *peer2;
1365         kib_conn_t        *conn;
1366         rwlock_t        *g_lock = &kiblnd_data.kib_global_lock;
1367         unsigned long      flags;
1368         int                rc;
1369         int                i;
1370         struct lnet_ioctl_config_o2iblnd_tunables *tunables;
1371
1372         /* If I get here, I've committed to send, so I complete the tx with
1373          * failure on any problems */
1374
1375         LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
1376         LASSERT (tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */
1377
1378         /* First time, just use a read lock since I expect to find my peer_ni
1379          * connected */
1380         read_lock_irqsave(g_lock, flags);
1381
1382         peer_ni = kiblnd_find_peer_locked(ni, nid);
1383         if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) {
1384                 /* Found a peer_ni with an established connection */
1385                 conn = kiblnd_get_conn_locked(peer_ni);
1386                 kiblnd_conn_addref(conn); /* 1 ref for me... */
1387
1388                 read_unlock_irqrestore(g_lock, flags);
1389
1390                 if (tx != NULL)
1391                         kiblnd_queue_tx(tx, conn);
1392                 kiblnd_conn_decref(conn); /* ...to here */
1393                 return;
1394         }
1395
1396         read_unlock(g_lock);
1397         /* Re-try with a write lock */
1398         write_lock(g_lock);
1399
1400         peer_ni = kiblnd_find_peer_locked(ni, nid);
1401         if (peer_ni != NULL) {
1402                 if (list_empty(&peer_ni->ibp_conns)) {
1403                         /* found a peer_ni, but it's still connecting... */
1404                         LASSERT(kiblnd_peer_connecting(peer_ni));
1405                         if (tx != NULL)
1406                                 list_add_tail(&tx->tx_list,
1407                                                   &peer_ni->ibp_tx_queue);
1408                         write_unlock_irqrestore(g_lock, flags);
1409                 } else {
1410                         conn = kiblnd_get_conn_locked(peer_ni);
1411                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1412
1413                         write_unlock_irqrestore(g_lock, flags);
1414
1415                         if (tx != NULL)
1416                                 kiblnd_queue_tx(tx, conn);
1417                         kiblnd_conn_decref(conn); /* ...to here */
1418                 }
1419                 return;
1420         }
1421
1422         write_unlock_irqrestore(g_lock, flags);
1423
1424         /* Allocate a peer_ni ready to add to the peer_ni table and retry */
1425         rc = kiblnd_create_peer(ni, &peer_ni, nid);
1426         if (rc != 0) {
1427                 CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid));
1428                 if (tx != NULL) {
1429                         tx->tx_status = -EHOSTUNREACH;
1430                         tx->tx_waiting = 0;
1431                         kiblnd_tx_done(tx);
1432                 }
1433                 return;
1434         }
1435
1436         write_lock_irqsave(g_lock, flags);
1437
1438         peer2 = kiblnd_find_peer_locked(ni, nid);
1439         if (peer2 != NULL) {
1440                 if (list_empty(&peer2->ibp_conns)) {
1441                         /* found a peer_ni, but it's still connecting... */
1442                         LASSERT(kiblnd_peer_connecting(peer2));
1443                         if (tx != NULL)
1444                                 list_add_tail(&tx->tx_list,
1445                                                   &peer2->ibp_tx_queue);
1446                         write_unlock_irqrestore(g_lock, flags);
1447                 } else {
1448                         conn = kiblnd_get_conn_locked(peer2);
1449                         kiblnd_conn_addref(conn); /* 1 ref for me... */
1450
1451                         write_unlock_irqrestore(g_lock, flags);
1452
1453                         if (tx != NULL)
1454                                 kiblnd_queue_tx(tx, conn);
1455                         kiblnd_conn_decref(conn); /* ...to here */
1456                 }
1457
1458                 kiblnd_peer_decref(peer_ni);
1459                 return;
1460         }
1461
1462         /* Brand new peer_ni */
1463         LASSERT(peer_ni->ibp_connecting == 0);
1464         tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
1465         peer_ni->ibp_connecting = tunables->lnd_conns_per_peer;
1466
1467         /* always called with a ref on ni, which prevents ni being shutdown */
1468         LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
1469
1470         if (tx != NULL)
1471                 list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
1472
1473         kiblnd_peer_addref(peer_ni);
1474         list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
1475
1476         write_unlock_irqrestore(g_lock, flags);
1477
1478         for (i = 0; i < tunables->lnd_conns_per_peer; i++)
1479                 kiblnd_connect_peer(peer_ni);
1480         kiblnd_peer_decref(peer_ni);
1481 }
1482
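     /* kiblnd_send() is the o2iblnd send path called from LNet.  Payloads
      * that fit in IBLND_MSG_SIZE are sent inline as IBLND_MSG_IMMEDIATE.
      * Larger transfers go over RDMA: a GET sends IBLND_MSG_GET_REQ
      * carrying a sink descriptor for the REPLY and waits for GET_DONE,
      * while a PUT or REPLY maps the payload as the RDMA source, sends
      * IBLND_MSG_PUT_REQ and waits for PUT_ACK (or PUT_NAK).  In every
      * case the tx is handed to kiblnd_launch_tx() to find or create a
      * connection to the target NID. */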
1483 int
1484 kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
1485 {
1486         struct lnet_hdr *hdr = &lntmsg->msg_hdr;
1487         int               type = lntmsg->msg_type;
1488         struct lnet_process_id target = lntmsg->msg_target;
1489         int               target_is_router = lntmsg->msg_target_is_router;
1490         int               routing = lntmsg->msg_routing;
1491         unsigned int      payload_niov = lntmsg->msg_niov;
1492         struct kvec      *payload_iov = lntmsg->msg_iov;
1493         lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
1494         unsigned int      payload_offset = lntmsg->msg_offset;
1495         unsigned int      payload_nob = lntmsg->msg_len;
1496         kib_msg_t        *ibmsg;
1497         kib_rdma_desc_t  *rd;
1498         kib_tx_t         *tx;
1499         int               nob;
1500         int               rc;
1501
1502         /* NB 'private' is different depending on what we're sending.... */
1503
1504         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
1505                payload_nob, payload_niov, libcfs_id2str(target));
1506
1507         LASSERT (payload_nob == 0 || payload_niov > 0);
1508         LASSERT (payload_niov <= LNET_MAX_IOV);
1509
1510         /* Thread context */
1511         LASSERT (!in_interrupt());
1512         /* payload is either all vaddrs or all pages */
1513         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
1514
1515         switch (type) {
1516         default:
1517                 LBUG();
1518                 return (-EIO);
1519
1520         case LNET_MSG_ACK:
1521                 LASSERT (payload_nob == 0);
1522                 break;
1523
1524         case LNET_MSG_GET:
1525                 if (routing || target_is_router)
1526                         break;                  /* send IMMEDIATE */
1527
1528                 /* is the REPLY message too small for RDMA? */
1529                 nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
1530                 if (nob <= IBLND_MSG_SIZE)
1531                         break;                  /* send IMMEDIATE */
1532
1533                 tx = kiblnd_get_idle_tx(ni, target.nid);
1534                 if (tx == NULL) {
1535                         CERROR("Can't allocate txd for GET to %s\n",
1536                                libcfs_nid2str(target.nid));
1537                         return -ENOMEM;
1538                 }
1539
1540                 ibmsg = tx->tx_msg;
1541                 rd = &ibmsg->ibm_u.get.ibgm_rd;
1542                 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
1543                         rc = kiblnd_setup_rd_iov(ni, tx, rd,
1544                                                  lntmsg->msg_md->md_niov,
1545                                                  lntmsg->msg_md->md_iov.iov,
1546                                                  0, lntmsg->msg_md->md_length);
1547                 else
1548                         rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1549                                                   lntmsg->msg_md->md_niov,
1550                                                   lntmsg->msg_md->md_iov.kiov,
1551                                                   0, lntmsg->msg_md->md_length);
1552                 if (rc != 0) {
1553                         CERROR("Can't setup GET sink for %s: %d\n",
1554                                libcfs_nid2str(target.nid), rc);
1555                         kiblnd_tx_done(tx);
1556                         return -EIO;
1557                 }
1558
1559                 nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
1560                 ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
1561                 ibmsg->ibm_u.get.ibgm_hdr = *hdr;
1562
1563                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
1564
1565                 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
1566                 if (tx->tx_lntmsg[1] == NULL) {
1567                         CERROR("Can't create reply for GET -> %s\n",
1568                                libcfs_nid2str(target.nid));
1569                         kiblnd_tx_done(tx);
1570                         return -EIO;
1571                 }
1572
1573                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
1574                 tx->tx_waiting = 1;             /* waiting for GET_DONE */
1575                 kiblnd_launch_tx(ni, tx, target.nid);
1576                 return 0;
1577
1578         case LNET_MSG_REPLY:
1579         case LNET_MSG_PUT:
1580                 /* Is the payload small enough not to need RDMA? */
1581                 nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
1582                 if (nob <= IBLND_MSG_SIZE)
1583                         break;                  /* send IMMEDIATE */
1584
1585                 tx = kiblnd_get_idle_tx(ni, target.nid);
1586                 if (tx == NULL) {
1587                         CERROR("Can't allocate %s txd for %s\n",
1588                                type == LNET_MSG_PUT ? "PUT" : "REPLY",
1589                                libcfs_nid2str(target.nid));
1590                         return -ENOMEM;
1591                 }
1592
1593                 if (payload_kiov == NULL)
1594                         rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1595                                                  payload_niov, payload_iov,
1596                                                  payload_offset, payload_nob);
1597                 else
1598                         rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1599                                                   payload_niov, payload_kiov,
1600                                                   payload_offset, payload_nob);
1601                 if (rc != 0) {
1602                         CERROR("Can't setup PUT src for %s: %d\n",
1603                                libcfs_nid2str(target.nid), rc);
1604                         kiblnd_tx_done(tx);
1605                         return -EIO;
1606                 }
1607
1608                 ibmsg = tx->tx_msg;
1609                 ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
1610                 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
1611                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
1612
1613                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1614                 tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
1615                 kiblnd_launch_tx(ni, tx, target.nid);
1616                 return 0;
1617         }
1618
1619         /* send IMMEDIATE */
1620
1621         LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
1622                  <= IBLND_MSG_SIZE);
1623
1624         tx = kiblnd_get_idle_tx(ni, target.nid);
1625         if (tx == NULL) {
1626                 CERROR ("Can't send %d to %s: tx descs exhausted\n",
1627                         type, libcfs_nid2str(target.nid));
1628                 return -ENOMEM;
1629         }
1630
1631         ibmsg = tx->tx_msg;
1632         ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
1633
1634         if (payload_kiov != NULL)
1635                 lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
1636                                     offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1637                                     payload_niov, payload_kiov,
1638                                     payload_offset, payload_nob);
1639         else
1640                 lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
1641                                    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1642                                    payload_niov, payload_iov,
1643                                    payload_offset, payload_nob);
1644
1645         nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
1646         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1647
1648         tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
1649         kiblnd_launch_tx(ni, tx, target.nid);
1650         return 0;
1651 }
1652
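     /* kiblnd_reply() services an optimized GET: the requester's GET_REQ
      * (held in 'rx') already carries the sink descriptor, so the REPLY
      * payload is RDMA'd straight into it and the transfer completes with
      * IBLND_MSG_GET_DONE.  A zero-length reply is finalised immediately;
      * otherwise lntmsg is finalised when the RDMA completes. */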
1653 static void
1654 kiblnd_reply(struct lnet_ni *ni, kib_rx_t *rx, struct lnet_msg *lntmsg)
1655 {
1656         struct lnet_process_id target = lntmsg->msg_target;
1657         unsigned int      niov = lntmsg->msg_niov;
1658         struct kvec      *iov = lntmsg->msg_iov;
1659         lnet_kiov_t      *kiov = lntmsg->msg_kiov;
1660         unsigned int      offset = lntmsg->msg_offset;
1661         unsigned int      nob = lntmsg->msg_len;
1662         kib_tx_t         *tx;
1663         int               rc;
1664
1665         tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
1666         if (tx == NULL) {
1667                 CERROR("Can't get tx for REPLY to %s\n",
1668                        libcfs_nid2str(target.nid));
1669                 goto failed_0;
1670         }
1671
1672         if (nob == 0)
1673                 rc = 0;
1674         else if (kiov == NULL)
1675                 rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1676                                          niov, iov, offset, nob);
1677         else
1678                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1679                                           niov, kiov, offset, nob);
1680
1681         if (rc != 0) {
1682                 CERROR("Can't setup GET src for %s: %d\n",
1683                        libcfs_nid2str(target.nid), rc);
1684                 goto failed_1;
1685         }
1686
1687         rc = kiblnd_init_rdma(rx->rx_conn, tx,
1688                               IBLND_MSG_GET_DONE, nob,
1689                               &rx->rx_msg->ibm_u.get.ibgm_rd,
1690                               rx->rx_msg->ibm_u.get.ibgm_cookie);
1691         if (rc < 0) {
1692                 CERROR("Can't setup rdma for GET from %s: %d\n",
1693                        libcfs_nid2str(target.nid), rc);
1694                 goto failed_1;
1695         }
1696
1697         if (nob == 0) {
1698                 /* No RDMA: local completion may happen now! */
1699                 lnet_finalize(lntmsg, 0);
1700         } else {
1701                 /* RDMA: lnet_finalize(lntmsg) when it
1702                  * completes */
1703                 tx->tx_lntmsg[0] = lntmsg;
1704         }
1705
1706         kiblnd_queue_tx(tx, rx->rx_conn);
1707         return;
1708
1709  failed_1:
1710         kiblnd_tx_done(tx);
1711  failed_0:
1712         lnet_finalize(lntmsg, -EIO);
1713 }
1714
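     /* kiblnd_recv() delivers an incoming message to LNet.  IMMEDIATE
      * payloads are copied straight out of the rx buffer.  A PUT_REQ gets
      * a sink descriptor set up for 'mlen' bytes and is answered with
      * PUT_ACK (or PUT_NAK when there is nothing to receive or setup
      * fails); its rx credit is withheld because the buffer is reserved
      * for the PUT_DONE.  A GET_REQ either RDMAs the matching reply via
      * kiblnd_reply() or is completed with GET_DONE/-ENODATA.  The rx is
      * reposted before returning. */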
1715 int
1716 kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
1717             int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
1718             unsigned int offset, unsigned int mlen, unsigned int rlen)
1719 {
1720         kib_rx_t    *rx = private;
1721         kib_msg_t   *rxmsg = rx->rx_msg;
1722         kib_conn_t  *conn = rx->rx_conn;
1723         kib_tx_t    *tx;
1724         __u64        ibprm_cookie;
1725         int          nob;
1726         int          post_credit = IBLND_POSTRX_PEER_CREDIT;
1727         int          rc = 0;
1728
1729         LASSERT (mlen <= rlen);
1730         LASSERT (!in_interrupt());
1731         /* Either all pages or all vaddrs */
1732         LASSERT (!(kiov != NULL && iov != NULL));
1733
1734         switch (rxmsg->ibm_type) {
1735         default:
1736                 LBUG();
1737
1738         case IBLND_MSG_IMMEDIATE:
1739                 nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
1740                 if (nob > rx->rx_nob) {
1741                         CERROR ("Immediate message from %s too big: %d(%d)\n",
1742                                 libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
1743                                 nob, rx->rx_nob);
1744                         rc = -EPROTO;
1745                         break;
1746                 }
1747
1748                 if (kiov != NULL)
1749                         lnet_copy_flat2kiov(niov, kiov, offset,
1750                                             IBLND_MSG_SIZE, rxmsg,
1751                                             offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1752                                             mlen);
1753                 else
1754                         lnet_copy_flat2iov(niov, iov, offset,
1755                                            IBLND_MSG_SIZE, rxmsg,
1756                                            offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1757                                            mlen);
1758                 lnet_finalize(lntmsg, 0);
1759                 break;
1760
1761         case IBLND_MSG_PUT_REQ: {
1762                 kib_msg_t       *txmsg;
1763                 kib_rdma_desc_t *rd;
1764                 ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
1765
1766                 if (mlen == 0) {
1767                         lnet_finalize(lntmsg, 0);
1768                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1769                                                0, ibprm_cookie);
1770                         break;
1771                 }
1772
1773                 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
1774                 if (tx == NULL) {
1775                         CERROR("Can't allocate tx for %s\n",
1776                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
1777                         /* Not replying will break the connection */
1778                         rc = -ENOMEM;
1779                         break;
1780                 }
1781
1782                 txmsg = tx->tx_msg;
1783                 rd = &txmsg->ibm_u.putack.ibpam_rd;
1784                 if (kiov == NULL)
1785                         rc = kiblnd_setup_rd_iov(ni, tx, rd,
1786                                                  niov, iov, offset, mlen);
1787                 else
1788                         rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1789                                                   niov, kiov, offset, mlen);
1790                 if (rc != 0) {
1791                         CERROR("Can't setup PUT sink for %s: %d\n",
1792                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
1793                         kiblnd_tx_done(tx);
1794                         /* tell peer_ni it's over */
1795                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
1796                                                rc, ibprm_cookie);
1797                         break;
1798                 }
1799
1800                 nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
1801                 txmsg->ibm_u.putack.ibpam_src_cookie = ibprm_cookie;
1802                 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
1803
1804                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
1805
1806                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1807                 tx->tx_waiting = 1;             /* waiting for PUT_DONE */
1808                 kiblnd_queue_tx(tx, conn);
1809
1810                 /* reposted buffer reserved for PUT_DONE */
1811                 post_credit = IBLND_POSTRX_NO_CREDIT;
1812                 break;
1813                 }
1814
1815         case IBLND_MSG_GET_REQ:
1816                 if (lntmsg != NULL) {
1817                         /* Optimized GET; RDMA lntmsg's payload */
1818                         kiblnd_reply(ni, rx, lntmsg);
1819                 } else {
1820                         /* GET didn't match anything */
1821                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
1822                                                -ENODATA,
1823                                                rxmsg->ibm_u.get.ibgm_cookie);
1824                 }
1825                 break;
1826         }
1827
1828         kiblnd_post_rx(rx, post_credit);
1829         return rc;
1830 }
1831
1832 int
1833 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
1834 {
1835         struct task_struct *task = kthread_run(fn, arg, name);
1836
1837         if (IS_ERR(task))
1838                 return PTR_ERR(task);
1839
1840         atomic_inc(&kiblnd_data.kib_nthreads);
1841         return 0;
1842 }
1843
1844 static void
1845 kiblnd_thread_fini (void)
1846 {
1847         atomic_dec (&kiblnd_data.kib_nthreads);
1848 }
1849
1850 static void
1851 kiblnd_peer_alive (kib_peer_ni_t *peer_ni)
1852 {
1853         /* This is racy, but everyone's only writing cfs_time_current() */
1854         peer_ni->ibp_last_alive = cfs_time_current();
1855         smp_mb();
1856 }
1857
1858 static void
1859 kiblnd_peer_notify (kib_peer_ni_t *peer_ni)
1860 {
1861         int           error = 0;
1862         cfs_time_t    last_alive = 0;
1863         unsigned long flags;
1864
1865         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1866
1867         if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
1868                 error = peer_ni->ibp_error;
1869                 peer_ni->ibp_error = 0;
1870
1871                 last_alive = peer_ni->ibp_last_alive;
1872         }
1873
1874         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1875
1876         if (error != 0)
1877                 lnet_notify(peer_ni->ibp_ni,
1878                             peer_ni->ibp_nid, 0, last_alive);
1879 }
1880
1881 void
1882 kiblnd_close_conn_locked (kib_conn_t *conn, int error)
1883 {
1884         /* This just does the immediate housekeeping.  'error' is zero for a
1885          * normal shutdown which can happen only after the connection has been
1886          * established.  If the connection is established, schedule the
1887          * connection to be finished off by the connd.  Otherwise the connd is
1888          * already dealing with it (either to set it up or tear it down).
1889          * Caller holds kib_global_lock exclusively in irq context */
1890         kib_peer_ni_t       *peer_ni = conn->ibc_peer;
1891         kib_dev_t        *dev;
1892         unsigned long     flags;
1893
1894         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1895
1896         if (error != 0 && conn->ibc_comms_error == 0)
1897                 conn->ibc_comms_error = error;
1898
1899         if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
1900                 return; /* already being handled  */
1901
1902         if (error == 0 &&
1903             list_empty(&conn->ibc_tx_noops) &&
1904             list_empty(&conn->ibc_tx_queue) &&
1905             list_empty(&conn->ibc_tx_queue_rsrvd) &&
1906             list_empty(&conn->ibc_tx_queue_nocred) &&
1907             list_empty(&conn->ibc_active_txs)) {
1908                 CDEBUG(D_NET, "closing conn to %s\n",
1909                        libcfs_nid2str(peer_ni->ibp_nid));
1910         } else {
1911                 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
1912                        libcfs_nid2str(peer_ni->ibp_nid), error,
1913                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
1914                        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
1915                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
1916                                                 "" : "(sending_rsrvd)",
1917                        list_empty(&conn->ibc_tx_queue_nocred) ?
1918                                                  "" : "(sending_nocred)",
1919                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
1920         }
1921
1922         dev = ((kib_net_t *)peer_ni->ibp_ni->ni_data)->ibn_dev;
1923         if (peer_ni->ibp_next_conn == conn)
1924                 /* clear next_conn so it won't be used */
1925                 peer_ni->ibp_next_conn = NULL;
1926         list_del(&conn->ibc_list);
1927         /* connd (see below) takes over ibc_list's ref */
1928
1929         if (list_empty(&peer_ni->ibp_conns) &&    /* no more conns */
1930             kiblnd_peer_active(peer_ni)) {         /* still in peer_ni table */
1931                 kiblnd_unlink_peer_locked(peer_ni);
1932
1933                 /* set/clear error on last conn */
1934                 peer_ni->ibp_error = conn->ibc_comms_error;
1935         }
1936
1937         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
1938
1939         if (error != 0 &&
1940             kiblnd_dev_can_failover(dev)) {
1941                 list_add_tail(&dev->ibd_fail_list,
1942                               &kiblnd_data.kib_failed_devs);
1943                 wake_up(&kiblnd_data.kib_failover_waitq);
1944         }
1945
1946         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
1947
1948         list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
1949         wake_up(&kiblnd_data.kib_connd_waitq);
1950
1951         spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
1952 }
1953
1954 void
1955 kiblnd_close_conn(kib_conn_t *conn, int error)
1956 {
1957         unsigned long flags;
1958
1959         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1960
1961         kiblnd_close_conn_locked(conn, error);
1962
1963         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1964 }
1965
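     /* Drain the rxs that arrived before the connection reached
      * ESTABLISHED.  The global lock is dropped around each
      * kiblnd_handle_rx() call and re-taken to pick the next rx off
      * ibc_early_rxs. */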
1966 static void
1967 kiblnd_handle_early_rxs(kib_conn_t *conn)
1968 {
1969         unsigned long    flags;
1970         kib_rx_t        *rx;
1971
1972         LASSERT(!in_interrupt());
1973         LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1974
1975         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1976         while (!list_empty(&conn->ibc_early_rxs)) {
1977                 rx = list_entry(conn->ibc_early_rxs.next,
1978                                     kib_rx_t, rx_list);
1979                 list_del(&rx->rx_list);
1980                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1981
1982                 kiblnd_handle_rx(rx);
1983
1984                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1985         }
1986         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1987 }
1988
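     /* Fail every tx on 'txs' with -ECONNABORTED.  Under ibc_lock, txs
      * that are not actively being sent are moved onto a local zombie
      * list and completed outside the lock; txs with a send still
      * outstanding are only marked failed here and are completed once
      * their sends finish. */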
1989 static void
1990 kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
1991 {
1992         struct list_head         zombies = LIST_HEAD_INIT(zombies);
1993         struct list_head        *tmp;
1994         struct list_head        *nxt;
1995         kib_tx_t                *tx;
1996
1997         spin_lock(&conn->ibc_lock);
1998
1999         list_for_each_safe(tmp, nxt, txs) {
2000                 tx = list_entry(tmp, kib_tx_t, tx_list);
2001
2002                 if (txs == &conn->ibc_active_txs) {
2003                         LASSERT(!tx->tx_queued);
2004                         LASSERT(tx->tx_waiting ||
2005                                 tx->tx_sending != 0);
2006                 } else {
2007                         LASSERT(tx->tx_queued);
2008                 }
2009
2010                 tx->tx_status = -ECONNABORTED;
2011                 tx->tx_waiting = 0;
2012
2013                 if (tx->tx_sending == 0) {
2014                         tx->tx_queued = 0;
2015                         list_del(&tx->tx_list);
2016                         list_add(&tx->tx_list, &zombies);
2017                 }
2018         }
2019
2020         spin_unlock(&conn->ibc_lock);
2021
2022         kiblnd_txlist_done(&zombies, -ECONNABORTED);
2023 }
2024
2025 static void
2026 kiblnd_finalise_conn (kib_conn_t *conn)
2027 {
2028         LASSERT (!in_interrupt());
2029         LASSERT (conn->ibc_state > IBLND_CONN_INIT);
2030
2031         kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
2032
2033         /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
2034          * for connections that didn't get as far as being connected, because
2035          * rdma_disconnect() does this for free. */
2036         kiblnd_abort_receives(conn);
2037
2038         /* Complete all tx descs not waiting for sends to complete.
2039          * NB we should be safe from RDMA now that the QP has changed state */
2040
2041         kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
2042         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
2043         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
2044         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
2045         kiblnd_abort_txs(conn, &conn->ibc_active_txs);
2046
2047         kiblnd_handle_early_rxs(conn);
2048 }
2049
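     /* A connection attempt (active or passive) to 'peer_ni' has failed.
      * Drop the corresponding connecting/accepting count; if no other
      * attempt is in flight and the peer_ni has no connections, unlink
      * it, record the error, notify LNet and complete any blocked txs
      * with -EHOSTUNREACH. */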
2050 static void
2051 kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error)
2052 {
2053         struct list_head zombies = LIST_HEAD_INIT(zombies);
2054         unsigned long   flags;
2055
2056         LASSERT (error != 0);
2057         LASSERT (!in_interrupt());
2058
2059         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2060
2061         if (active) {
2062                 LASSERT(peer_ni->ibp_connecting > 0);
2063                 peer_ni->ibp_connecting--;
2064         } else {
2065                 LASSERT (peer_ni->ibp_accepting > 0);
2066                 peer_ni->ibp_accepting--;
2067         }
2068
2069         if (kiblnd_peer_connecting(peer_ni)) {
2070                 /* another connection attempt under way... */
2071                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2072                                         flags);
2073                 return;
2074         }
2075
2076         peer_ni->ibp_reconnected = 0;
2077         if (list_empty(&peer_ni->ibp_conns)) {
2078                 /* Take peer_ni's blocked transmits to complete with error */
2079                 list_add(&zombies, &peer_ni->ibp_tx_queue);
2080                 list_del_init(&peer_ni->ibp_tx_queue);
2081
2082                 if (kiblnd_peer_active(peer_ni))
2083                         kiblnd_unlink_peer_locked(peer_ni);
2084
2085                 peer_ni->ibp_error = error;
2086         } else {
2087                 /* Can't have blocked transmits if there are connections */
2088                 LASSERT(list_empty(&peer_ni->ibp_tx_queue));
2089         }
2090
2091         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2092
2093         kiblnd_peer_notify(peer_ni);
2094
2095         if (list_empty(&zombies))
2096                 return;
2097
2098         CNETERR("Deleting messages for %s: connection failed\n",
2099                 libcfs_nid2str(peer_ni->ibp_nid));
2100
2101         kiblnd_txlist_done(&zombies, -EHOSTUNREACH);
2102 }
2103
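     /* The handshake for 'conn' has finished with 'status'.  On failure
      * the peer_ni is told via kiblnd_peer_connect_failed() and the conn
      * is finalised.  On success the conn becomes ESTABLISHED and joins
      * ibp_conns (closing any stale conns with a different version or
      * incarnation), the txs that were blocked on the peer_ni are queued
      * on it, and any early rxs are handled. */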
2104 static void
2105 kiblnd_connreq_done(kib_conn_t *conn, int status)
2106 {
2107         kib_peer_ni_t    *peer_ni = conn->ibc_peer;
2108         kib_tx_t         *tx;
2109         struct list_head txs;
2110         unsigned long    flags;
2111         int              active;
2112
2113         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2114
2115         CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
2116                libcfs_nid2str(peer_ni->ibp_nid), active,
2117                conn->ibc_version, status);
2118
2119         LASSERT (!in_interrupt());
2120         LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
2121                   peer_ni->ibp_connecting > 0) ||
2122                  (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
2123                   peer_ni->ibp_accepting > 0));
2124
2125         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
2126         conn->ibc_connvars = NULL;
2127
2128         if (status != 0) {
2129                 /* failed to establish connection */
2130                 kiblnd_peer_connect_failed(peer_ni, active, status);
2131                 kiblnd_finalise_conn(conn);
2132                 return;
2133         }
2134
2135         /* connection established */
2136         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2137
2138         conn->ibc_last_send = jiffies;
2139         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
2140         kiblnd_peer_alive(peer_ni);
2141
2142         /* Add conn to peer_ni's list and nuke any dangling conns from a different
2143          * peer_ni instance... */
2144         kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
2145         list_add(&conn->ibc_list, &peer_ni->ibp_conns);
2146         peer_ni->ibp_reconnected = 0;
2147         if (active)
2148                 peer_ni->ibp_connecting--;
2149         else
2150                 peer_ni->ibp_accepting--;
2151
2152         if (peer_ni->ibp_version == 0) {
2153                 peer_ni->ibp_version     = conn->ibc_version;
2154                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2155         }
2156
2157         if (peer_ni->ibp_version     != conn->ibc_version ||
2158             peer_ni->ibp_incarnation != conn->ibc_incarnation) {
2159                 kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version,
2160                                                 conn->ibc_incarnation);
2161                 peer_ni->ibp_version     = conn->ibc_version;
2162                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2163         }
2164
2165         /* grab pending txs while I have the lock */
2166         list_add(&txs, &peer_ni->ibp_tx_queue);
2167         list_del_init(&peer_ni->ibp_tx_queue);
2168
2169         if (!kiblnd_peer_active(peer_ni) ||        /* peer_ni has been deleted */
2170             conn->ibc_comms_error != 0) {       /* error has happened already */
2171
2172                 /* start to shut down connection */
2173                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
2174                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2175
2176                 kiblnd_txlist_done(&txs, -ECONNABORTED);
2177
2178                 return;
2179         }
2180
2181         /* +1 ref for myself, this connection is visible to other threads
2182          * now, refcount of peer:ibp_conns can be released by connection
2183          * close from either a different thread, or the calling of
2184          * kiblnd_check_sends_locked() below. See bz21911 for details.
2185          */
2186         kiblnd_conn_addref(conn);
2187         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2188
2189         /* Schedule blocked txs
2190          * Note: if we are running with conns_per_peer > 1, these blocked
2191          * txs will all get scheduled to the first connection which gets
2192          * scheduled.  We won't be using round robin on this first batch.
2193          */
2194         spin_lock(&conn->ibc_lock);
2195         while (!list_empty(&txs)) {
2196                 tx = list_entry(txs.next, kib_tx_t, tx_list);
2197                 list_del(&tx->tx_list);
2198
2199                 kiblnd_queue_tx_locked(tx, conn);
2200         }
2201         kiblnd_check_sends_locked(conn);
2202         spin_unlock(&conn->ibc_lock);
2203
2204         /* schedule blocked rxs */
2205         kiblnd_handle_early_rxs(conn);
2206         kiblnd_conn_decref(conn);
2207 }
2208
2209 static void
2210 kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
2211 {
2212         int          rc;
2213
2214         rc = rdma_reject(cmid, rej, sizeof(*rej));
2215
2216         if (rc != 0)
2217                 CWARN("Error %d sending reject\n", rc);
2218 }
2219
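     /* Handle an incoming connection request on the listener.  The
      * CONNREQ in the private data is validated (magic, version, dst
      * NID/stamp, queue depth, max_frags), a peer_ni is found or created
      * with any connection race resolved, then a conn is created in
      * PASSIVE_WAIT and rdma_accept() is called with a CONNACK in the
      * private data.  Any failure is reported back to the peer with an
      * appropriately filled kib_rej_t. */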
2220 static int
2221 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2222 {
2223         rwlock_t                *g_lock = &kiblnd_data.kib_global_lock;
2224         kib_msg_t             *reqmsg = priv;
2225         kib_msg_t             *ackmsg;
2226         kib_dev_t             *ibdev;
2227         kib_peer_ni_t            *peer_ni;
2228         kib_peer_ni_t            *peer2;
2229         kib_conn_t            *conn;
2230         struct lnet_ni             *ni  = NULL;
2231         kib_net_t             *net = NULL;
2232         lnet_nid_t             nid;
2233         struct rdma_conn_param cp;
2234         kib_rej_t              rej;
2235         int                    version = IBLND_MSG_VERSION;
2236         unsigned long          flags;
2237         int                    rc;
2238         struct sockaddr_in    *peer_addr;
2239         LASSERT (!in_interrupt());
2240
2241         /* cmid inherits 'context' from the corresponding listener id */
2242         ibdev = (kib_dev_t *)cmid->context;
2243         LASSERT (ibdev != NULL);
2244
2245         memset(&rej, 0, sizeof(rej));
2246         rej.ibr_magic                = IBLND_MSG_MAGIC;
2247         rej.ibr_why                  = IBLND_REJECT_FATAL;
2248         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
2249
2250         peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
2251         if (*kiblnd_tunables.kib_require_priv_port &&
2252             ntohs(peer_addr->sin_port) >= PROT_SOCK) {
2253                 __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
2254                 CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
2255                        &ip, ntohs(peer_addr->sin_port));
2256                 goto failed;
2257         }
2258
2259         if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
2260                 CERROR("Short connection request\n");
2261                 goto failed;
2262         }
2263
2264         /* Future protocol version compatibility support!  If the
2265          * o2iblnd-specific protocol changes, or when LNET unifies
2266          * protocols over all LNDs, the initial connection will
2267          * negotiate a protocol version.  I trap this here to avoid
2268          * console errors; the reject tells the peer_ni which protocol I
2269          * speak. */
2270         if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
2271             reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
2272                 goto failed;
2273         if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
2274             reqmsg->ibm_version != IBLND_MSG_VERSION &&
2275             reqmsg->ibm_version != IBLND_MSG_VERSION_1)
2276                 goto failed;
2277         if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
2278             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
2279             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
2280                 goto failed;
2281
2282         rc = kiblnd_unpack_msg(reqmsg, priv_nob);
2283         if (rc != 0) {
2284                 CERROR("Can't parse connection request: %d\n", rc);
2285                 goto failed;
2286         }
2287
2288         nid = reqmsg->ibm_srcnid;
2289         ni  = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
2290
2291         if (ni != NULL) {
2292                 net = (kib_net_t *)ni->ni_data;
2293                 rej.ibr_incarnation = net->ibn_incarnation;
2294         }
2295
2296         if (ni == NULL ||                         /* no matching net */
2297             ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
2298             net->ibn_dev != ibdev) {              /* wrong device */
2299                 CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): "
2300                        "bad dst nid %s\n", libcfs_nid2str(nid),
2301                        ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
2302                        ibdev->ibd_ifname, ibdev->ibd_nnets,
2303                         &ibdev->ibd_ifip,
2304                        libcfs_nid2str(reqmsg->ibm_dstnid));
2305
2306                 goto failed;
2307         }
2308
2309         /* check time stamp as soon as possible */
2310         if (reqmsg->ibm_dststamp != 0 &&
2311             reqmsg->ibm_dststamp != net->ibn_incarnation) {
2312                 CWARN("Stale connection request\n");
2313                 rej.ibr_why = IBLND_REJECT_CONN_STALE;
2314                 goto failed;
2315         }
2316
2317         /* I can accept peer_ni's version */
2318         version = reqmsg->ibm_version;
2319
2320         if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
2321                 CERROR("Unexpected connreq msg type: %x from %s\n",
2322                        reqmsg->ibm_type, libcfs_nid2str(nid));
2323                 goto failed;
2324         }
2325
2326         if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
2327             kiblnd_msg_queue_size(version, ni)) {
2328                 CERROR("Can't accept conn from %s, queue depth too large: "
2329                        "%d (<=%d wanted)\n",
2330                        libcfs_nid2str(nid),
2331                        reqmsg->ibm_u.connparams.ibcp_queue_depth,
2332                        kiblnd_msg_queue_size(version, ni));
2333
2334                 if (version == IBLND_MSG_VERSION)
2335                         rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
2336
2337                 goto failed;
2338         }
2339
2340         if (reqmsg->ibm_u.connparams.ibcp_max_frags >
2341             kiblnd_rdma_frags(version, ni)) {
2342                 CWARN("Can't accept conn from %s (version %x): "
2343                       "max_frags %d too large (%d wanted)\n",
2344                       libcfs_nid2str(nid), version,
2345                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2346                       kiblnd_rdma_frags(version, ni));
2347
2348                 if (version >= IBLND_MSG_VERSION)
2349                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2350
2351                 goto failed;
2352         } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
2353                    kiblnd_rdma_frags(version, ni) &&
2354                    net->ibn_fmr_ps == NULL) {
2355                 CWARN("Can't accept conn from %s (version %x): "
2356                       "max_frags %d incompatible without FMR pool "
2357                       "(%d wanted)\n",
2358                       libcfs_nid2str(nid), version,
2359                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2360                       kiblnd_rdma_frags(version, ni));
2361
2362                 if (version == IBLND_MSG_VERSION)
2363                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2364
2365                 goto failed;
2366         }
2367
2368         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2369                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
2370                        libcfs_nid2str(nid),
2371                        reqmsg->ibm_u.connparams.ibcp_max_msg_size,
2372                        IBLND_MSG_SIZE);
2373                 goto failed;
2374         }
2375
2376         /* assume 'nid' is a new peer_ni; create one */
2377         rc = kiblnd_create_peer(ni, &peer_ni, nid);
2378         if (rc != 0) {
2379                 CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid));
2380                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2381                 goto failed;
2382         }
2383
2384         /* We have validated the peer's parameters so use those */
2385         peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
2386         peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
2387
2388         write_lock_irqsave(g_lock, flags);
2389
2390         peer2 = kiblnd_find_peer_locked(ni, nid);
2391         if (peer2 != NULL) {
2392                 if (peer2->ibp_version == 0) {
2393                         peer2->ibp_version     = version;
2394                         peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2395                 }
2396
2397                 /* not the guy I've talked with */
2398                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
2399                     peer2->ibp_version     != version) {
2400                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
2401
2402                         if (kiblnd_peer_active(peer2)) {
2403                                 peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2404                                 peer2->ibp_version = version;
2405                         }
2406                         write_unlock_irqrestore(g_lock, flags);
2407
2408                         CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
2409                               libcfs_nid2str(nid), peer2->ibp_version, version,
2410                               peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
2411
2412                         kiblnd_peer_decref(peer_ni);
2413                         rej.ibr_why = IBLND_REJECT_CONN_STALE;
2414                         goto failed;
2415                 }
2416
2417                 /* Tie-break connection race in favour of the higher NID.
2418                  * If we keep running into a race condition multiple times,
2419                  * we have to assume that the connection attempt with the
2420                  * higher NID is stuck in a connecting state and will never
2421                  * recover.  As such, we pass through this if-block and let
2422                  * the lower NID connection win so we can move forward.
2423                  */
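                     /* For example, when nodes A and B connect to each other
                      * simultaneously and B has the higher NID, B rejects A's
                      * request with IBLND_REJECT_CONN_RACE and keeps its own
                      * active connect while A accepts B's request.  After
                      * MAX_CONN_RACES_BEFORE_ABORT such rejections B gives up
                      * and lets A's (lower NID) connection through. */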
2424                 if (peer2->ibp_connecting != 0 &&
2425                     nid < ni->ni_nid && peer2->ibp_races <
2426                     MAX_CONN_RACES_BEFORE_ABORT) {
2427                         peer2->ibp_races++;
2428                         write_unlock_irqrestore(g_lock, flags);
2429
2430                         CDEBUG(D_NET, "Conn race %s\n",
2431                                libcfs_nid2str(peer2->ibp_nid));
2432
2433                         kiblnd_peer_decref(peer_ni);
2434                         rej.ibr_why = IBLND_REJECT_CONN_RACE;
2435                         goto failed;
2436                 }
2437                 if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
2438                         CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
2439                                 libcfs_nid2str(peer2->ibp_nid),
2440                                 MAX_CONN_RACES_BEFORE_ABORT);
2441                 /*
2442                  * a passive connection is allowed even if this peer_ni is
2443                  * waiting for reconnection.
2444                  */
2445                 peer2->ibp_reconnecting = 0;
2446                 peer2->ibp_races = 0;
2447                 peer2->ibp_accepting++;
2448                 kiblnd_peer_addref(peer2);
2449
2450                 /* Race with kiblnd_launch_tx (active connect) to create peer_ni
2451                  * so copy validated parameters since we now know what the
2452                  * peer_ni's limits are */
2453                 peer2->ibp_max_frags = peer_ni->ibp_max_frags;
2454                 peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
2455
2456                 write_unlock_irqrestore(g_lock, flags);
2457                 kiblnd_peer_decref(peer_ni);
2458                 peer_ni = peer2;
2459         } else {
2460                 /* Brand new peer_ni */
2461                 LASSERT (peer_ni->ibp_accepting == 0);
2462                 LASSERT (peer_ni->ibp_version == 0 &&
2463                          peer_ni->ibp_incarnation == 0);
2464
2465                 peer_ni->ibp_accepting   = 1;
2466                 peer_ni->ibp_version     = version;
2467                 peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
2468
2469                 /* I have a ref on ni that prevents it being shutdown */
2470                 LASSERT (net->ibn_shutdown == 0);
2471
2472                 kiblnd_peer_addref(peer_ni);
2473                 list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
2474
2475                 write_unlock_irqrestore(g_lock, flags);
2476         }
2477
2478         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, version);
2479         if (conn == NULL) {
2480                 kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
2481                 kiblnd_peer_decref(peer_ni);
2482                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2483                 goto failed;
2484         }
2485
2486         /* conn now "owns" cmid, so I return success from here on to ensure the
2487          * CM callback doesn't destroy cmid. */
2488         conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
2489         conn->ibc_credits          = conn->ibc_queue_depth;
2490         conn->ibc_reserved_credits = conn->ibc_queue_depth;
2491         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
2492                 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
2493
2494         ackmsg = &conn->ibc_connvars->cv_msg;
2495         memset(ackmsg, 0, sizeof(*ackmsg));
2496
2497         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
2498                         sizeof(ackmsg->ibm_u.connparams));
2499         ackmsg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
2500         ackmsg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
2501         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2502
2503         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
2504
2505         memset(&cp, 0, sizeof(cp));
2506         cp.private_data        = ackmsg;
2507         cp.private_data_len    = ackmsg->ibm_nob;
2508         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
2509         cp.initiator_depth     = 0;
2510         cp.flow_control        = 1;
2511         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2512         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2513
2514         CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
2515
2516         rc = rdma_accept(cmid, &cp);
2517         if (rc != 0) {
2518                 CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
2519                 rej.ibr_version = version;
2520                 rej.ibr_why     = IBLND_REJECT_FATAL;
2521
2522                 kiblnd_reject(cmid, &rej);
2523                 kiblnd_connreq_done(conn, rc);
2524                 kiblnd_conn_decref(conn);
2525         }
2526
2527         lnet_ni_decref(ni);
2528         return 0;
2529
2530  failed:
2531         if (ni != NULL) {
2532                 rej.ibr_cp.ibcp_queue_depth =
2533                         kiblnd_msg_queue_size(version, ni);
2534                 rej.ibr_cp.ibcp_max_frags   = kiblnd_rdma_frags(version, ni);
2535                 lnet_ni_decref(ni);
2536         }
2537
2538         rej.ibr_version = version;
2539         kiblnd_reject(cmid, &rej);
2540
2541         return -ECONNREFUSED;
2542 }
2543
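     /* Decide whether connd should retry after the active connect on
      * 'conn' was rejected.  A retry only makes sense while txs are still
      * queued on the peer_ni (or its version changed) and no other
      * connect/accept is in progress.  For queue-depth and max_frags
      * rejections the peer's smaller values are adopted where possible
      * before the reconnect is scheduled via conn->ibc_reconnect. */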
2544 static void
2545 kiblnd_check_reconnect(kib_conn_t *conn, int version,
2546                        __u64 incarnation, int why, kib_connparams_t *cp)
2547 {
2548         rwlock_t        *glock = &kiblnd_data.kib_global_lock;
2549         kib_peer_ni_t   *peer_ni = conn->ibc_peer;
2550         char            *reason;
2551         int              msg_size = IBLND_MSG_SIZE;
2552         int              frag_num = -1;
2553         int              queue_dep = -1;
2554         bool             reconnect;
2555         unsigned long    flags;
2556
2557         LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2558         LASSERT(peer_ni->ibp_connecting > 0);   /* 'conn' at least */
2559
2560         if (cp) {
2561                 msg_size        = cp->ibcp_max_msg_size;
2562                 frag_num        = cp->ibcp_max_frags;
2563                 queue_dep       = cp->ibcp_queue_depth;
2564         }
2565
2566         write_lock_irqsave(glock, flags);
2567         /* retry connection if it's still needed and no other connection
2568          * attempts (active or passive) are in progress
2569          * NB: reconnect is still needed even when ibp_tx_queue is
2570          * empty if ibp_version != version because reconnect may be
2571          * initiated by kiblnd_query() */
2572         reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
2573                      peer_ni->ibp_version != version) &&
2574                     peer_ni->ibp_connecting &&
2575                     peer_ni->ibp_accepting == 0;
2576         if (!reconnect) {
2577                 reason = "no need";
2578                 goto out;
2579         }
2580
2581         switch (why) {
2582         default:
2583                 reason = "Unknown";
2584                 break;
2585
2586         case IBLND_REJECT_RDMA_FRAGS: {
2587                 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2588
2589                 if (!cp) {
2590                         reason = "can't negotiate max frags";
2591                         goto out;
2592                 }
2593                 tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2594                 if (!tunables->lnd_map_on_demand) {
2595                         reason = "map_on_demand must be enabled";
2596                         goto out;
2597                 }
2598                 if (conn->ibc_max_frags <= frag_num) {
2599                         reason = "unsupported max frags";
2600                         goto out;
2601                 }
2602
2603                 peer_ni->ibp_max_frags = frag_num;
2604                 reason = "rdma fragments";
2605                 break;
2606         }
2607         case IBLND_REJECT_MSG_QUEUE_SIZE:
2608                 if (!cp) {
2609                         reason = "can't negotiate queue depth";
2610                         goto out;
2611                 }
2612                 if (conn->ibc_queue_depth <= queue_dep) {
2613                         reason = "unsupported queue depth";
2614                         goto out;
2615                 }
2616
2617                 peer_ni->ibp_queue_depth = queue_dep;
2618                 reason = "queue depth";
2619                 break;
2620
2621         case IBLND_REJECT_CONN_STALE:
2622                 reason = "stale";
2623                 break;
2624
2625         case IBLND_REJECT_CONN_RACE:
2626                 reason = "conn race";
2627                 break;
2628
2629         case IBLND_REJECT_CONN_UNCOMPAT:
2630                 reason = "version negotiation";
2631                 break;
2632
2633         case IBLND_REJECT_INVALID_SRV_ID:
2634                 reason = "invalid service id";
2635                 break;
2636         }
2637
2638         conn->ibc_reconnect = 1;
2639         peer_ni->ibp_reconnecting++;
2640         peer_ni->ibp_version = version;
2641         if (incarnation != 0)
2642                 peer_ni->ibp_incarnation = incarnation;
2643  out:
2644         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2645
2646         CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
2647                 libcfs_nid2str(peer_ni->ibp_nid),
2648                 reconnect ? "reconnect" : "don't reconnect",
2649                 reason, IBLND_MSG_VERSION, version, msg_size,
2650                 conn->ibc_queue_depth, queue_dep,
2651                 conn->ibc_max_frags, frag_num);
2652         /*
2653          * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer_ni
2654          * while destroying the zombie
2655          */
2656 }
2657
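     /* An active connect was rejected by the CM.  Stale-connection and
      * invalid-service-id rejects, and consumer-defined kib_rej_t rejects
      * (queue size, frags, stale, race, version), are fed through
      * kiblnd_check_reconnect(); everything else is just logged.  The
      * connreq is then completed with -ECONNREFUSED. */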
2658 static void
2659 kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
2660 {
2661         kib_peer_ni_t    *peer_ni = conn->ibc_peer;
2662
2663         LASSERT (!in_interrupt());
2664         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2665
2666         switch (reason) {
2667         case IB_CM_REJ_STALE_CONN:
2668                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2669                                        IBLND_REJECT_CONN_STALE, NULL);
2670                 break;
2671
2672         case IB_CM_REJ_INVALID_SERVICE_ID:
2673                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2674                                        IBLND_REJECT_INVALID_SRV_ID, NULL);
2675                 CNETERR("%s rejected: no listener at %d\n",
2676                         libcfs_nid2str(peer_ni->ibp_nid),
2677                         *kiblnd_tunables.kib_service);
2678                 break;
2679
2680         case IB_CM_REJ_CONSUMER_DEFINED:
2681                 if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
2682                         kib_rej_t        *rej         = priv;
2683                         kib_connparams_t *cp          = NULL;
2684                         int               flip        = 0;
2685                         __u64             incarnation = -1;
2686
2687                         /* NB. default incarnation is -1 because:
2688                          * a) V1 will ignore dst incarnation in connreq.
2689                          * b) V2 will provide incarnation while rejecting me,
2690                          *    -1 will be overwritten.
2691                          *
2692                          * if I try to connect to a V1 peer_ni with the V2 protocol,
2693                          * it rejects me and then upgrades to V2.  Knowing nothing
2694                          * about the upgrade, I try to reconnect with V1; the
2695                          * upgraded peer_ni can then tell I'm talking to the old
2696                          * version and rejects me (incarnation is -1).
2697                          */
2698
2699                         if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
2700                             rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
2701                                 __swab32s(&rej->ibr_magic);
2702                                 __swab16s(&rej->ibr_version);
2703                                 flip = 1;
2704                         }
2705
2706                         if (priv_nob >= sizeof(kib_rej_t) &&
2707                             rej->ibr_version > IBLND_MSG_VERSION_1) {
2708                                 /* priv_nob is always 148 in current version
2709                                  * of OFED, so we still need to check version.
2710                                  * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
2711                                 cp = &rej->ibr_cp;
2712
2713                                 if (flip) {
2714                                         __swab64s(&rej->ibr_incarnation);
2715                                         __swab16s(&cp->ibcp_queue_depth);
2716                                         __swab16s(&cp->ibcp_max_frags);
2717                                         __swab32s(&cp->ibcp_max_msg_size);
2718                                 }
2719
2720                                 incarnation = rej->ibr_incarnation;
2721                         }
2722
2723                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
2724                             rej->ibr_magic != LNET_PROTO_MAGIC) {
2725                                 CERROR("%s rejected: consumer defined fatal error\n",
2726                                        libcfs_nid2str(peer_ni->ibp_nid));
2727                                 break;
2728                         }
2729
2730                         if (rej->ibr_version != IBLND_MSG_VERSION &&
2731                             rej->ibr_version != IBLND_MSG_VERSION_1) {
2732                                 CERROR("%s rejected: o2iblnd version %x error\n",
2733                                        libcfs_nid2str(peer_ni->ibp_nid),
2734                                        rej->ibr_version);
2735                                 break;
2736                         }
2737
2738                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
2739                             rej->ibr_version == IBLND_MSG_VERSION_1) {
2740                                 CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
2741                                        libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version);
2742
2743                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
2744                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
2745                         }
2746
2747                         switch (rej->ibr_why) {
2748                         case IBLND_REJECT_CONN_RACE:
2749                         case IBLND_REJECT_CONN_STALE:
2750                         case IBLND_REJECT_CONN_UNCOMPAT:
2751                         case IBLND_REJECT_MSG_QUEUE_SIZE:
2752                         case IBLND_REJECT_RDMA_FRAGS:
2753                                 kiblnd_check_reconnect(conn, rej->ibr_version,
2754                                                 incarnation, rej->ibr_why, cp);
2755                                 break;
2756
2757                         case IBLND_REJECT_NO_RESOURCES:
2758                                 CERROR("%s rejected: o2iblnd no resources\n",
2759                                        libcfs_nid2str(peer_ni->ibp_nid));
2760                                 break;
2761
2762                         case IBLND_REJECT_FATAL:
2763                                 CERROR("%s rejected: o2iblnd fatal error\n",
2764                                        libcfs_nid2str(peer_ni->ibp_nid));
2765                                 break;
2766
2767                         default:
2768                                 CERROR("%s rejected: o2iblnd reason %d\n",
2769                                        libcfs_nid2str(peer_ni->ibp_nid),
2770                                        rej->ibr_why);
2771                                 break;
2772                         }
2773                         break;
2774                 }
2775                 /* fall through */
2776         default:
2777                 CNETERR("%s rejected: reason %d, size %d\n",
2778                         libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
2779                 break;
2780         }
2781
2782         kiblnd_connreq_done(conn, -ECONNREFUSED);
2783 }
2784
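/*
 * Handle the CONNACK private data returned for an active connection attempt:
 * unpack it, check the peer_ni's negotiated queue depth, max_frags and max
 * message size against what we requested, and complete the connection.
 */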
2785 static void
2786 kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
2787 {
2788         kib_peer_ni_t    *peer_ni = conn->ibc_peer;
2789         struct lnet_ni *ni   = peer_ni->ibp_ni;
2790         kib_net_t     *net  = ni->ni_data;
2791         kib_msg_t     *msg  = priv;
2792         int            ver  = conn->ibc_version;
2793         int            rc   = kiblnd_unpack_msg(msg, priv_nob);
2794         unsigned long  flags;
2795
2796         LASSERT (net != NULL);
2797
2798         if (rc != 0) {
2799                 CERROR("Can't unpack connack from %s: %d\n",
2800                        libcfs_nid2str(peer_ni->ibp_nid), rc);
2801                 goto failed;
2802         }
2803
2804         if (msg->ibm_type != IBLND_MSG_CONNACK) {
2805                 CERROR("Unexpected message %d from %s\n",
2806                        msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid));
2807                 rc = -EPROTO;
2808                 goto failed;
2809         }
2810
2811         if (ver != msg->ibm_version) {
2812                 CERROR("%s replied version %x which differs from the "
2813                        "requested version %x\n",
2814                        libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver);
2815                 rc = -EPROTO;
2816                 goto failed;
2817         }
2818
2819         if (msg->ibm_u.connparams.ibcp_queue_depth >
2820             conn->ibc_queue_depth) {
2821                 CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
2822                        libcfs_nid2str(peer_ni->ibp_nid),
2823                        msg->ibm_u.connparams.ibcp_queue_depth,
2824                        conn->ibc_queue_depth);
2825                 rc = -EPROTO;
2826                 goto failed;
2827         }
2828
2829         if (msg->ibm_u.connparams.ibcp_max_frags >
2830             conn->ibc_max_frags) {
2831                 CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
2832                        libcfs_nid2str(peer_ni->ibp_nid),
2833                        msg->ibm_u.connparams.ibcp_max_frags,
2834                        conn->ibc_max_frags);
2835                 rc = -EPROTO;
2836                 goto failed;
2837         }
2838
2839         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2840                 CERROR("%s max message size %d too big (%d max)\n",
2841                        libcfs_nid2str(peer_ni->ibp_nid),
2842                        msg->ibm_u.connparams.ibcp_max_msg_size,
2843                        IBLND_MSG_SIZE);
2844                 rc = -EPROTO;
2845                 goto failed;
2846         }
2847
2848         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2849         if (msg->ibm_dstnid == ni->ni_nid &&
2850             msg->ibm_dststamp == net->ibn_incarnation)
2851                 rc = 0;
2852         else
2853                 rc = -ESTALE;
2854         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2855
2856         if (rc != 0) {
2857                 CERROR("Bad connection reply from %s, rc = %d, "
2858                        "version: %x max_frags: %d\n",
2859                        libcfs_nid2str(peer_ni->ibp_nid), rc,
2860                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
2861                 goto failed;
2862         }
2863
2864         conn->ibc_incarnation      = msg->ibm_srcstamp;
2865         conn->ibc_credits          = msg->ibm_u.connparams.ibcp_queue_depth;
2866         conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
2867         conn->ibc_queue_depth      = msg->ibm_u.connparams.ibcp_queue_depth;
2868         conn->ibc_max_frags        = msg->ibm_u.connparams.ibcp_max_frags;
2869         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
2870                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
2871
2872         kiblnd_connreq_done(conn, 0);
2873         return;
2874
2875  failed:
2876         /* NB My QP has already established itself, so I handle anything going
2877          * wrong here by setting ibc_comms_error.
2878          * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
2879          * immediately tears it down. */
2880
2881         LASSERT (rc != 0);
2882         conn->ibc_comms_error = rc;
2883         kiblnd_connreq_done(conn, 0);
2884 }
2885
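/*
 * Initiate an active connection: create the conn, build the CONNREQ message
 * as CM private data and call rdma_connect().  Once the conn owns the cmid
 * this always returns 0 so the CM callback won't destroy it.
 */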
2886 static int
2887 kiblnd_active_connect (struct rdma_cm_id *cmid)
2888 {
2889         kib_peer_ni_t              *peer_ni = (kib_peer_ni_t *)cmid->context;
2890         kib_conn_t              *conn;
2891         kib_msg_t               *msg;
2892         struct rdma_conn_param   cp;
2893         int                      version;
2894         __u64                    incarnation;
2895         unsigned long            flags;
2896         int                      rc;
2897
2898         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2899
2900         incarnation = peer_ni->ibp_incarnation;
2901         version     = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION :
2902                                                  peer_ni->ibp_version;
2903
2904         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2905
2906         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT,
2907                                   version);
2908         if (conn == NULL) {
2909                 kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM);
2910                 kiblnd_peer_decref(peer_ni); /* lose cmid's ref */
2911                 return -ENOMEM;
2912         }
2913
2914         /* conn "owns" cmid now, so I return success from here on to ensure the
2915          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
2916          * on peer_ni */
2917
2918         msg = &conn->ibc_connvars->cv_msg;
2919
2920         memset(msg, 0, sizeof(*msg));
2921         kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
2922         msg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
2923         msg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
2924         msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2925
2926         kiblnd_pack_msg(peer_ni->ibp_ni, msg, version,
2927                         0, peer_ni->ibp_nid, incarnation);
2928
2929         memset(&cp, 0, sizeof(cp));
2930         cp.private_data        = msg;
2931         cp.private_data_len    = msg->ibm_nob;
2932         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
2933         cp.initiator_depth     = 0;
2934         cp.flow_control        = 1;
2935         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2936         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2937
2938         LASSERT(cmid->context == (void *)conn);
2939         LASSERT(conn->ibc_cmid == cmid);
2940
2941         rc = rdma_connect(cmid, &cp);
2942         if (rc != 0) {
2943                 CERROR("Can't connect to %s: %d\n",
2944                        libcfs_nid2str(peer_ni->ibp_nid), rc);
2945                 kiblnd_connreq_done(conn, rc);
2946                 kiblnd_conn_decref(conn);
2947         }
2948
2949         return 0;
2950 }
2951
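/*
 * RDMA CM event handler for both active and passive connections.  Returning
 * non-zero tells the CM to destroy the cmid, so established conns always
 * return 0 and clean up through their own state machine.
 */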
2952 int
2953 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
2954 {
2955         kib_peer_ni_t  *peer_ni;
2956         kib_conn_t  *conn;
2957         int          rc;
2958
2959         switch (event->event) {
2960         default:
2961                 CERROR("Unexpected event: %d, status: %d\n",
2962                        event->event, event->status);
2963                 LBUG();
2964
2965         case RDMA_CM_EVENT_CONNECT_REQUEST:
2966                 /* destroy cmid on failure */
2967                 rc = kiblnd_passive_connect(cmid, 
2968                                             (void *)KIBLND_CONN_PARAM(event),
2969                                             KIBLND_CONN_PARAM_LEN(event));
2970                 CDEBUG(D_NET, "connreq: %d\n", rc);
2971                 return rc;
2972                 
2973         case RDMA_CM_EVENT_ADDR_ERROR:
2974                 peer_ni = (kib_peer_ni_t *)cmid->context;
2975                 CNETERR("%s: ADDR ERROR %d\n",
2976                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
2977                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
2978                 kiblnd_peer_decref(peer_ni);
2979                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
2980
2981         case RDMA_CM_EVENT_ADDR_RESOLVED:
2982                 peer_ni = (kib_peer_ni_t *)cmid->context;
2983
2984                 CDEBUG(D_NET,"%s Addr resolved: %d\n",
2985                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
2986
2987                 if (event->status != 0) {
2988                         CNETERR("Can't resolve address for %s: %d\n",
2989                                 libcfs_nid2str(peer_ni->ibp_nid), event->status);
2990                         rc = event->status;
2991                 } else {
2992                         rc = rdma_resolve_route(
2993                                 cmid, *kiblnd_tunables.kib_timeout * 1000);
2994                         if (rc == 0)
2995                                 return 0;
2996                         /* Can't initiate route resolution */
2997                         CERROR("Can't resolve route for %s: %d\n",
2998                                libcfs_nid2str(peer_ni->ibp_nid), rc);
2999                 }
3000                 kiblnd_peer_connect_failed(peer_ni, 1, rc);
3001                 kiblnd_peer_decref(peer_ni);
3002                 return rc;                      /* rc != 0 destroys cmid */
3003
3004         case RDMA_CM_EVENT_ROUTE_ERROR:
3005                 peer_ni = (kib_peer_ni_t *)cmid->context;
3006                 CNETERR("%s: ROUTE ERROR %d\n",
3007                         libcfs_nid2str(peer_ni->ibp_nid), event->status);
3008                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3009                 kiblnd_peer_decref(peer_ni);
3010                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
3011
3012         case RDMA_CM_EVENT_ROUTE_RESOLVED:
3013                 peer_ni = (kib_peer_ni_t *)cmid->context;
3014                 CDEBUG(D_NET,"%s Route resolved: %d\n",
3015                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3016
3017                 if (event->status == 0)
3018                         return kiblnd_active_connect(cmid);
3019
3020                 CNETERR("Can't resolve route for %s: %d\n",
3021                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3022                 kiblnd_peer_connect_failed(peer_ni, 1, event->status);
3023                 kiblnd_peer_decref(peer_ni);
3024                 return event->status;           /* rc != 0 destroys cmid */
3025                 
3026         case RDMA_CM_EVENT_UNREACHABLE:
3027                 conn = (kib_conn_t *)cmid->context;
3028                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3029                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3030                 CNETERR("%s: UNREACHABLE %d\n",
3031                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3032                 kiblnd_connreq_done(conn, -ENETDOWN);
3033                 kiblnd_conn_decref(conn);
3034                 return 0;
3035
3036         case RDMA_CM_EVENT_CONNECT_ERROR:
3037                 conn = (kib_conn_t *)cmid->context;
3038                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3039                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3040                 CNETERR("%s: CONNECT ERROR %d\n",
3041                         libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3042                 kiblnd_connreq_done(conn, -ENOTCONN);
3043                 kiblnd_conn_decref(conn);
3044                 return 0;
3045
3046         case RDMA_CM_EVENT_REJECTED:
3047                 conn = (kib_conn_t *)cmid->context;
3048                 switch (conn->ibc_state) {
3049                 default:
3050                         LBUG();
3051
3052                 case IBLND_CONN_PASSIVE_WAIT:
3053                         CERROR ("%s: REJECTED %d\n",
3054                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
3055                                 event->status);
3056                         kiblnd_connreq_done(conn, -ECONNRESET);
3057                         break;
3058
3059                 case IBLND_CONN_ACTIVE_CONNECT:
3060                         kiblnd_rejected(conn, event->status,
3061                                         (void *)KIBLND_CONN_PARAM(event),
3062                                         KIBLND_CONN_PARAM_LEN(event));
3063                         break;
3064                 }
3065                 kiblnd_conn_decref(conn);
3066                 return 0;
3067
3068         case RDMA_CM_EVENT_ESTABLISHED:
3069                 conn = (kib_conn_t *)cmid->context;
3070                 switch (conn->ibc_state) {
3071                 default:
3072                         LBUG();
3073
3074                 case IBLND_CONN_PASSIVE_WAIT:
3075                         CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
3076                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3077                         kiblnd_connreq_done(conn, 0);
3078                         break;
3079
3080                 case IBLND_CONN_ACTIVE_CONNECT:
3081                         CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
3082                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3083                         kiblnd_check_connreply(conn,
3084                                                (void *)KIBLND_CONN_PARAM(event),
3085                                                KIBLND_CONN_PARAM_LEN(event));
3086                         break;
3087                 }
3088                 /* net keeps its ref on conn! */
3089                 return 0;
3090
3091         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
3092                 CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
3093                 return 0;
3094
3095         case RDMA_CM_EVENT_DISCONNECTED:
3096                 conn = (kib_conn_t *)cmid->context;
3097                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
3098                         CERROR("%s DISCONNECTED\n",
3099                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3100                         kiblnd_connreq_done(conn, -ECONNRESET);
3101                 } else {
3102                         kiblnd_close_conn(conn, 0);
3103                 }
3104                 kiblnd_conn_decref(conn);
3105                 cmid->context = NULL;
3106                 return 0;
3107
3108         case RDMA_CM_EVENT_DEVICE_REMOVAL:
3109                 LCONSOLE_ERROR_MSG(0x131,
3110                                    "Received notification of device removal\n"
3111                                    "Please shut down LNET to allow this to proceed\n");
3112                 /* Can't remove network from underneath LNET for now, so I have
3113                  * to ignore this */
3114                 return 0;
3115
3116         case RDMA_CM_EVENT_ADDR_CHANGE:
3117                 LCONSOLE_INFO("Physical link changed (e.g. HCA/port)\n");
3118                 return 0;
3119         }
3120 }
3121
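/*
 * Return 1 if any tx on the given queue has passed its deadline.  The caller
 * is expected to hold conn->ibc_lock (hence the _locked suffix).
 */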
3122 static int
3123 kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
3124 {
3125         kib_tx_t         *tx;
3126         struct list_head *ttmp;
3127
3128         list_for_each(ttmp, txs) {
3129                 tx = list_entry(ttmp, kib_tx_t, tx_list);
3130
3131                 if (txs != &conn->ibc_active_txs) {
3132                         LASSERT(tx->tx_queued);
3133                 } else {
3134                         LASSERT(!tx->tx_queued);
3135                         LASSERT(tx->tx_waiting || tx->tx_sending != 0);
3136                 }
3137
3138                 if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
3139                         CERROR("Timed out tx: %s, %lu seconds\n",
3140                                kiblnd_queue2str(conn, txs),
3141                                cfs_duration_sec(jiffies - tx->tx_deadline));
3142                         return 1;
3143                 }
3144         }
3145
3146         return 0;
3147 }
3148
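/*
 * A connection has timed out if any of its queued or active txs has passed
 * its deadline.
 */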
3149 static int
3150 kiblnd_conn_timed_out_locked(kib_conn_t *conn)
3151 {
3152         return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
3153                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
3154                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
3155                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
3156                 kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
3157 }
3158
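/*
 * Scan one bucket of the peer_ni hash table: fail queued txs that have timed
 * out, close connections with timed-out RDMAs and poke connections that need
 * a NOOP to return credits.
 */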
3159 static void
3160 kiblnd_check_conns (int idx)
3161 {
3162         struct list_head  closes = LIST_HEAD_INIT(closes);
3163         struct list_head  checksends = LIST_HEAD_INIT(checksends);
3164         struct list_head  timedout_txs = LIST_HEAD_INIT(timedout_txs);
3165         struct list_head *peers = &kiblnd_data.kib_peers[idx];
3166         struct list_head *ptmp;
3167         kib_peer_ni_t    *peer_ni;
3168         kib_conn_t       *conn;
3169         kib_tx_t         *tx, *tx_tmp;
3170         struct list_head *ctmp;
3171         unsigned long     flags;
3172
3173         /* NB. We expect to look at all the peers and not find any RDMAs to
3174          * time out.  The write lock is needed because timed-out txs may be
3175          * moved off their peer_ni tx queue below. */
3176         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3177
3178         list_for_each(ptmp, peers) {
3179                 peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
3180
3181                 /* Check tx_deadline */
3182                 list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
3183                         if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
3184                                 CWARN("Timed out tx for %s: %lu seconds\n",
3185                                       libcfs_nid2str(peer_ni->ibp_nid),
3186                                       cfs_duration_sec(jiffies - tx->tx_deadline));
3187                                 list_move(&tx->tx_list, &timedout_txs);
3188                         }
3189                 }
3190
3191                 list_for_each(ctmp, &peer_ni->ibp_conns) {
3192                         int timedout;
3193                         int sendnoop;
3194
3195                         conn = list_entry(ctmp, kib_conn_t, ibc_list);
3196
3197                         LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
3198
3199                         spin_lock(&conn->ibc_lock);
3200
3201                         sendnoop = kiblnd_need_noop(conn);
3202                         timedout = kiblnd_conn_timed_out_locked(conn);
3203                         if (!sendnoop && !timedout) {
3204                                 spin_unlock(&conn->ibc_lock);
3205                                 continue;
3206                         }
3207
3208                         if (timedout) {
3209                                 CERROR("Timed out RDMA with %s (%lu): "
3210                                        "c: %u, oc: %u, rc: %u\n",
3211                                        libcfs_nid2str(peer_ni->ibp_nid),
3212                                        cfs_duration_sec(cfs_time_current() -
3213                                                         peer_ni->ibp_last_alive),
3214                                        conn->ibc_credits,
3215                                        conn->ibc_outstanding_credits,
3216                                        conn->ibc_reserved_credits);
3217                                 list_add(&conn->ibc_connd_list, &closes);
3218                         } else {
3219                                 list_add(&conn->ibc_connd_list, &checksends);
3220                         }
3221                         /* +ref for 'closes' or 'checksends' */
3222                         kiblnd_conn_addref(conn);
3223
3224                         spin_unlock(&conn->ibc_lock);
3225                 }
3226         }
3227
3228         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3229
3230         if (!list_empty(&timedout_txs))
3231                 kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT);
3232
3233         /* Handle timeout by closing the whole
3234          * connection. We can only be sure RDMA activity
3235          * has ceased once the QP has been modified. */
3236         while (!list_empty(&closes)) {
3237                 conn = list_entry(closes.next,
3238                                   kib_conn_t, ibc_connd_list);
3239                 list_del(&conn->ibc_connd_list);
3240                 kiblnd_close_conn(conn, -ETIMEDOUT);
3241                 kiblnd_conn_decref(conn);
3242         }
3243
3244         /* In case we have enough credits to return via a
3245          * NOOP, but there were no non-blocking tx descs
3246          * free to do it last time... */
3247         while (!list_empty(&checksends)) {
3248                 conn = list_entry(checksends.next,
3249                                   kib_conn_t, ibc_connd_list);
3250                 list_del(&conn->ibc_connd_list);
3251
3252                 spin_lock(&conn->ibc_lock);
3253                 kiblnd_check_sends_locked(conn);
3254                 spin_unlock(&conn->ibc_lock);
3255
3256                 kiblnd_conn_decref(conn);
3257         }
3258 }
3259
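/*
 * Only called by connd on a conn already in CLOSING state: issue the
 * rdma_disconnect, finalise the conn and notify the peer_ni.
 */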
3260 static void
3261 kiblnd_disconnect_conn (kib_conn_t *conn)
3262 {
3263         LASSERT (!in_interrupt());
3264         LASSERT (current == kiblnd_data.kib_connd);
3265         LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
3266
3267         rdma_disconnect(conn->ibc_cmid);
3268         kiblnd_finalise_conn(conn);
3269
3270         kiblnd_peer_notify(conn->ibc_peer);
3271 }
3272
3273 /*
3274  * High-water mark for reconnections to the same peer_ni: further reconnection
3275  * attempts are delayed once more than KIB_RECONN_HIGH_RACE have been made.
3276  */
3277 #define KIB_RECONN_HIGH_RACE    10
3278 /*
3279  * Allow connd to take a break and handle other things after consecutive
3280  * reconnection attempts.
3281  */
3282 #define KIB_RECONN_BREAK        100
3283
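/*
 * Connection daemon: destroys zombie conns, disconnects closing conns,
 * retries reconnections and periodically sweeps the peer_ni table for
 * RDMA timeouts.
 */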
3284 int
3285 kiblnd_connd (void *arg)
3286 {
3287         spinlock_t        *lock = &kiblnd_data.kib_connd_lock;
3288         wait_queue_t       wait;
3289         unsigned long      flags;
3290         kib_conn_t        *conn;
3291         int                timeout;
3292         int                i;
3293         int                dropped_lock;
3294         int                peer_index = 0;
3295         unsigned long      deadline = jiffies;
3296
3297         cfs_block_allsigs();
3298
3299         init_waitqueue_entry(&wait, current);
3300         kiblnd_data.kib_connd = current;
3301
3302         spin_lock_irqsave(lock, flags);
3303
3304         while (!kiblnd_data.kib_shutdown) {
3305                 int reconn = 0;
3306
3307                 dropped_lock = 0;
3308
3309                 if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
3310                         kib_peer_ni_t *peer_ni = NULL;
3311
3312                         conn = list_entry(kiblnd_data.kib_connd_zombies.next,
3313                                           kib_conn_t, ibc_list);
3314                         list_del(&conn->ibc_list);
3315                         if (conn->ibc_reconnect) {
3316                                 peer_ni = conn->ibc_peer;
3317                                 kiblnd_peer_addref(peer_ni);
3318                         }
3319
3320                         spin_unlock_irqrestore(lock, flags);
3321                         dropped_lock = 1;
3322
3323                         kiblnd_destroy_conn(conn, !peer_ni);
3324
3325                         spin_lock_irqsave(lock, flags);
3326                         if (!peer_ni)
3327                                 continue;
3328
3329                         conn->ibc_peer = peer_ni;
3330                         if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
3331                                 list_add_tail(&conn->ibc_list,
3332                                               &kiblnd_data.kib_reconn_list);
3333                         else
3334                                 list_add_tail(&conn->ibc_list,
3335                                               &kiblnd_data.kib_reconn_wait);
3336                 }
3337
3338                 if (!list_empty(&kiblnd_data.kib_connd_conns)) {
3339                         conn = list_entry(kiblnd_data.kib_connd_conns.next,
3340                                               kib_conn_t, ibc_list);
3341                         list_del(&conn->ibc_list);
3342
3343                         spin_unlock_irqrestore(lock, flags);
3344                         dropped_lock = 1;
3345
3346                         kiblnd_disconnect_conn(conn);
3347                         kiblnd_conn_decref(conn);
3348
3349                         spin_lock_irqsave(lock, flags);
3350                 }
3351
3352                 while (reconn < KIB_RECONN_BREAK) {
3353                         if (kiblnd_data.kib_reconn_sec !=
3354                             ktime_get_real_seconds()) {
3355                                 kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
3356                                 list_splice_init(&kiblnd_data.kib_reconn_wait,
3357                                                  &kiblnd_data.kib_reconn_list);
3358                         }
3359
3360                         if (list_empty(&kiblnd_data.kib_reconn_list))
3361                                 break;
3362
3363                         conn = list_entry(kiblnd_data.kib_reconn_list.next,
3364                                           kib_conn_t, ibc_list);
3365                         list_del(&conn->ibc_list);
3366
3367                         spin_unlock_irqrestore(lock, flags);
3368                         dropped_lock = 1;
3369
3370                         reconn += kiblnd_reconnect_peer(conn->ibc_peer);
3371                         kiblnd_peer_decref(conn->ibc_peer);
3372                         LIBCFS_FREE(conn, sizeof(*conn));
3373
3374                         spin_lock_irqsave(lock, flags);
3375                 }
3376
3377                 /* careful with the jiffy wrap... */
3378                 timeout = (int)(deadline - jiffies);
3379                 if (timeout <= 0) {
3380                         const int n = 4;
3381                         const int p = 1;
3382                         int       chunk = kiblnd_data.kib_peer_hash_size;
3383
3384                         spin_unlock_irqrestore(lock, flags);
3385                         dropped_lock = 1;
3386
3387                         /* Time to check for RDMA timeouts on a few more
3388                          * peers: I do checks every 'p' seconds on a
3389                          * proportion of the peer_ni table and I need to check
3390                          * every connection 'n' times within a timeout
3391                          * interval, to ensure I detect a timeout on any
3392                          * connection within (n+1)/n times the timeout
3393                          * interval. */
3394
3395                         if (*kiblnd_tunables.kib_timeout > n * p)
3396                                 chunk = (chunk * n * p) /
3397                                         *kiblnd_tunables.kib_timeout;
3398                         if (chunk == 0)
3399                                 chunk = 1;
3400
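                        /* Illustrative arithmetic (with assumed defaults, not
                         * values taken from this file): for p = 1, n = 4, a
                         * peer_ni hash of 101 buckets and a 50 second timeout,
                         * chunk = 101 * 4 * 1 / 50 = 8, so about 8 buckets are
                         * scanned per one-second pass and every connection is
                         * still checked several times per timeout interval. */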
3401                         for (i = 0; i < chunk; i++) {
3402                                 kiblnd_check_conns(peer_index);
3403                                 peer_index = (peer_index + 1) %
3404                                              kiblnd_data.kib_peer_hash_size;
3405                         }
3406
3407                         deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
3408                         spin_lock_irqsave(lock, flags);
3409                 }
3410
3411                 if (dropped_lock)
3412                         continue;
3413
3414                 /* Nothing to do for 'timeout' */
3415                 set_current_state(TASK_INTERRUPTIBLE);
3416                 add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3417                 spin_unlock_irqrestore(lock, flags);
3418
3419                 schedule_timeout(timeout);
3420
3421                 set_current_state(TASK_RUNNING);
3422                 remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3423                 spin_lock_irqsave(lock, flags);
3424         }
3425
3426         spin_unlock_irqrestore(lock, flags);
3427
3428         kiblnd_thread_fini();
3429         return 0;
3430 }
3431
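/*
 * Asynchronous QP event handler: COMM_EST nudges the CM in case the
 * handshake was lost; anything else is just logged.
 */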
3432 void
3433 kiblnd_qp_event(struct ib_event *event, void *arg)
3434 {
3435         kib_conn_t *conn = arg;
3436
3437         switch (event->event) {
3438         case IB_EVENT_COMM_EST:
3439                 CDEBUG(D_NET, "%s established\n",
3440                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
3441                 /* We received a packet but the connection isn't established
3442                  * yet; the handshake packet was probably lost, so notify the
3443                  * CM to force the connection into the established state. */
3444                 rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
3445                 return;
3446
3447         default:
3448                 CERROR("%s: Async QP event type %d\n",
3449                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3450                 return;
3451         }
3452 }
3453
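/*
 * Dispatch a work completion to the right handler based on the type encoded
 * in its work-request id.
 */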
3454 static void
3455 kiblnd_complete (struct ib_wc *wc)
3456 {
3457         switch (kiblnd_wreqid2type(wc->wr_id)) {
3458         default:
3459                 LBUG();
3460
3461         case IBLND_WID_MR:
3462                 if (wc->status != IB_WC_SUCCESS &&
3463                     wc->status != IB_WC_WR_FLUSH_ERR)
3464                         CNETERR("FastReg failed: %d\n", wc->status);
3465                 return;
3466
3467         case IBLND_WID_RDMA:
3468                 /* We only get RDMA completion notification if it fails.  All
3469                  * subsequent work items, including the final SEND will fail
3470                  * too.  However we can't print out any more info about the
3471                  * failing RDMA because 'tx' might be back on the idle list or
3472                  * even reused already if we didn't manage to post all our work
3473                  * items */
3474                 CNETERR("RDMA (tx: %p) failed: %d\n",
3475                         kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3476                 return;
3477
3478         case IBLND_WID_TX:
3479                 kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3480                 return;
3481
3482         case IBLND_WID_RX:
3483                 kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
3484                                    wc->byte_len);
3485                 return;
3486         }
3487 }
3488
3489 void
3490 kiblnd_cq_completion(struct ib_cq *cq, void *arg)
3491 {
3492         /* NB I'm not allowed to schedule this conn once its refcount has
3493          * reached 0.  Since fundamentally I'm racing with scheduler threads
3494          * consuming my CQ I could be called after all completions have
3495          * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
3496          * and this CQ is about to be destroyed so I NOOP. */
3497         kib_conn_t              *conn = (kib_conn_t *)arg;
3498         struct kib_sched_info   *sched = conn->ibc_sched;
3499         unsigned long           flags;
3500
3501         LASSERT(cq == conn->ibc_cq);
3502
3503         spin_lock_irqsave(&sched->ibs_lock, flags);
3504
3505         conn->ibc_ready = 1;
3506
3507         if (!conn->ibc_scheduled &&
3508             (conn->ibc_nrx > 0 ||
3509              conn->ibc_nsends_posted > 0)) {
3510                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
3511                 conn->ibc_scheduled = 1;
3512                 list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
3513
3514                 if (waitqueue_active(&sched->ibs_waitq))
3515                         wake_up(&sched->ibs_waitq);
3516         }
3517
3518         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3519 }
3520
3521 void
3522 kiblnd_cq_event(struct ib_event *event, void *arg)
3523 {
3524         kib_conn_t *conn = arg;
3525
3526         CERROR("%s: async CQ event type %d\n",
3527                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3528 }
3529
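/*
 * Per-CPT scheduler thread: binds to its CPU partition, then polls the CQs
 * of connections queued on the shared sched list and dispatches their
 * completions.
 */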
3530 int
3531 kiblnd_scheduler(void *arg)
3532 {
3533         long                    id = (long)arg;
3534         struct kib_sched_info   *sched;
3535         kib_conn_t              *conn;
3536         wait_queue_t            wait;
3537         unsigned long           flags;
3538         struct ib_wc            wc;
3539         int                     did_something;
3540         int                     busy_loops = 0;
3541         int                     rc;
3542
3543         cfs_block_allsigs();
3544
3545         init_waitqueue_entry(&wait, current);
3546
3547         sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
3548
3549         rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
3550         if (rc != 0) {
3551                 CWARN("Unable to bind on CPU partition %d, please verify "
3552                       "whether all CPUs are healthy and reload modules if "
3553                       "necessary, otherwise your system might be at risk of "
3554                       "low performance\n", sched->ibs_cpt);
3555         }
3556
3557         spin_lock_irqsave(&sched->ibs_lock, flags);
3558
3559         while (!kiblnd_data.kib_shutdown) {
3560                 if (busy_loops++ >= IBLND_RESCHED) {
3561                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3562
3563                         cond_resched();
3564                         busy_loops = 0;
3565
3566                         spin_lock_irqsave(&sched->ibs_lock, flags);
3567                 }
3568
3569                 did_something = 0;
3570
3571                 if (!list_empty(&sched->ibs_conns)) {
3572                         conn = list_entry(sched->ibs_conns.next,
3573                                               kib_conn_t, ibc_sched_list);
3574                         /* take over kib_sched_conns' ref on conn... */
3575                         LASSERT(conn->ibc_scheduled);
3576                         list_del(&conn->ibc_sched_list);
3577                         conn->ibc_ready = 0;
3578
3579                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3580
3581                         wc.wr_id = IBLND_WID_INVAL;
3582
3583                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3584                         if (rc == 0) {
3585                                 rc = ib_req_notify_cq(conn->ibc_cq,
3586                                                       IB_CQ_NEXT_COMP);
3587                                 if (rc < 0) {
3588                                         CWARN("%s: ib_req_notify_cq failed: %d, "
3589                                               "closing connection\n",
3590                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
3591                                         kiblnd_close_conn(conn, -EIO);
3592                                         kiblnd_conn_decref(conn);
3593                                         spin_lock_irqsave(&sched->ibs_lock,
3594                                                               flags);
3595                                         continue;
3596                                 }
3597
3598                                 rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3599                         }
3600
3601                         if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
3602                                 LCONSOLE_ERROR(
3603                                         "ib_poll_cq (rc: %d) returned invalid "
3604                                         "wr_id, opcode %d, status: %d, "
3605                                         "vendor_err: %d, conn: %s status: %d\n"
3606                                         "please upgrade firmware and OFED or "
3607                                         "contact vendor.\n", rc,
3608                                         wc.opcode, wc.status, wc.vendor_err,
3609                                         libcfs_nid2str(conn->ibc_peer->ibp_nid),
3610                                         conn->ibc_state);
3611                                 rc = -EINVAL;
3612                         }
3613
3614                         if (rc < 0) {
3615                                 CWARN("%s: ib_poll_cq failed: %d, "
3616                                       "closing connection\n",
3617                                       libcfs_nid2str(conn->ibc_peer->ibp_nid),
3618                                       rc);
3619                                 kiblnd_close_conn(conn, -EIO);
3620                                 kiblnd_conn_decref(conn);
3621                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3622                                 continue;
3623                         }
3624
3625                         spin_lock_irqsave(&sched->ibs_lock, flags);
3626
3627                         if (rc != 0 || conn->ibc_ready) {
3628                                 /* There may be another completion waiting; get
3629                                  * another scheduler to check while I handle
3630                                  * this one... */
3631                                 /* +1 ref for sched_conns */
3632                                 kiblnd_conn_addref(conn);
3633                                 list_add_tail(&conn->ibc_sched_list,
3634                                                   &sched->ibs_conns);
3635                                 if (waitqueue_active(&sched->ibs_waitq))
3636                                         wake_up(&sched->ibs_waitq);
3637                         } else {
3638                                 conn->ibc_scheduled = 0;
3639                         }
3640
3641                         if (rc != 0) {
3642                                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3643                                 kiblnd_complete(&wc);
3644
3645                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3646                         }
3647
3648                         kiblnd_conn_decref(conn); /* ...drop my ref from above */
3649                         did_something = 1;
3650                 }
3651
3652                 if (did_something)
3653                         continue;
3654
3655                 set_current_state(TASK_INTERRUPTIBLE);
3656                 add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
3657                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3658
3659                 schedule();
3660                 busy_loops = 0;
3661
3662                 remove_wait_queue(&sched->ibs_waitq, &wait);
3663                 set_current_state(TASK_RUNNING);
3664                 spin_lock_irqsave(&sched->ibs_lock, flags);
3665         }
3666
3667         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3668
3669         kiblnd_thread_fini();
3670         return 0;
3671 }
3672
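/*
 * Only runs when dev_failover is enabled: retries failed devices and
 * periodically re-checks all devices so a bonding failover is noticed even
 * on an otherwise idle HCA.
 */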
3673 int
3674 kiblnd_failover_thread(void *arg)
3675 {
3676         rwlock_t        *glock = &kiblnd_data.kib_global_lock;
3677         kib_dev_t       *dev;
3678         wait_queue_t     wait;
3679         unsigned long    flags;
3680         int              rc;
3681
3682         LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
3683
3684         cfs_block_allsigs();
3685
3686         init_waitqueue_entry(&wait, current);
3687         write_lock_irqsave(glock, flags);
3688
3689         while (!kiblnd_data.kib_shutdown) {
3690                 int     do_failover = 0;
3691                 int     long_sleep;
3692
3693                 list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
3694                                     ibd_fail_list) {
3695                         if (cfs_time_before(cfs_time_current(),
3696                                             dev->ibd_next_failover))
3697                                 continue;
3698                         do_failover = 1;
3699                         break;
3700                 }
3701
3702                 if (do_failover) {
3703                         list_del_init(&dev->ibd_fail_list);
3704                         dev->ibd_failover = 1;
3705                         write_unlock_irqrestore(glock, flags);
3706
3707                         rc = kiblnd_dev_failover(dev);
3708
3709                         write_lock_irqsave(glock, flags);
3710
3711                         LASSERT (dev->ibd_failover);
3712                         dev->ibd_failover = 0;
3713                         if (rc >= 0) { /* Device is OK or failover succeeded */
3714                                 dev->ibd_next_failover = cfs_time_shift(3);
3715                                 continue;
3716                         }
3717
3718                         /* failed to failover, retry later */
3719                         dev->ibd_next_failover =
3720                                 cfs_time_shift(min(dev->ibd_failed_failover, 10));
3721                         if (kiblnd_dev_can_failover(dev)) {
3722                                 list_add_tail(&dev->ibd_fail_list,
3723                                               &kiblnd_data.kib_failed_devs);
3724                         }
3725
3726                         continue;
3727                 }
3728
3729                 /* long sleep if no more pending failover */
3730                 long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
3731
3732                 set_current_state(TASK_INTERRUPTIBLE);
3733                 add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3734                 write_unlock_irqrestore(glock, flags);
3735
3736                 rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
3737                                                    cfs_time_seconds(1));
3738                 set_current_state(TASK_RUNNING);
3739                 remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3740                 write_lock_irqsave(glock, flags);
3741
3742                 if (!long_sleep || rc != 0)
3743                         continue;
3744
3745                 /* After a long sleep, routinely check all active devices.
3746                  * We need this check because if a dev has no active
3747                  * connection and no local SENDs, we may keep listening on
3748                  * the wrong HCA forever after a bonding failover. */
3749                 list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
3750                         if (kiblnd_dev_can_failover(dev)) {
3751                                 list_add_tail(&dev->ibd_fail_list,
3752                                               &kiblnd_data.kib_failed_devs);
3753                         }
3754                 }
3755         }
3756
3757         write_unlock_irqrestore(glock, flags);
3758
3759         kiblnd_thread_fini();
3760         return 0;
3761 }