LU-9094 o2iblnd: reconnect peer for REJ_INVALID_SERVICE_ID
lnet/klnds/o2iblnd/o2iblnd_cb.c

/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

#define MAX_CONN_RACES_BEFORE_ABORT 20

static void kiblnd_peer_alive(kib_peer_ni_t *peer_ni);
static void kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error);
static void kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx,
                               int type, int body_nob);
static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
                            int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
static void kiblnd_unmap_tx(struct lnet_ni *ni, kib_tx_t *tx);
static void kiblnd_check_sends_locked(kib_conn_t *conn);

void
kiblnd_tx_done(struct lnet_ni *ni, kib_tx_t *tx)
{
        struct lnet_msg *lntmsg[2];
        kib_net_t  *net = ni->ni_data;
        int         rc;
        int         i;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
        LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
        LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer_ni response */
        LASSERT (tx->tx_pool != NULL);

        kiblnd_unmap_tx(ni, tx);

        /* tx may have up to 2 lnet msgs to finalise */
        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
        rc = tx->tx_status;

        if (tx->tx_conn != NULL) {
                LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);

                kiblnd_conn_decref(tx->tx_conn);
                tx->tx_conn = NULL;
        }

        tx->tx_nwrq = 0;
        tx->tx_status = 0;

        kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

        /* delay finalize until my descs have been freed */
        for (i = 0; i < 2; i++) {
                if (lntmsg[i] == NULL)
                        continue;

                lnet_finalize(ni, lntmsg[i], rc);
        }
}

void
kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int status)
{
        kib_tx_t *tx;

        while (!list_empty(txlist)) {
                tx = list_entry(txlist->next, kib_tx_t, tx_list);

                list_del(&tx->tx_list);
                /* complete now */
                tx->tx_waiting = 0;
                tx->tx_status = status;
                kiblnd_tx_done(ni, tx);
        }
}

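/* Fetch an idle tx from the tx pool-set of the CPT associated with 'target';
 * returns NULL if no tx descriptor is available. */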
static kib_tx_t *
kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
{
        kib_net_t               *net = (kib_net_t *)ni->ni_data;
        struct list_head        *node;
        kib_tx_t                *tx;
        kib_tx_poolset_t        *tps;

        tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
        node = kiblnd_pool_alloc_node(&tps->tps_poolset);
        if (node == NULL)
                return NULL;
        tx = container_of(node, kib_tx_t, tx_list);

        LASSERT (tx->tx_nwrq == 0);
        LASSERT (!tx->tx_queued);
        LASSERT (tx->tx_sending == 0);
        LASSERT (!tx->tx_waiting);
        LASSERT (tx->tx_status == 0);
        LASSERT (tx->tx_conn == NULL);
        LASSERT (tx->tx_lntmsg[0] == NULL);
        LASSERT (tx->tx_lntmsg[1] == NULL);
        LASSERT (tx->tx_nfrags == 0);

        return tx;
}

static void
kiblnd_drop_rx(kib_rx_t *rx)
{
        kib_conn_t              *conn   = rx->rx_conn;
        struct kib_sched_info   *sched  = conn->ibc_sched;
        unsigned long           flags;

        spin_lock_irqsave(&sched->ibs_lock, flags);
        LASSERT(conn->ibc_nrx > 0);
        conn->ibc_nrx--;
        spin_unlock_irqrestore(&sched->ibs_lock, flags);

        kiblnd_conn_decref(conn);
}

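/* Repost 'rx' on its connection's receive queue.  'credit' selects whether
 * the buffer's return is advertised to the peer_ni as an outstanding
 * credit, a reserved credit, or not at all. */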
int
kiblnd_post_rx (kib_rx_t *rx, int credit)
{
        kib_conn_t         *conn = rx->rx_conn;
        kib_net_t          *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr  *bad_wrq = NULL;
        struct ib_mr       *mr = conn->ibc_hdev->ibh_mrs;
        int                 rc;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
                 credit == IBLND_POSTRX_PEER_CREDIT ||
                 credit == IBLND_POSTRX_RSRVD_CREDIT);
        LASSERT(mr != NULL);

        rx->rx_sge.lkey   = mr->lkey;
        rx->rx_sge.addr   = rx->rx_msgaddr;
        rx->rx_sge.length = IBLND_MSG_SIZE;

        rx->rx_wrq.next = NULL;
        rx->rx_wrq.sg_list = &rx->rx_sge;
        rx->rx_wrq.num_sge = 1;
        rx->rx_wrq.wr_id = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

        LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
        LASSERT (rx->rx_nob >= 0);              /* not posted */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return 0;
        }

        rx->rx_nob = -1;                        /* flag posted */

        /* NB: need an extra reference after ib_post_recv because we don't
         * own this rx (and rx::rx_conn) anymore, LU-5678.
         */
        kiblnd_conn_addref(conn);
        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
        if (unlikely(rc != 0)) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        }

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                goto out;

        if (unlikely(rc != 0)) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);     /* No more posts for this rx */
                goto out;
        }

        if (credit == IBLND_POSTRX_NO_CREDIT)
                goto out;

        spin_lock(&conn->ibc_lock);
        if (credit == IBLND_POSTRX_PEER_CREDIT)
                conn->ibc_outstanding_credits++;
        else
                conn->ibc_reserved_credits++;
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

out:
        kiblnd_conn_decref(conn);
        return rc;
}

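/* Scan the active tx list for the tx that is awaiting a reply of 'txtype'
 * with a matching cookie; caller must hold ibc_lock. */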
static kib_tx_t *
kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
{
        struct list_head *tmp;

        list_for_each(tmp, &conn->ibc_active_txs) {
                kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);

                LASSERT(!tx->tx_queued);
                LASSERT(tx->tx_sending != 0 || tx->tx_waiting);

                if (tx->tx_cookie != cookie)
                        continue;

                if (tx->tx_waiting &&
                    tx->tx_msg->ibm_type == txtype)
                        return tx;

                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
                      tx->tx_waiting ? "" : "NOT ",
                      tx->tx_msg->ibm_type, txtype);
        }
        return NULL;
}

static void
kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
{
        kib_tx_t    *tx;
        struct lnet_ni   *ni = conn->ibc_peer->ibp_ni;
        int          idle;

        spin_lock(&conn->ibc_lock);

        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
        if (tx == NULL) {
                spin_unlock(&conn->ibc_lock);

                CWARN("Unmatched completion type %x cookie %#llx from %s\n",
                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kiblnd_close_conn(conn, -EPROTO);
                return;
        }

        if (tx->tx_status == 0) {               /* success so far */
                if (status < 0) {               /* failed? */
                        tx->tx_status = status;
                } else if (txtype == IBLND_MSG_GET_REQ) {
                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
                }
        }

        tx->tx_waiting = 0;

        idle = !tx->tx_queued && (tx->tx_sending == 0);
        if (idle)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(ni, tx);
}

static void
kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
{
        struct lnet_ni   *ni = conn->ibc_peer->ibp_ni;
        kib_tx_t    *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);

        if (tx == NULL) {
                CERROR("Can't get tx for completion %x for %s\n",
                       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
        kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));

        kiblnd_queue_tx(tx, conn);
}

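/* Dispatch a received message: bank any send credits the peer_ni returned,
 * act on the message type, then decide how (or whether) to repost the rx
 * buffer. */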
static void
kiblnd_handle_rx (kib_rx_t *rx)
{
        kib_msg_t    *msg = rx->rx_msg;
        kib_conn_t   *conn = rx->rx_conn;
        struct lnet_ni    *ni = conn->ibc_peer->ibp_ni;
        int           credits = msg->ibm_credits;
        kib_tx_t     *tx;
        int           rc = 0;
        int           rc2;
        int           post_credit;

        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        CDEBUG (D_NET, "Received %x[%d] from %s\n",
                msg->ibm_type, credits,
                libcfs_nid2str(conn->ibc_peer->ibp_nid));

        if (credits != 0) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);

                if (conn->ibc_credits + credits >
                    conn->ibc_queue_depth) {
                        rc2 = conn->ibc_credits;
                        spin_unlock(&conn->ibc_lock);

                        CERROR("Bad credits from %s: %d + %d > %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               rc2, credits,
                               conn->ibc_queue_depth);

                        kiblnd_close_conn(conn, -EPROTO);
                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
                        return;
                }

                conn->ibc_credits += credits;

                /* This ensures the credit taken by NOOP can be returned */
                if (msg->ibm_type == IBLND_MSG_NOOP &&
                    !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
                        conn->ibc_outstanding_credits++;

                kiblnd_check_sends_locked(conn);
                spin_unlock(&conn->ibc_lock);
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Bad IBLND message type %x from %s\n",
                       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_NO_CREDIT;
                rc = -EPROTO;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                        break;
                }

                if (credits != 0) /* credit already posted */
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else              /* a keepalive NOOP */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_IMMEDIATE:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
                                msg->ibm_srcnid, rx, 0);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_NAK:
                CWARN ("PUT_NACK from %s\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_PUT_ACK:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;

                spin_lock(&conn->ibc_lock);
                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                        msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT (tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer_ni has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = 0;                /* overwrite PUT_REQ */

                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                tx->tx_waiting = 0;     /* clear waiting and queue atomically */
                kiblnd_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBLND_MSG_PUT_DONE:
                post_credit = IBLND_POSTRX_PEER_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_GET_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_GET_DONE:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kiblnd_close_conn(conn, rc);

        if (post_credit != IBLND_POSTRX_DONT_POST)
                kiblnd_post_rx(rx, post_credit);
}

static void
kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
{
        kib_msg_t    *msg = rx->rx_msg;
        kib_conn_t   *conn = rx->rx_conn;
        struct lnet_ni    *ni = conn->ibc_peer->ibp_ni;
        kib_net_t    *net = ni->ni_data;
        int           rc;
        int           err = -EIO;

        LASSERT (net != NULL);
        LASSERT (rx->rx_nob < 0);               /* was posted */
        rx->rx_nob = 0;                         /* isn't now */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
                goto ignore;

        if (status != IB_WC_SUCCESS) {
                CNETERR("Rx from %s failed: %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                goto failed;
        }

        LASSERT (nob >= 0);
        rx->rx_nob = nob;

        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
        if (rc != 0) {
                CERROR ("Error %d unpacking rx from %s\n",
                        rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
            msg->ibm_dstnid != ni->ni_nid ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != net->ibn_incarnation) {
                CERROR ("Stale rx from %s\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                err = -ESTALE;
                goto failed;
        }

        /* set time last known alive */
        kiblnd_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                rwlock_t  *g_lock = &kiblnd_data.kib_global_lock;
                unsigned long  flags;

                write_lock_irqsave(g_lock, flags);
                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(g_lock, flags);
                        return;
                }
                write_unlock_irqrestore(g_lock, flags);
        }
        kiblnd_handle_rx(rx);
        return;

 failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kiblnd_close_conn(conn, err);
 ignore:
        kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
}

static struct page *
kiblnd_kvaddr_to_page (unsigned long vaddr)
{
        struct page *page;

        if (is_vmalloc_addr((void *)vaddr)) {
                page = vmalloc_to_page ((void *)vaddr);
                LASSERT (page != NULL);
                return page;
        }
#ifdef CONFIG_HIGHMEM
        if (vaddr >= PKMAP_BASE &&
            vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
                /* No highmem pages here: highmem is only used for bulk
                 * (kiov) I/O */
                CERROR("can't find page for address in highmem\n");
                LBUG();
        }
#endif
        page = virt_to_page (vaddr);
        LASSERT (page != NULL);
        return page;
}

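/* Map the fragments of 'rd' through the FMR pool of the tx's CPT.  On
 * success the descriptor collapses to a single virtually contiguous
 * fragment keyed by the FMR. */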
static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
{
        kib_hca_dev_t           *hdev;
        kib_fmr_poolset_t       *fps;
        int                     cpt;
        int                     rc;

        LASSERT(tx->tx_pool != NULL);
        LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);

        hdev = tx->tx_pool->tpo_hdev;
        cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;

        fps = net->ibn_fmr_ps[cpt];
        rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
        if (rc != 0) {
                CERROR("Can't map %u bytes: %d\n", nob, rc);
                return rc;
        }

        /* If rd is not tx_rd, it's going to get sent to a peer_ni, who will need
         * the rkey */
        rd->rd_key = tx->fmr.fmr_key;
        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
        rd->rd_frags[0].rf_nob   = nob;
        rd->rd_nfrags = 1;

        return 0;
}

static void
kiblnd_unmap_tx(struct lnet_ni *ni, kib_tx_t *tx)
{
        kib_net_t  *net = ni->ni_data;

        LASSERT(net != NULL);

        if (net->ibn_fmr_ps != NULL)
                kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);

        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                tx->tx_nfrags = 0;
        }
}

static int
kiblnd_map_tx(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
{
        kib_net_t     *net   = ni->ni_data;
        kib_hca_dev_t *hdev  = net->ibn_dev->ibd_hdev;
        struct ib_mr  *mr    = NULL;
        __u32 nob;
        int i;

        /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the
         * RDMA sink */
        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        tx->tx_nfrags = nfrags;

        rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
                                          tx->tx_nfrags, tx->tx_dmadir);

        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
                        hdev->ibh_ibdev, &tx->tx_frags[i]);
                nob += rd->rd_frags[i].rf_nob;
        }

        mr = kiblnd_find_rd_dma_mr(ni, rd,
                                   (tx->tx_conn != NULL) ?
                                   tx->tx_conn->ibc_max_frags : -1);
        if (mr != NULL) {
                /* found pre-mapping MR */
                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
                return 0;
        }

        if (net->ibn_fmr_ps != NULL)
                return kiblnd_fmr_map_tx(net, tx, rd, nob);

        return -EINVAL;
}

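/* Build the scatterlist for a kvec payload: translate each kernel virtual
 * address range to its backing pages, then DMA-map via kiblnd_map_tx(). */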
static int
kiblnd_setup_rd_iov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                    unsigned int niov, struct kvec *iov, int offset, int nob)
{
        kib_net_t          *net = ni->ni_data;
        struct page        *page;
        struct scatterlist *sg;
        unsigned long       vaddr;
        int                 fragnob;
        int                 page_offset;

        LASSERT (nob > 0);
        LASSERT (niov > 0);
        LASSERT (net != NULL);

        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                niov--;
                iov++;
                LASSERT (niov > 0);
        }

        sg = tx->tx_frags;
        do {
                LASSERT (niov > 0);

                vaddr = ((unsigned long)iov->iov_base) + offset;
                page_offset = vaddr & (PAGE_SIZE - 1);
                page = kiblnd_kvaddr_to_page(vaddr);
                if (page == NULL) {
                        CERROR ("Can't find page\n");
                        return -EFAULT;
                }

                fragnob = min((int)(iov->iov_len - offset), nob);
                fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);

                sg_set_page(sg, page, fragnob, page_offset);
                sg = sg_next(sg);
                if (!sg) {
                        CERROR("lacking enough sg entries to map tx\n");
                        return -EFAULT;
                }

                if (offset + fragnob < iov->iov_len) {
                        offset += fragnob;
                } else {
                        offset = 0;
                        iov++;
                        niov--;
                }
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

static int
kiblnd_setup_rd_kiov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                     int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
        kib_net_t          *net = ni->ni_data;
        struct scatterlist *sg;
        int                 fragnob;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT (nob > 0);
        LASSERT (nkiov > 0);
        LASSERT (net != NULL);

        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                nkiov--;
                kiov++;
                LASSERT (nkiov > 0);
        }

        sg = tx->tx_frags;
        do {
                LASSERT (nkiov > 0);

                fragnob = min((int)(kiov->kiov_len - offset), nob);

                sg_set_page(sg, kiov->kiov_page, fragnob,
                            kiov->kiov_offset + offset);
                sg = sg_next(sg);
                if (!sg) {
                        CERROR("lacking enough sg entries to map tx\n");
                        return -EFAULT;
                }

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

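/* Try to post 'tx' on the QP.  Returns -EAGAIN when flow control defers it
 * (send queue full, no credits, or the last credit is being held back for a
 * NOOP), 0 on success or a dropped redundant NOOP, and -EIO if the post
 * fails and the connection is torn down. */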
static int
kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
__must_hold(&conn->ibc_lock)
{
        kib_msg_t *msg = tx->tx_msg;
        kib_peer_ni_t *peer_ni = conn->ibc_peer;
        struct lnet_ni *ni = peer_ni->ibp_ni;
        int ver = conn->ibc_version;
        int rc;
        int done;

        LASSERT(tx->tx_queued);
        /* We rely on this for QP sizing */
        LASSERT(tx->tx_nwrq > 0);
        LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);

        LASSERT(credit == 0 || credit == 1);
        LASSERT(conn->ibc_outstanding_credits >= 0);
        LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
        LASSERT(conn->ibc_credits >= 0);
        LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);

        if (conn->ibc_nsends_posted ==
            kiblnd_concurrent_sends(ver, ni)) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&   /* last credit reserved */
            msg->ibm_type != IBLND_MSG_NOOP) {      /* for NOOP */
                CDEBUG(D_NET, "%s: not using last credit\n",
                       libcfs_nid2str(peer_ni->ibp_nid));
                return -EAGAIN;
        }

        /* NB don't drop ibc_lock before bumping tx_sending */
        list_del(&tx->tx_list);
        tx->tx_queued = 0;

        if (msg->ibm_type == IBLND_MSG_NOOP &&
            (!kiblnd_need_noop(conn) ||     /* redundant NOOP */
             (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                /* OK to drop when posted enough NOOPs, since
                 * kiblnd_check_sends_locked will queue NOOP again when
                 * posted NOOPs complete */
                spin_unlock(&conn->ibc_lock);
                kiblnd_tx_done(peer_ni->ibp_ni, tx);
                spin_lock(&conn->ibc_lock);
                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                       libcfs_nid2str(peer_ni->ibp_nid),
                       conn->ibc_noops_posted);
                return 0;
        }

        kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
                        peer_ni->ibp_nid, conn->ibc_incarnation);

        conn->ibc_credits -= credit;
        conn->ibc_outstanding_credits = 0;
        conn->ibc_nsends_posted++;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted++;

        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
         * and then re-queued here.  It's (just) possible that
         * tx_sending is non-zero if we've not done the tx_complete()
         * from the first send; hence the ++ rather than = below. */
        tx->tx_sending++;
        list_add(&tx->tx_list, &conn->ibc_active_txs);

        /* I'm still holding ibc_lock! */
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
                rc = -ECONNABORTED;
        } else if (tx->tx_pool->tpo_pool.po_failed ||
                 conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
                /* close_conn will launch failover */
                rc = -ENETDOWN;
        } else {
                struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
                struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
                struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;

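                /* If this tx registered its buffer with a FastReg MR, chain
                 * the (possibly preceded by a local-invalidate) fastreg work
                 * request ahead of the tx's own work requests, so the
                 * registration is in place before the transfer executes. */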
                if (frd != NULL) {
                        if (!frd->frd_valid) {
                                wr = &frd->frd_inv_wr.wr;
                                wr->next = &frd->frd_fastreg_wr.wr;
                        } else {
                                wr = &frd->frd_fastreg_wr.wr;
                        }
                        frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
                }

                LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
                         "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n",
                         bad->wr_id, bad->opcode, bad->send_flags,
                         libcfs_nid2str(conn->ibc_peer->ibp_nid));

                bad = NULL;
                rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
        }

        conn->ibc_last_send = jiffies;

        if (rc == 0)
                return 0;

        /* NB credits are transferred in the actual
         * message, which can only be the last work item */
        conn->ibc_credits += credit;
        conn->ibc_outstanding_credits += msg->ibm_credits;
        conn->ibc_nsends_posted--;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        tx->tx_status = rc;
        tx->tx_waiting = 0;
        tx->tx_sending--;

        done = (tx->tx_sending == 0);
        if (done)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                CERROR("Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));
        else
                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer_ni->ibp_nid));

        kiblnd_close_conn(conn, rc);

        if (done)
                kiblnd_tx_done(peer_ni->ibp_ni, tx);

        spin_lock(&conn->ibc_lock);

        return -EIO;
}

static void
kiblnd_check_sends_locked(kib_conn_t *conn)
{
        int        ver = conn->ibc_version;
        struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
        kib_tx_t  *tx;

        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                CDEBUG(D_NET, "%s too soon\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        LASSERT(conn->ibc_nsends_posted <=
                kiblnd_concurrent_sends(ver, ni));
        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT (conn->ibc_reserved_credits >= 0);

        while (conn->ibc_reserved_credits > 0 &&
               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
                                    kib_tx_t, tx_list);
                list_del(&tx->tx_list);
                list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
        }

        if (kiblnd_need_noop(conn)) {
                spin_unlock(&conn->ibc_lock);

                tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
                if (tx != NULL)
                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

                spin_lock(&conn->ibc_lock);
                if (tx != NULL)
                        kiblnd_queue_tx_locked(tx, conn);
        }

        for (;;) {
                int credit;

                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
                                            kib_tx_t, tx_list);
                } else if (!list_empty(&conn->ibc_tx_noops)) {
                        LASSERT (!IBLND_OOB_CAPABLE(ver));
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_noops.next,
                                        kib_tx_t, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_queue.next,
                                            kib_tx_t, tx_list);
                } else
                        break;

                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                        break;
        }
}

static void
kiblnd_tx_complete (kib_tx_t *tx, int status)
{
        int           failed = (status != IB_WC_SUCCESS);
        kib_conn_t   *conn = tx->tx_conn;
        int           idle;

        LASSERT (tx->tx_sending > 0);

        if (failed) {
                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CNETERR("Tx -> %s cookie %#llx"
                                " sending %d waiting %d: failed %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
                                status);

                kiblnd_close_conn(conn, -EIO);
        } else {
                kiblnd_peer_alive(conn->ibc_peer);
        }

        spin_lock(&conn->ibc_lock);

        /* I could be racing with rdma completion.  Whoever makes 'tx' idle
         * gets to free it, which also drops its ref on 'conn'. */

        tx->tx_sending--;
        conn->ibc_nsends_posted--;
        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        if (failed) {
                tx->tx_waiting = 0;             /* don't wait for peer_ni */
                tx->tx_status = -EIO;
        }

        idle = (tx->tx_sending == 0) &&         /* This is the final callback */
               !tx->tx_waiting &&               /* Not waiting for peer_ni */
               !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
        if (idle)
                list_del(&tx->tx_list);

        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
}

static void
kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx, int type, int body_nob)
{
        kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
        struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
        struct ib_rdma_wr *wrq;
        int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
        struct ib_mr *mr = hdev->ibh_mrs;

        LASSERT(tx->tx_nwrq >= 0);
        LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
        LASSERT(nob <= IBLND_MSG_SIZE);
        LASSERT(mr != NULL);

        kiblnd_init_msg(tx->tx_msg, type, body_nob);

        sge->lkey   = mr->lkey;
        sge->addr   = tx->tx_msgaddr;
        sge->length = nob;

        wrq = &tx->tx_wrq[tx->tx_nwrq];
        memset(wrq, 0, sizeof(*wrq));

        wrq->wr.next            = NULL;
        wrq->wr.wr_id           = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
        wrq->wr.sg_list         = sge;
        wrq->wr.num_sge         = 1;
        wrq->wr.opcode          = IB_WR_SEND;
        wrq->wr.send_flags      = IB_SEND_SIGNALED;

        tx->tx_nwrq++;
}

static int
kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
                 int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
        kib_msg_t         *ibmsg = tx->tx_msg;
        kib_rdma_desc_t   *srcrd = tx->tx_rd;
        struct ib_sge     *sge = &tx->tx_sge[0];
        struct ib_rdma_wr *wrq;
        int                rc  = resid;
        int                srcidx;
        int                dstidx;
        int                wrknob;

        LASSERT (!in_interrupt());
        LASSERT (tx->tx_nwrq == 0);
        LASSERT (type == IBLND_MSG_GET_DONE ||
                 type == IBLND_MSG_PUT_DONE);

        srcidx = dstidx = 0;

        while (resid > 0) {
                if (srcidx >= srcrd->rd_nfrags) {
                        CERROR("Src buffer exhausted: %d frags\n", srcidx);
                        rc = -EPROTO;
                        break;
                }

                if (dstidx == dstrd->rd_nfrags) {
                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                        rc = -EPROTO;
                        break;
                }

                if (tx->tx_nwrq >= conn->ibc_max_frags) {
                        CERROR("RDMA has too many fragments for peer_ni %s (%d), "
                               "src idx/frags: %d/%d dst idx/frags: %d/%d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               conn->ibc_max_frags,
                               srcidx, srcrd->rd_nfrags,
                               dstidx, dstrd->rd_nfrags);
                        rc = -EMSGSIZE;
                        break;
                }

                wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
                                 kiblnd_rd_frag_size(dstrd, dstidx)), resid);

                sge = &tx->tx_sge[tx->tx_nwrq];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                sge->length = wrknob;

                wrq = &tx->tx_wrq[tx->tx_nwrq];

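                /* Each RDMA WR's 'next' points at the following tx_wrq slot;
                 * the completion message appended below by
                 * kiblnd_init_tx_msg() fills the final slot and terminates
                 * the chain with next == NULL. */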
                wrq->wr.next            = &(wrq + 1)->wr;
                wrq->wr.wr_id           = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
                wrq->wr.sg_list         = sge;
                wrq->wr.num_sge         = 1;
                wrq->wr.opcode          = IB_WR_RDMA_WRITE;
                wrq->wr.send_flags      = 0;

#ifdef HAVE_IB_RDMA_WR
                wrq->remote_addr        = kiblnd_rd_frag_addr(dstrd, dstidx);
                wrq->rkey               = kiblnd_rd_frag_key(dstrd, dstidx);
#else
                wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
                wrq->wr.wr.rdma.rkey    = kiblnd_rd_frag_key(dstrd, dstidx);
#endif

                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);

                resid -= wrknob;

                tx->tx_nwrq++;
                wrq++;
                sge++;
        }

        if (rc < 0)                             /* no RDMA if completing with failure */
                tx->tx_nwrq = 0;

        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
                           type, sizeof (kib_completion_msg_t));

        return rc;
}

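/* Put 'tx' on the send queue matching its message type (caller holds
 * ibc_lock): PUT/GET requests on the reserved-credit queue, completion
 * messages on the no-credit queue, NOOPs according to OOB capability, and
 * IMMEDIATEs on the normal queue. */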
static void
kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
{
        struct list_head *q;

        LASSERT(tx->tx_nwrq > 0);       /* work items set up */
        LASSERT(!tx->tx_queued);        /* not queued for sending already */
        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        tx->tx_queued = 1;
        tx->tx_deadline = jiffies +
                          msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
                                           MSEC_PER_SEC);

        if (tx->tx_conn == NULL) {
                kiblnd_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
        } else {
                /* PUT_DONE first attached to conn as a PUT_REQ */
                LASSERT (tx->tx_conn == conn);
                LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
        }

        switch (tx->tx_msg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_PUT_REQ:
        case IBLND_MSG_GET_REQ:
                q = &conn->ibc_tx_queue_rsrvd;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                q = &conn->ibc_tx_queue_nocred;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        q = &conn->ibc_tx_queue_nocred;
                else
                        q = &conn->ibc_tx_noops;
                break;

        case IBLND_MSG_IMMEDIATE:
                q = &conn->ibc_tx_queue;
                break;
        }

        list_add_tail(&tx->tx_list, q);
}

static void
kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
{
        spin_lock(&conn->ibc_lock);
        kiblnd_queue_tx_locked(tx, conn);
        kiblnd_check_sends_locked(conn);
        spin_unlock(&conn->ibc_lock);
}

static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
                               struct sockaddr_in *srcaddr,
                               struct sockaddr_in *dstaddr,
                               int timeout_ms)
{
        unsigned short port;
        int rc;

        /* allow the port to be reused */
        rc = rdma_set_reuseaddr(cmid, 1);
        if (rc != 0) {
                CERROR("Unable to set reuse on cmid: %d\n", rc);
                return rc;
        }

        /* look for a free privileged port */
        for (port = PROT_SOCK-1; port > 0; port--) {
                srcaddr->sin_port = htons(port);
                rc = rdma_resolve_addr(cmid,
                                       (struct sockaddr *)srcaddr,
                                       (struct sockaddr *)dstaddr,
                                       timeout_ms);
                if (rc == 0) {
                        CDEBUG(D_NET, "bound to port %hu\n", port);
                        return 0;
                } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
                        CDEBUG(D_NET, "bind to port %hu failed: %d\n",
                               port, rc);
                } else {
                        return rc;
                }
        }

        CERROR("Failed to bind to a free privileged port\n");
        return rc;
}

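/* Initiate an active connection: create a CM ID bound to the local
 * interface and start RDMA address resolution toward the peer_ni; the rest
 * of the handshake continues in kiblnd_cm_callback(). */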
static void
kiblnd_connect_peer (kib_peer_ni_t *peer_ni)
{
        struct rdma_cm_id *cmid;
        kib_dev_t         *dev;
        kib_net_t         *net = peer_ni->ibp_ni->ni_data;
        struct sockaddr_in srcaddr;
        struct sockaddr_in dstaddr;
        int                rc;

        LASSERT (net != NULL);
        LASSERT (peer_ni->ibp_connecting > 0);
        LASSERT(!peer_ni->ibp_reconnecting);

        cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP,
                                     IB_QPT_RC);

        if (IS_ERR(cmid)) {
                CERROR("Can't create CMID for %s: %ld\n",
                       libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid));
                rc = PTR_ERR(cmid);
                goto failed;
        }

        dev = net->ibn_dev;
        memset(&srcaddr, 0, sizeof(srcaddr));
        srcaddr.sin_family = AF_INET;
        srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);

        memset(&dstaddr, 0, sizeof(dstaddr));
        dstaddr.sin_family = AF_INET;
        dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
        dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid));

        kiblnd_peer_addref(peer_ni);               /* cmid's ref */

        if (*kiblnd_tunables.kib_use_priv_port) {
                rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
                                         *kiblnd_tunables.kib_timeout * 1000);
        } else {
                rc = rdma_resolve_addr(cmid,
                                       (struct sockaddr *)&srcaddr,
                                       (struct sockaddr *)&dstaddr,
                                       *kiblnd_tunables.kib_timeout * 1000);
        }
        if (rc != 0) {
                /* Can't initiate address resolution */
                CERROR("Can't resolve addr for %s: %d\n",
                       libcfs_nid2str(peer_ni->ibp_nid), rc);
                goto failed2;
        }

        LASSERT (cmid->device != NULL);
        CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
               libcfs_nid2str(peer_ni->ibp_nid), dev->ibd_ifname,
               &dev->ibd_ifip, cmid->device->name);

        return;

 failed2:
        kiblnd_peer_connect_failed(peer_ni, 1, rc);
        kiblnd_peer_decref(peer_ni);               /* cmid's ref */
        rdma_destroy_id(cmid);
        return;
 failed:
        kiblnd_peer_connect_failed(peer_ni, 1, rc);
}

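/* Retry connecting to a peer_ni whose previous connection attempt was
 * rejected (e.g. with REJ_INVALID_SERVICE_ID); returns false, completing
 * any queued txs with -ECONNABORTED, if a reconnection is no longer
 * applicable because the peer_ni is already connecting, accepting,
 * connected, or has been unlinked from the peer_ni table. */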
bool
kiblnd_reconnect_peer(kib_peer_ni_t *peer_ni)
{
        rwlock_t         *glock = &kiblnd_data.kib_global_lock;
        char             *reason = NULL;
        struct list_head  txs;
        unsigned long     flags;

        INIT_LIST_HEAD(&txs);

        write_lock_irqsave(glock, flags);
        if (peer_ni->ibp_reconnecting == 0) {
                if (peer_ni->ibp_accepting)
                        reason = "accepting";
                else if (peer_ni->ibp_connecting)
                        reason = "connecting";
                else if (!list_empty(&peer_ni->ibp_conns))
                        reason = "connected";
                else /* connected then closed */
                        reason = "closed";

                goto no_reconnect;
        }

        LASSERT(!peer_ni->ibp_accepting && !peer_ni->ibp_connecting &&
                list_empty(&peer_ni->ibp_conns));
        peer_ni->ibp_reconnecting = 0;

        if (!kiblnd_peer_active(peer_ni)) {
                list_splice_init(&peer_ni->ibp_tx_queue, &txs);
                reason = "unlinked";
                goto no_reconnect;
        }

        peer_ni->ibp_connecting++;
        peer_ni->ibp_reconnected++;

        write_unlock_irqrestore(glock, flags);

        kiblnd_connect_peer(peer_ni);
        return true;

 no_reconnect:
        write_unlock_irqrestore(glock, flags);

        CWARN("Abort reconnection of %s: %s\n",
              libcfs_nid2str(peer_ni->ibp_nid), reason);
        kiblnd_txlist_done(peer_ni->ibp_ni, &txs, -ECONNABORTED);
        return false;
}

void
kiblnd_launch_tx(struct lnet_ni *ni, kib_tx_t *tx, lnet_nid_t nid)
{
        kib_peer_ni_t        *peer_ni;
        kib_peer_ni_t        *peer2;
        kib_conn_t        *conn;
        rwlock_t        *g_lock = &kiblnd_data.kib_global_lock;
        unsigned long      flags;
        int                rc;

        /* If I get here, I've committed to send, so I complete the tx with
         * failure on any problems */

        LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
        LASSERT (tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */

        /* First time, just use a read lock since I expect to find my peer_ni
         * connected */
        read_lock_irqsave(g_lock, flags);

        peer_ni = kiblnd_find_peer_locked(ni, nid);
        if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) {
                /* Found a peer_ni with an established connection */
                conn = kiblnd_get_conn_locked(peer_ni);
                kiblnd_conn_addref(conn); /* 1 ref for me... */

                read_unlock_irqrestore(g_lock, flags);

                if (tx != NULL)
                        kiblnd_queue_tx(tx, conn);
                kiblnd_conn_decref(conn); /* ...to here */
                return;
        }

        read_unlock(g_lock);
        /* Re-try with a write lock */
        write_lock(g_lock);

        peer_ni = kiblnd_find_peer_locked(ni, nid);
        if (peer_ni != NULL) {
                if (list_empty(&peer_ni->ibp_conns)) {
                        /* found a peer_ni, but it's still connecting... */
                        LASSERT(kiblnd_peer_connecting(peer_ni));
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list,
                                                  &peer_ni->ibp_tx_queue);
                        write_unlock_irqrestore(g_lock, flags);
                } else {
                        conn = kiblnd_get_conn_locked(peer_ni);
                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        write_unlock_irqrestore(g_lock, flags);

                        if (tx != NULL)
                                kiblnd_queue_tx(tx, conn);
                        kiblnd_conn_decref(conn); /* ...to here */
                }
                return;
        }

        write_unlock_irqrestore(g_lock, flags);

        /* Allocate a peer_ni ready to add to the peer_ni table and retry */
        rc = kiblnd_create_peer(ni, &peer_ni, nid);
        if (rc != 0) {
                CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid));
                if (tx != NULL) {
                        tx->tx_status = -EHOSTUNREACH;
                        tx->tx_waiting = 0;
                        kiblnd_tx_done(ni, tx);
                }
                return;
        }

        write_lock_irqsave(g_lock, flags);

        peer2 = kiblnd_find_peer_locked(ni, nid);
        if (peer2 != NULL) {
                if (list_empty(&peer2->ibp_conns)) {
                        /* found a peer_ni, but it's still connecting... */
                        LASSERT(kiblnd_peer_connecting(peer2));
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list,
                                                  &peer2->ibp_tx_queue);
                        write_unlock_irqrestore(g_lock, flags);
                } else {
                        conn = kiblnd_get_conn_locked(peer2);
                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        write_unlock_irqrestore(g_lock, flags);

                        if (tx != NULL)
                                kiblnd_queue_tx(tx, conn);
                        kiblnd_conn_decref(conn); /* ...to here */
                }

                kiblnd_peer_decref(peer_ni);
                return;
        }

        /* Brand new peer_ni */
        LASSERT (peer_ni->ibp_connecting == 0);
        peer_ni->ibp_connecting = 1;

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);

        if (tx != NULL)
                list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);

        kiblnd_peer_addref(peer_ni);
        list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));

        write_unlock_irqrestore(g_lock, flags);

        kiblnd_connect_peer(peer_ni);
        kiblnd_peer_decref(peer_ni);
}

1482 int
1483 kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
1484 {
1485         struct lnet_hdr *hdr = &lntmsg->msg_hdr;
1486         int               type = lntmsg->msg_type;
1487         struct lnet_process_id target = lntmsg->msg_target;
1488         int               target_is_router = lntmsg->msg_target_is_router;
1489         int               routing = lntmsg->msg_routing;
1490         unsigned int      payload_niov = lntmsg->msg_niov;
1491         struct kvec      *payload_iov = lntmsg->msg_iov;
1492         lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
1493         unsigned int      payload_offset = lntmsg->msg_offset;
1494         unsigned int      payload_nob = lntmsg->msg_len;
1495         kib_msg_t        *ibmsg;
1496         kib_rdma_desc_t  *rd;
1497         kib_tx_t         *tx;
1498         int               nob;
1499         int               rc;
1500
1501         /* NB 'private' is different depending on what we're sending.... */
1502
1503         CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
1504                payload_nob, payload_niov, libcfs_id2str(target));
1505
1506         LASSERT (payload_nob == 0 || payload_niov > 0);
1507         LASSERT (payload_niov <= LNET_MAX_IOV);
1508
1509         /* Thread context */
1510         LASSERT (!in_interrupt());
1511         /* payload is either all vaddrs or all pages */
1512         LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
1513
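             /* Dispatch on message type: anything whose payload fits within
              * IBLND_MSG_SIZE is sent inline as an IMMEDIATE message, while
              * larger GET/PUT payloads are advertised as RDMA descriptors. */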
1514         switch (type) {
1515         default:
1516                 LBUG();
1517                 return (-EIO);
1518
1519         case LNET_MSG_ACK:
1520                 LASSERT (payload_nob == 0);
1521                 break;
1522
1523         case LNET_MSG_GET:
1524                 if (routing || target_is_router)
1525                         break;                  /* send IMMEDIATE */
1526
1527                 /* is the REPLY message too small for RDMA? */
1528                 nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
1529                 if (nob <= IBLND_MSG_SIZE)
1530                         break;                  /* send IMMEDIATE */
1531
1532                 tx = kiblnd_get_idle_tx(ni, target.nid);
1533                 if (tx == NULL) {
1534                         CERROR("Can't allocate txd for GET to %s\n",
1535                                libcfs_nid2str(target.nid));
1536                         return -ENOMEM;
1537                 }
1538
1539                 ibmsg = tx->tx_msg;
1540                 rd = &ibmsg->ibm_u.get.ibgm_rd;
1541                 if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
1542                         rc = kiblnd_setup_rd_iov(ni, tx, rd,
1543                                                  lntmsg->msg_md->md_niov,
1544                                                  lntmsg->msg_md->md_iov.iov,
1545                                                  0, lntmsg->msg_md->md_length);
1546                 else
1547                         rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1548                                                   lntmsg->msg_md->md_niov,
1549                                                   lntmsg->msg_md->md_iov.kiov,
1550                                                   0, lntmsg->msg_md->md_length);
1551                 if (rc != 0) {
1552                         CERROR("Can't setup GET sink for %s: %d\n",
1553                                libcfs_nid2str(target.nid), rc);
1554                         kiblnd_tx_done(ni, tx);
1555                         return -EIO;
1556                 }
1557
1558                 nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
1559                 ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
1560                 ibmsg->ibm_u.get.ibgm_hdr = *hdr;
1561
1562                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
1563
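             /* NB the peer_ni RDMA-writes the reply payload directly into the
              * sink described above and then sends GET_DONE; create the REPLY
              * message now so both lntmsg entries are finalised on completion. */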
1564                 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
1565                 if (tx->tx_lntmsg[1] == NULL) {
1566                         CERROR("Can't create reply for GET -> %s\n",
1567                                libcfs_nid2str(target.nid));
1568                         kiblnd_tx_done(ni, tx);
1569                         return -EIO;
1570                 }
1571
1572                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
1573                 tx->tx_waiting = 1;             /* waiting for GET_DONE */
1574                 kiblnd_launch_tx(ni, tx, target.nid);
1575                 return 0;
1576
1577         case LNET_MSG_REPLY:
1578         case LNET_MSG_PUT:
1579                 /* Is the payload small enough not to need RDMA? */
1580                 nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
1581                 if (nob <= IBLND_MSG_SIZE)
1582                         break;                  /* send IMMEDIATE */
1583
1584                 tx = kiblnd_get_idle_tx(ni, target.nid);
1585                 if (tx == NULL) {
1586                         CERROR("Can't allocate %s txd for %s\n",
1587                                type == LNET_MSG_PUT ? "PUT" : "REPLY",
1588                                libcfs_nid2str(target.nid));
1589                         return -ENOMEM;
1590                 }
1591
1592                 if (payload_kiov == NULL)
1593                         rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1594                                                  payload_niov, payload_iov,
1595                                                  payload_offset, payload_nob);
1596                 else
1597                         rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1598                                                   payload_niov, payload_kiov,
1599                                                   payload_offset, payload_nob);
1600                 if (rc != 0) {
1601                         CERROR("Can't setup PUT src for %s: %d\n",
1602                                libcfs_nid2str(target.nid), rc);
1603                         kiblnd_tx_done(ni, tx);
1604                         return -EIO;
1605                 }
1606
1607                 ibmsg = tx->tx_msg;
1608                 ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
1609                 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
1610                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
1611
1612                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1613                 tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
1614                 kiblnd_launch_tx(ni, tx, target.nid);
1615                 return 0;
1616         }
1617
1618         /* send IMMEDIATE */
1619
1620         LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
1621                  <= IBLND_MSG_SIZE);
1622
1623         tx = kiblnd_get_idle_tx(ni, target.nid);
1624         if (tx == NULL) {
1625                 CERROR ("Can't send %d to %s: tx descs exhausted\n",
1626                         type, libcfs_nid2str(target.nid));
1627                 return -ENOMEM;
1628         }
1629
1630         ibmsg = tx->tx_msg;
1631         ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
1632
1633         if (payload_kiov != NULL)
1634                 lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
1635                                     offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1636                                     payload_niov, payload_kiov,
1637                                     payload_offset, payload_nob);
1638         else
1639                 lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
1640                                    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1641                                    payload_niov, payload_iov,
1642                                    payload_offset, payload_nob);
1643
1644         nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
1645         kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
1646
1647         tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
1648         kiblnd_launch_tx(ni, tx, target.nid);
1649         return 0;
1650 }
1651
1652 static void
1653 kiblnd_reply(struct lnet_ni *ni, kib_rx_t *rx, struct lnet_msg *lntmsg)
1654 {
1655         struct lnet_process_id target = lntmsg->msg_target;
1656         unsigned int      niov = lntmsg->msg_niov;
1657         struct kvec      *iov = lntmsg->msg_iov;
1658         lnet_kiov_t      *kiov = lntmsg->msg_kiov;
1659         unsigned int      offset = lntmsg->msg_offset;
1660         unsigned int      nob = lntmsg->msg_len;
1661         kib_tx_t         *tx;
1662         int               rc;
1663
1664         tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
1665         if (tx == NULL) {
1666                 CERROR("Can't get tx for REPLY to %s\n",
1667                        libcfs_nid2str(target.nid));
1668                 goto failed_0;
1669         }
1670
1671         if (nob == 0)
1672                 rc = 0;
1673         else if (kiov == NULL)
1674                 rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
1675                                          niov, iov, offset, nob);
1676         else
1677                 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
1678                                           niov, kiov, offset, nob);
1679
1680         if (rc != 0) {
1681                 CERROR("Can't setup GET src for %s: %d\n",
1682                        libcfs_nid2str(target.nid), rc);
1683                 goto failed_1;
1684         }
1685
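             /* Push the reply payload straight into the sink the peer_ni
              * described in its GET request, completing with GET_DONE. */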
1686         rc = kiblnd_init_rdma(rx->rx_conn, tx,
1687                               IBLND_MSG_GET_DONE, nob,
1688                               &rx->rx_msg->ibm_u.get.ibgm_rd,
1689                               rx->rx_msg->ibm_u.get.ibgm_cookie);
1690         if (rc < 0) {
1691                 CERROR("Can't setup rdma for GET from %s: %d\n",
1692                        libcfs_nid2str(target.nid), rc);
1693                 goto failed_1;
1694         }
1695
1696         if (nob == 0) {
1697                 /* No RDMA: local completion may happen now! */
1698                 lnet_finalize(ni, lntmsg, 0);
1699         } else {
1700                 /* RDMA: lnet_finalize(lntmsg) when it
1701                  * completes */
1702                 tx->tx_lntmsg[0] = lntmsg;
1703         }
1704
1705         kiblnd_queue_tx(tx, rx->rx_conn);
1706         return;
1707
1708  failed_1:
1709         kiblnd_tx_done(ni, tx);
1710  failed_0:
1711         lnet_finalize(ni, lntmsg, -EIO);
1712 }
1713
1714 int
1715 kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
1716             int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
1717             unsigned int offset, unsigned int mlen, unsigned int rlen)
1718 {
1719         kib_rx_t    *rx = private;
1720         kib_msg_t   *rxmsg = rx->rx_msg;
1721         kib_conn_t  *conn = rx->rx_conn;
1722         kib_tx_t    *tx;
1723         int          nob;
1724         int          post_credit = IBLND_POSTRX_PEER_CREDIT;
1725         int          rc = 0;
1726
1727         LASSERT (mlen <= rlen);
1728         LASSERT (!in_interrupt());
1729         /* Either all pages or all vaddrs */
1730         LASSERT (!(kiov != NULL && iov != NULL));
1731
1732         switch (rxmsg->ibm_type) {
1733         default:
1734                 LBUG();
1735
1736         case IBLND_MSG_IMMEDIATE:
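                 /* Payload arrived inline with the message header; just copy
                  * it into the receive buffers after a size sanity check. */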
1737                 nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
1738                 if (nob > rx->rx_nob) {
1739                         CERROR ("Immediate message from %s too big: %d(%d)\n",
1740                                 libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
1741                                 nob, rx->rx_nob);
1742                         rc = -EPROTO;
1743                         break;
1744                 }
1745
1746                 if (kiov != NULL)
1747                         lnet_copy_flat2kiov(niov, kiov, offset,
1748                                             IBLND_MSG_SIZE, rxmsg,
1749                                             offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1750                                             mlen);
1751                 else
1752                         lnet_copy_flat2iov(niov, iov, offset,
1753                                            IBLND_MSG_SIZE, rxmsg,
1754                                            offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
1755                                            mlen);
1756                 lnet_finalize(ni, lntmsg, 0);
1757                 break;
1758
1759         case IBLND_MSG_PUT_REQ: {
1760                 kib_msg_t       *txmsg;
1761                 kib_rdma_desc_t *rd;
1762
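                 /* Matched PUT: send PUT_ACK carrying a sink RDMA descriptor
                  * so the peer_ni can write the payload, or PUT_NAK if there
                  * is nothing to receive. */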
1763                 if (mlen == 0) {
1764                         lnet_finalize(ni, lntmsg, 0);
1765                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
1766                                                rxmsg->ibm_u.putreq.ibprm_cookie);
1767                         break;
1768                 }
1769
1770                 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
1771                 if (tx == NULL) {
1772                         CERROR("Can't allocate tx for %s\n",
1773                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
1774                         /* Not replying will break the connection */
1775                         rc = -ENOMEM;
1776                         break;
1777                 }
1778
1779                 txmsg = tx->tx_msg;
1780                 rd = &txmsg->ibm_u.putack.ibpam_rd;
1781                 if (kiov == NULL)
1782                         rc = kiblnd_setup_rd_iov(ni, tx, rd,
1783                                                  niov, iov, offset, mlen);
1784                 else
1785                         rc = kiblnd_setup_rd_kiov(ni, tx, rd,
1786                                                   niov, kiov, offset, mlen);
1787                 if (rc != 0) {
1788                         CERROR("Can't setup PUT sink for %s: %d\n",
1789                                libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
1790                         kiblnd_tx_done(ni, tx);
1791                         /* tell peer_ni it's over */
1792                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
1793                                                rxmsg->ibm_u.putreq.ibprm_cookie);
1794                         break;
1795                 }
1796
1797                 nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
1798                 txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
1799                 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
1800
1801                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
1802
1803                 tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
1804                 tx->tx_waiting = 1;             /* waiting for PUT_DONE */
1805                 kiblnd_queue_tx(tx, conn);
1806
1807                 /* reposted buffer reserved for PUT_DONE */
1808                 post_credit = IBLND_POSTRX_NO_CREDIT;
1809                 break;
1810                 }
1811
1812         case IBLND_MSG_GET_REQ:
1813                 if (lntmsg != NULL) {
1814                         /* Optimized GET; RDMA lntmsg's payload */
1815                         kiblnd_reply(ni, rx, lntmsg);
1816                 } else {
1817                         /* GET didn't match anything */
1818                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
1819                                                -ENODATA,
1820                                                rxmsg->ibm_u.get.ibgm_cookie);
1821                 }
1822                 break;
1823         }
1824
1825         kiblnd_post_rx(rx, post_credit);
1826         return rc;
1827 }
1828
1829 int
1830 kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
1831 {
1832         struct task_struct *task = kthread_run(fn, arg, name);
1833
1834         if (IS_ERR(task))
1835                 return PTR_ERR(task);
1836
1837         atomic_inc(&kiblnd_data.kib_nthreads);
1838         return 0;
1839 }
1840
1841 static void
1842 kiblnd_thread_fini (void)
1843 {
1844         atomic_dec (&kiblnd_data.kib_nthreads);
1845 }
1846
1847 static void
1848 kiblnd_peer_alive (kib_peer_ni_t *peer_ni)
1849 {
1850         /* This is racy, but everyone's only writing cfs_time_current() */
1851         peer_ni->ibp_last_alive = cfs_time_current();
1852         smp_mb();
1853 }
1854
1855 static void
1856 kiblnd_peer_notify (kib_peer_ni_t *peer_ni)
1857 {
1858         int           error = 0;
1859         cfs_time_t    last_alive = 0;
1860         unsigned long flags;
1861
1862         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1863
1864         if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
1865                 error = peer_ni->ibp_error;
1866                 peer_ni->ibp_error = 0;
1867
1868                 last_alive = peer_ni->ibp_last_alive;
1869         }
1870
1871         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1872
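             /* Only notify LNet about a peer_ni that died with an error;
              * 'alive' == 0 marks the peer_ni down, and last_alive lets upper
              * layers judge how stale it is. */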
1873         if (error != 0)
1874                 lnet_notify(peer_ni->ibp_ni,
1875                             peer_ni->ibp_nid, 0, last_alive);
1876 }
1877
1878 void
1879 kiblnd_close_conn_locked (kib_conn_t *conn, int error)
1880 {
1881         /* This just does the immediate housekeeping.  'error' is zero for a
1882          * normal shutdown which can happen only after the connection has been
1883          * established.  If the connection is established, schedule the
1884          * connection to be finished off by the connd.  Otherwise the connd is
1885          * already dealing with it (either to set it up or tear it down).
1886          * Caller holds kib_global_lock exclusively in irq context */
1887         kib_peer_ni_t       *peer_ni = conn->ibc_peer;
1888         kib_dev_t        *dev;
1889         unsigned long     flags;
1890
1891         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1892
1893         if (error != 0 && conn->ibc_comms_error == 0)
1894                 conn->ibc_comms_error = error;
1895
1896         if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
1897                 return; /* already being handled  */
1898
1899         if (error == 0 &&
1900             list_empty(&conn->ibc_tx_noops) &&
1901             list_empty(&conn->ibc_tx_queue) &&
1902             list_empty(&conn->ibc_tx_queue_rsrvd) &&
1903             list_empty(&conn->ibc_tx_queue_nocred) &&
1904             list_empty(&conn->ibc_active_txs)) {
1905                 CDEBUG(D_NET, "closing conn to %s\n",
1906                        libcfs_nid2str(peer_ni->ibp_nid));
1907         } else {
1908                 CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
1909                        libcfs_nid2str(peer_ni->ibp_nid), error,
1910                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
1911                        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
1912                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
1913                                                 "" : "(sending_rsrvd)",
1914                        list_empty(&conn->ibc_tx_queue_nocred) ?
1915                                                  "" : "(sending_nocred)",
1916                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
1917         }
1918
1919         dev = ((kib_net_t *)peer_ni->ibp_ni->ni_data)->ibn_dev;
1920         list_del(&conn->ibc_list);
1921         /* connd (see below) takes over ibc_list's ref */
1922
1923         if (list_empty(&peer_ni->ibp_conns) &&    /* no more conns */
1924             kiblnd_peer_active(peer_ni)) {         /* still in peer_ni table */
1925                 kiblnd_unlink_peer_locked(peer_ni);
1926
1927                 /* set/clear error on last conn */
1928                 peer_ni->ibp_error = conn->ibc_comms_error;
1929         }
1930
1931         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
1932
1933         if (error != 0 &&
1934             kiblnd_dev_can_failover(dev)) {
1935                 list_add_tail(&dev->ibd_fail_list,
1936                               &kiblnd_data.kib_failed_devs);
1937                 wake_up(&kiblnd_data.kib_failover_waitq);
1938         }
1939
1940         spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
1941
1942         list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
1943         wake_up(&kiblnd_data.kib_connd_waitq);
1944
1945         spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
1946 }
1947
1948 void
1949 kiblnd_close_conn(kib_conn_t *conn, int error)
1950 {
1951         unsigned long flags;
1952
1953         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1954
1955         kiblnd_close_conn_locked(conn, error);
1956
1957         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1958 }
1959
1960 static void
1961 kiblnd_handle_early_rxs(kib_conn_t *conn)
1962 {
1963         unsigned long    flags;
1964         kib_rx_t        *rx;
1965
1966         LASSERT(!in_interrupt());
1967         LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
1968
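             /* Drain rxs that arrived before the connection was established;
              * the global lock is dropped around each kiblnd_handle_rx() call
              * since handling an rx can queue and post replies. */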
1969         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1970         while (!list_empty(&conn->ibc_early_rxs)) {
1971                 rx = list_entry(conn->ibc_early_rxs.next,
1972                                     kib_rx_t, rx_list);
1973                 list_del(&rx->rx_list);
1974                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1975
1976                 kiblnd_handle_rx(rx);
1977
1978                 write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
1979         }
1980         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
1981 }
1982
1983 static void
1984 kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
1985 {
1986         struct list_head         zombies = LIST_HEAD_INIT(zombies);
1987         struct list_head        *tmp;
1988         struct list_head        *nxt;
1989         kib_tx_t                *tx;
1990
1991         spin_lock(&conn->ibc_lock);
1992
1993         list_for_each_safe(tmp, nxt, txs) {
1994                 tx = list_entry(tmp, kib_tx_t, tx_list);
1995
1996                 if (txs == &conn->ibc_active_txs) {
1997                         LASSERT(!tx->tx_queued);
1998                         LASSERT(tx->tx_waiting ||
1999                                 tx->tx_sending != 0);
2000                 } else {
2001                         LASSERT(tx->tx_queued);
2002                 }
2003
2004                 tx->tx_status = -ECONNABORTED;
2005                 tx->tx_waiting = 0;
2006
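                 /* A tx with sends still outstanding stays put; the completion
                  * callback will finish it once tx_sending reaches zero.  Only
                  * idle txs can be zombified immediately. */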
2007                 if (tx->tx_sending == 0) {
2008                         tx->tx_queued = 0;
2009                         list_del(&tx->tx_list);
2010                         list_add(&tx->tx_list, &zombies);
2011                 }
2012         }
2013
2014         spin_unlock(&conn->ibc_lock);
2015
2016         kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
2017 }
2018
2019 static void
2020 kiblnd_finalise_conn (kib_conn_t *conn)
2021 {
2022         LASSERT (!in_interrupt());
2023         LASSERT (conn->ibc_state > IBLND_CONN_INIT);
2024
2025         kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
2026
2027         /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
2028          * for connections that didn't get as far as being connected, because
2029          * rdma_disconnect() does this for free. */
2030         kiblnd_abort_receives(conn);
2031
2032         /* Complete all tx descs not waiting for sends to complete.
2033          * NB we should be safe from RDMA now that the QP has changed state */
2034
2035         kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
2036         kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
2037         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
2038         kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
2039         kiblnd_abort_txs(conn, &conn->ibc_active_txs);
2040
2041         kiblnd_handle_early_rxs(conn);
2042 }
2043
2044 static void
2045 kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error)
2046 {
2047         struct list_head zombies = LIST_HEAD_INIT(zombies);
2048         unsigned long   flags;
2049
2050         LASSERT (error != 0);
2051         LASSERT (!in_interrupt());
2052
2053         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2054
2055         if (active) {
2056                 LASSERT(peer_ni->ibp_connecting > 0);
2057                 peer_ni->ibp_connecting--;
2058         } else {
2059                 LASSERT (peer_ni->ibp_accepting > 0);
2060                 peer_ni->ibp_accepting--;
2061         }
2062
2063         if (kiblnd_peer_connecting(peer_ni)) {
2064                 /* another connection attempt under way... */
2065                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
2066                                         flags);
2067                 return;
2068         }
2069
2070         peer_ni->ibp_reconnected = 0;
2071         if (list_empty(&peer_ni->ibp_conns)) {
2072                 /* Take peer_ni's blocked transmits to complete with error */
2073                 list_add(&zombies, &peer_ni->ibp_tx_queue);
2074                 list_del_init(&peer_ni->ibp_tx_queue);
2075
2076                 if (kiblnd_peer_active(peer_ni))
2077                         kiblnd_unlink_peer_locked(peer_ni);
2078
2079                 peer_ni->ibp_error = error;
2080         } else {
2081                 /* Can't have blocked transmits if there are connections */
2082                 LASSERT(list_empty(&peer_ni->ibp_tx_queue));
2083         }
2084
2085         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2086
2087         kiblnd_peer_notify(peer_ni);
2088
2089         if (list_empty(&zombies))
2090                 return;
2091
2092         CNETERR("Deleting messages for %s: connection failed\n",
2093                 libcfs_nid2str(peer_ni->ibp_nid));
2094
2095         kiblnd_txlist_done(peer_ni->ibp_ni, &zombies, -EHOSTUNREACH);
2096 }
2097
2098 static void
2099 kiblnd_connreq_done(kib_conn_t *conn, int status)
2100 {
2101         kib_peer_ni_t    *peer_ni = conn->ibc_peer;
2102         kib_tx_t         *tx;
2103         struct list_head txs;
2104         unsigned long    flags;
2105         int              active;
2106
2107         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2108
2109         CDEBUG(D_NET, "%s: active(%d), version(%x), status(%d)\n",
2110                libcfs_nid2str(peer_ni->ibp_nid), active,
2111                conn->ibc_version, status);
2112
2113         LASSERT (!in_interrupt());
2114         LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
2115                   peer_ni->ibp_connecting > 0) ||
2116                  (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
2117                   peer_ni->ibp_accepting > 0));
2118
2119         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
2120         conn->ibc_connvars = NULL;
2121
2122         if (status != 0) {
2123                 /* failed to establish connection */
2124                 kiblnd_peer_connect_failed(peer_ni, active, status);
2125                 kiblnd_finalise_conn(conn);
2126                 return;
2127         }
2128
2129         /* connection established */
2130         write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2131
2132         conn->ibc_last_send = jiffies;
2133         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
2134         kiblnd_peer_alive(peer_ni);
2135
2136         /* Add conn to peer_ni's list and nuke any dangling conns from a different
2137          * peer_ni instance... */
2138         kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
2139         list_add(&conn->ibc_list, &peer_ni->ibp_conns);
2140         peer_ni->ibp_reconnected = 0;
2141         if (active)
2142                 peer_ni->ibp_connecting--;
2143         else
2144                 peer_ni->ibp_accepting--;
2145
2146         if (peer_ni->ibp_version == 0) {
2147                 peer_ni->ibp_version     = conn->ibc_version;
2148                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2149         }
2150
2151         if (peer_ni->ibp_version     != conn->ibc_version ||
2152             peer_ni->ibp_incarnation != conn->ibc_incarnation) {
2153                 kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version,
2154                                                 conn->ibc_incarnation);
2155                 peer_ni->ibp_version     = conn->ibc_version;
2156                 peer_ni->ibp_incarnation = conn->ibc_incarnation;
2157         }
2158
2159         /* grab pending txs while I have the lock */
2160         list_add(&txs, &peer_ni->ibp_tx_queue);
2161         list_del_init(&peer_ni->ibp_tx_queue);
2162
2163         if (!kiblnd_peer_active(peer_ni) ||        /* peer_ni has been deleted */
2164             conn->ibc_comms_error != 0) {       /* error has happened already */
2165                 struct lnet_ni *ni = peer_ni->ibp_ni;
2166
2167                 /* start to shut down connection */
2168                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
2169                 write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2170
2171                 kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
2172
2173                 return;
2174         }
2175
2176         /* +1 ref for myself, this connection is visible to other threads
2177          * now; the ref via peer_ni::ibp_conns can be released by connection
2178          * close from either a different thread, or the calling of
2179          * kiblnd_check_sends_locked() below. See bz21911 for details.
2180          */
2181         kiblnd_conn_addref(conn);
2182         write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2183
2184         /* Schedule blocked txs */
2185         spin_lock(&conn->ibc_lock);
2186         while (!list_empty(&txs)) {
2187                 tx = list_entry(txs.next, kib_tx_t, tx_list);
2188                 list_del(&tx->tx_list);
2189
2190                 kiblnd_queue_tx_locked(tx, conn);
2191         }
2192         kiblnd_check_sends_locked(conn);
2193         spin_unlock(&conn->ibc_lock);
2194
2195         /* schedule blocked rxs */
2196         kiblnd_handle_early_rxs(conn);
2197         kiblnd_conn_decref(conn);
2198 }
2199
2200 static void
2201 kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
2202 {
2203         int          rc;
2204
2205         rc = rdma_reject(cmid, rej, sizeof(*rej));
2206
2207         if (rc != 0)
2208                 CWARN("Error %d sending reject\n", rc);
2209 }
2210
2211 static int
2212 kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
2213 {
2214         rwlock_t                *g_lock = &kiblnd_data.kib_global_lock;
2215         kib_msg_t             *reqmsg = priv;
2216         kib_msg_t             *ackmsg;
2217         kib_dev_t             *ibdev;
2218         kib_peer_ni_t            *peer_ni;
2219         kib_peer_ni_t            *peer2;
2220         kib_conn_t            *conn;
2221         struct lnet_ni             *ni  = NULL;
2222         kib_net_t             *net = NULL;
2223         lnet_nid_t             nid;
2224         struct rdma_conn_param cp;
2225         kib_rej_t              rej;
2226         int                    version = IBLND_MSG_VERSION;
2227         unsigned long          flags;
2228         int                    rc;
2229         struct sockaddr_in    *peer_addr;
2230         LASSERT (!in_interrupt());
2231
2232         /* cmid inherits 'context' from the corresponding listener id */
2233         ibdev = (kib_dev_t *)cmid->context;
2234         LASSERT (ibdev != NULL);
2235
2236         memset(&rej, 0, sizeof(rej));
2237         rej.ibr_magic                = IBLND_MSG_MAGIC;
2238         rej.ibr_why                  = IBLND_REJECT_FATAL;
2239         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
2240
2241         peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
2242         if (*kiblnd_tunables.kib_require_priv_port &&
2243             ntohs(peer_addr->sin_port) >= PROT_SOCK) {
2244                 __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
2245                 CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
2246                        &ip, ntohs(peer_addr->sin_port));
2247                 goto failed;
2248         }
2249
2250         if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
2251                 CERROR("Short connection request\n");
2252                 goto failed;
2253         }
2254
2255         /* Future protocol version compatibility support!  If the
2256          * o2iblnd-specific protocol changes, or when LNET unifies
2257          * protocols over all LNDs, the initial connection will
2258          * negotiate a protocol version.  I trap this here to avoid
2259          * console errors; the reject tells the peer_ni which protocol I
2260          * speak. */
2261         if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
2262             reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
2263                 goto failed;
2264         if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
2265             reqmsg->ibm_version != IBLND_MSG_VERSION &&
2266             reqmsg->ibm_version != IBLND_MSG_VERSION_1)
2267                 goto failed;
2268         if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
2269             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
2270             reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
2271                 goto failed;
2272
2273         rc = kiblnd_unpack_msg(reqmsg, priv_nob);
2274         if (rc != 0) {
2275                 CERROR("Can't parse connection request: %d\n", rc);
2276                 goto failed;
2277         }
2278
2279         nid = reqmsg->ibm_srcnid;
2280         ni  = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
2281
2282         if (ni != NULL) {
2283                 net = (kib_net_t *)ni->ni_data;
2284                 rej.ibr_incarnation = net->ibn_incarnation;
2285         }
2286
2287         if (ni == NULL ||                         /* no matching net */
2288             ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
2289             net->ibn_dev != ibdev) {              /* wrong device */
2290                 CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): "
2291                        "bad dst nid %s\n", libcfs_nid2str(nid),
2292                        ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
2293                        ibdev->ibd_ifname, ibdev->ibd_nnets,
2294                        &ibdev->ibd_ifip,
2295                        libcfs_nid2str(reqmsg->ibm_dstnid));
2296
2297                 goto failed;
2298         }
2299
2300         /* check time stamp as soon as possible */
2301         if (reqmsg->ibm_dststamp != 0 &&
2302             reqmsg->ibm_dststamp != net->ibn_incarnation) {
2303                 CWARN("Stale connection request\n");
2304                 rej.ibr_why = IBLND_REJECT_CONN_STALE;
2305                 goto failed;
2306         }
2307
2308         /* I can accept peer_ni's version */
2309         version = reqmsg->ibm_version;
2310
2311         if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
2312                 CERROR("Unexpected connreq msg type: %x from %s\n",
2313                        reqmsg->ibm_type, libcfs_nid2str(nid));
2314                 goto failed;
2315         }
2316
2317         if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
2318             kiblnd_msg_queue_size(version, ni)) {
2319                 CERROR("Can't accept conn from %s, queue depth too large: "
2320                        "%d (<=%d wanted)\n",
2321                        libcfs_nid2str(nid),
2322                        reqmsg->ibm_u.connparams.ibcp_queue_depth,
2323                        kiblnd_msg_queue_size(version, ni));
2324
2325                 if (version == IBLND_MSG_VERSION)
2326                         rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
2327
2328                 goto failed;
2329         }
2330
2331         if (reqmsg->ibm_u.connparams.ibcp_max_frags >
2332             kiblnd_rdma_frags(version, ni)) {
2333                 CWARN("Can't accept conn from %s (version %x): "
2334                       "max_frags %d too large (%d wanted)\n",
2335                       libcfs_nid2str(nid), version,
2336                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2337                       kiblnd_rdma_frags(version, ni));
2338
2339                 if (version >= IBLND_MSG_VERSION)
2340                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2341
2342                 goto failed;
2343         } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
2344                    kiblnd_rdma_frags(version, ni) &&
2345                    net->ibn_fmr_ps == NULL) {
2346                 CWARN("Can't accept conn from %s (version %x): "
2347                       "max_frags %d incompatible without FMR pool "
2348                       "(%d wanted)\n",
2349                       libcfs_nid2str(nid), version,
2350                       reqmsg->ibm_u.connparams.ibcp_max_frags,
2351                       kiblnd_rdma_frags(version, ni));
2352
2353                 if (version == IBLND_MSG_VERSION)
2354                         rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
2355
2356                 goto failed;
2357         }
2358
2359         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2360                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
2361                        libcfs_nid2str(nid),
2362                        reqmsg->ibm_u.connparams.ibcp_max_msg_size,
2363                        IBLND_MSG_SIZE);
2364                 goto failed;
2365         }
2366
2367         /* assume 'nid' is a new peer_ni; create one */
2368         rc = kiblnd_create_peer(ni, &peer_ni, nid);
2369         if (rc != 0) {
2370                 CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid));
2371                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2372                 goto failed;
2373         }
2374
2375         /* We have validated the peer's parameters so use those */
2376         peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
2377         peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
2378
2379         write_lock_irqsave(g_lock, flags);
2380
2381         peer2 = kiblnd_find_peer_locked(ni, nid);
2382         if (peer2 != NULL) {
2383                 if (peer2->ibp_version == 0) {
2384                         peer2->ibp_version     = version;
2385                         peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2386                 }
2387
2388                 /* not the guy I've talked with */
2389                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
2390                     peer2->ibp_version     != version) {
2391                         kiblnd_close_peer_conns_locked(peer2, -ESTALE);
2392
2393                         if (kiblnd_peer_active(peer2)) {
2394                                 peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
2395                                 peer2->ibp_version = version;
2396                         }
2397                         write_unlock_irqrestore(g_lock, flags);
2398
2399                         CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
2400                               libcfs_nid2str(nid), peer2->ibp_version, version,
2401                               peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
2402
2403                         kiblnd_peer_decref(peer_ni);
2404                         rej.ibr_why = IBLND_REJECT_CONN_STALE;
2405                         goto failed;
2406                 }
2407
2408                 /* Tie-break connection race in favour of the higher NID.
2409                  * If we keep running into a race condition multiple times,
2410                  * we have to assume that the connection attempt with the
2411                  * higher NID is stuck in a connecting state and will never
2412                  * recover.  As such, we pass through this if-block and let
2413                  * the lower NID connection win so we can move forward.
2414                  */
2415                 if (peer2->ibp_connecting != 0 &&
2416                     nid < ni->ni_nid && peer2->ibp_races <
2417                     MAX_CONN_RACES_BEFORE_ABORT) {
2418                         peer2->ibp_races++;
2419                         write_unlock_irqrestore(g_lock, flags);
2420
2421                         CDEBUG(D_NET, "Conn race %s\n",
2422                                libcfs_nid2str(peer2->ibp_nid));
2423
2424                         kiblnd_peer_decref(peer_ni);
2425                         rej.ibr_why = IBLND_REJECT_CONN_RACE;
2426                         goto failed;
2427                 }
2428                 if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
2429                         CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
2430                                 libcfs_nid2str(peer2->ibp_nid),
2431                                 MAX_CONN_RACES_BEFORE_ABORT);
2432                 /*
2433                  * passive connection is allowed even if this peer_ni is waiting for
2434                  * reconnection.
2435                  */
2436                 peer2->ibp_reconnecting = 0;
2437                 peer2->ibp_races = 0;
2438                 peer2->ibp_accepting++;
2439                 kiblnd_peer_addref(peer2);
2440
2441                 /* Race with kiblnd_launch_tx (active connect) to create peer_ni
2442                  * so copy validated parameters since we now know what the
2443                  * peer_ni's limits are */
2444                 peer2->ibp_max_frags = peer_ni->ibp_max_frags;
2445                 peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
2446
2447                 write_unlock_irqrestore(g_lock, flags);
2448                 kiblnd_peer_decref(peer_ni);
2449                 peer_ni = peer2;
2450         } else {
2451                 /* Brand new peer_ni */
2452                 LASSERT (peer_ni->ibp_accepting == 0);
2453                 LASSERT (peer_ni->ibp_version == 0 &&
2454                          peer_ni->ibp_incarnation == 0);
2455
2456                 peer_ni->ibp_accepting   = 1;
2457                 peer_ni->ibp_version     = version;
2458                 peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
2459
2460                 /* I have a ref on ni that prevents it being shutdown */
2461                 LASSERT (net->ibn_shutdown == 0);
2462
2463                 kiblnd_peer_addref(peer_ni);
2464                 list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
2465
2466                 write_unlock_irqrestore(g_lock, flags);
2467         }
2468
2469         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, version);
2470         if (conn == NULL) {
2471                 kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
2472                 kiblnd_peer_decref(peer_ni);
2473                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
2474                 goto failed;
2475         }
2476
2477         /* conn now "owns" cmid, so I return success from here on to ensure the
2478          * CM callback doesn't destroy cmid. */
2479         conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
2480         conn->ibc_credits          = conn->ibc_queue_depth;
2481         conn->ibc_reserved_credits = conn->ibc_queue_depth;
2482         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
2483                 IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
2484
2485         ackmsg = &conn->ibc_connvars->cv_msg;
2486         memset(ackmsg, 0, sizeof(*ackmsg));
2487
2488         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
2489                         sizeof(ackmsg->ibm_u.connparams));
2490         ackmsg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
2491         ackmsg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
2492         ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2493
2494         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
2495
2496         memset(&cp, 0, sizeof(cp));
2497         cp.private_data        = ackmsg;
2498         cp.private_data_len    = ackmsg->ibm_nob;
2499         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
2500         cp.initiator_depth     = 0;
2501         cp.flow_control        = 1;
2502         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2503         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2504
2505         CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
2506
2507         rc = rdma_accept(cmid, &cp);
2508         if (rc != 0) {
2509                 CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
2510                 rej.ibr_version = version;
2511                 rej.ibr_why     = IBLND_REJECT_FATAL;
2512
2513                 kiblnd_reject(cmid, &rej);
2514                 kiblnd_connreq_done(conn, rc);
2515                 kiblnd_conn_decref(conn);
2516         }
2517
2518         lnet_ni_decref(ni);
2519         return 0;
2520
2521  failed:
2522         if (ni != NULL) {
2523                 rej.ibr_cp.ibcp_queue_depth =
2524                         kiblnd_msg_queue_size(version, ni);
2525                 rej.ibr_cp.ibcp_max_frags   = kiblnd_rdma_frags(version, ni);
2526                 lnet_ni_decref(ni);
2527         }
2528
2529         rej.ibr_version = version;
2530         kiblnd_reject(cmid, &rej);
2531
2532         return -ECONNREFUSED;
2533 }
2534
2535 static void
2536 kiblnd_check_reconnect(kib_conn_t *conn, int version,
2537                        __u64 incarnation, int why, kib_connparams_t *cp)
2538 {
2539         rwlock_t        *glock = &kiblnd_data.kib_global_lock;
2540         kib_peer_ni_t   *peer_ni = conn->ibc_peer;
2541         char            *reason;
2542         int              msg_size = IBLND_MSG_SIZE;
2543         int              frag_num = -1;
2544         int              queue_dep = -1;
2545         bool             reconnect;
2546         unsigned long    flags;
2547
2548         LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2549         LASSERT(peer_ni->ibp_connecting > 0);   /* 'conn' at least */
2550         LASSERT(!peer_ni->ibp_reconnecting);
2551
2552         if (cp) {
2553                 msg_size        = cp->ibcp_max_msg_size;
2554                 frag_num        = cp->ibcp_max_frags;
2555                 queue_dep       = cp->ibcp_queue_depth;
2556         }
2557
2558         write_lock_irqsave(glock, flags);
2559         /* retry connection if it's still needed and no other connection
2560          * attempts (active or passive) are in progress
2561          * NB: reconnect is still needed even when ibp_tx_queue is
2562          * empty if ibp_version != version because reconnect may be
2563          * initiated by kiblnd_query() */
2564         reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
2565                      peer_ni->ibp_version != version) &&
2566                     peer_ni->ibp_connecting == 1 &&
2567                     peer_ni->ibp_accepting == 0;
2568         if (!reconnect) {
2569                 reason = "no need";
2570                 goto out;
2571         }
2572
2573         switch (why) {
2574         default:
2575                 reason = "Unknown";
2576                 break;
2577
2578         case IBLND_REJECT_RDMA_FRAGS: {
2579                 struct lnet_ioctl_config_o2iblnd_tunables *tunables;
2580
2581                 if (!cp) {
2582                         reason = "can't negotiate max frags";
2583                         goto out;
2584                 }
2585                 tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
2586                 if (!tunables->lnd_map_on_demand) {
2587                         reason = "map_on_demand must be enabled";
2588                         goto out;
2589                 }
2590                 if (conn->ibc_max_frags <= frag_num) {
2591                         reason = "unsupported max frags";
2592                         goto out;
2593                 }
2594
2595                 peer_ni->ibp_max_frags = frag_num;
2596                 reason = "rdma fragments";
2597                 break;
2598         }
2599         case IBLND_REJECT_MSG_QUEUE_SIZE:
2600                 if (!cp) {
2601                         reason = "can't negotiate queue depth";
2602                         goto out;
2603                 }
2604                 if (conn->ibc_queue_depth <= queue_dep) {
2605                         reason = "unsupported queue depth";
2606                         goto out;
2607                 }
2608
2609                 peer_ni->ibp_queue_depth = queue_dep;
2610                 reason = "queue depth";
2611                 break;
2612
2613         case IBLND_REJECT_CONN_STALE:
2614                 reason = "stale";
2615                 break;
2616
2617         case IBLND_REJECT_CONN_RACE:
2618                 reason = "conn race";
2619                 break;
2620
2621         case IBLND_REJECT_CONN_UNCOMPAT:
2622                 reason = "version negotiation";
2623                 break;
2624
2625         case IBLND_REJECT_INVALID_SRV_ID:
2626                 reason = "invalid service id";
2627                 break;
2628         }
2629
2630         conn->ibc_reconnect = 1;
2631         peer_ni->ibp_reconnecting = 1;
2632         peer_ni->ibp_version = version;
2633         if (incarnation != 0)
2634                 peer_ni->ibp_incarnation = incarnation;
2635  out:
2636         write_unlock_irqrestore(glock, flags);
2637
2638         CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
2639                 libcfs_nid2str(peer_ni->ibp_nid),
2640                 reconnect ? "reconnect" : "don't reconnect",
2641                 reason, IBLND_MSG_VERSION, version, msg_size,
2642                 conn->ibc_queue_depth, queue_dep,
2643                 conn->ibc_max_frags, frag_num);
2644         /*
2645          * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer_ni
2646          * while destroying the zombie
2647          */
2648 }
2649
2650 static void
2651 kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
2652 {
2653         kib_peer_ni_t    *peer_ni = conn->ibc_peer;
2654
2655         LASSERT (!in_interrupt());
2656         LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
2657
2658         switch (reason) {
2659         case IB_CM_REJ_STALE_CONN:
2660                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2661                                        IBLND_REJECT_CONN_STALE, NULL);
2662                 break;
2663
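         /* No listener at the peer_ni's service id: it may simply not have
          * finished starting up yet, so schedule a reconnect attempt rather
          * than failing the connection outright (see LU-9094). */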
2664         case IB_CM_REJ_INVALID_SERVICE_ID:
2665                 kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
2666                                        IBLND_REJECT_INVALID_SRV_ID, NULL);
2667                 CNETERR("%s rejected: no listener at %d\n",
2668                         libcfs_nid2str(peer_ni->ibp_nid),
2669                         *kiblnd_tunables.kib_service);
2670                 break;
2671
2672         case IB_CM_REJ_CONSUMER_DEFINED:
2673                 if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
2674                         kib_rej_t        *rej         = priv;
2675                         kib_connparams_t *cp          = NULL;
2676                         int               flip        = 0;
2677                         __u64             incarnation = -1;
2678
2679                         /* NB. default incarnation is -1 because:
2680                          * a) V1 will ignore dst incarnation in connreq.
2681                          * b) V2 will provide incarnation while rejecting me,
2682                          *    so -1 will be overwritten.
2683                          *
2684                          * If I try to connect to a V1 peer_ni with the V2
2685                          * protocol and it rejects me, then upgrades to V2,
2686                          * I know nothing about the upgrade and will retry
2687                          * with V1.  The upgraded peer_ni can then tell I'm
2688                          * talking to the old version and rejects me
2689                          * (incarnation is -1). */
2690
2691                         if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
2692                             rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
2693                                 __swab32s(&rej->ibr_magic);
2694                                 __swab16s(&rej->ibr_version);
2695                                 flip = 1;
2696                         }
2697
2698                         if (priv_nob >= sizeof(kib_rej_t) &&
2699                             rej->ibr_version > IBLND_MSG_VERSION_1) {
2700                                 /* priv_nob is always 148 in the current
2701                                  * version of OFED, so we still need to check
2702                                  * the version (see IB_CM_REJ_PRIVATE_DATA_SIZE) */
2703                                 cp = &rej->ibr_cp;
2704
2705                                 if (flip) {
2706                                         __swab64s(&rej->ibr_incarnation);
2707                                         __swab16s(&cp->ibcp_queue_depth);
2708                                         __swab16s(&cp->ibcp_max_frags);
2709                                         __swab32s(&cp->ibcp_max_msg_size);
2710                                 }
2711
2712                                 incarnation = rej->ibr_incarnation;
2713                         }
2714
2715                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
2716                             rej->ibr_magic != LNET_PROTO_MAGIC) {
2717                                 CERROR("%s rejected: consumer defined fatal error\n",
2718                                        libcfs_nid2str(peer_ni->ibp_nid));
2719                                 break;
2720                         }
2721
2722                         if (rej->ibr_version != IBLND_MSG_VERSION &&
2723                             rej->ibr_version != IBLND_MSG_VERSION_1) {
2724                                 CERROR("%s rejected: o2iblnd version %x error\n",
2725                                        libcfs_nid2str(peer_ni->ibp_nid),
2726                                        rej->ibr_version);
2727                                 break;
2728                         }
2729
2730                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
2731                             rej->ibr_version == IBLND_MSG_VERSION_1) {
2732                                 CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
2733                                        libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version);
2734
2735                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
2736                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
2737                         }
2738
2739                         switch (rej->ibr_why) {
2740                         case IBLND_REJECT_CONN_RACE:
2741                         case IBLND_REJECT_CONN_STALE:
2742                         case IBLND_REJECT_CONN_UNCOMPAT:
2743                         case IBLND_REJECT_MSG_QUEUE_SIZE:
2744                         case IBLND_REJECT_RDMA_FRAGS:
2745                                 kiblnd_check_reconnect(conn, rej->ibr_version,
2746                                                 incarnation, rej->ibr_why, cp);
2747                                 break;
2748
2749                         case IBLND_REJECT_NO_RESOURCES:
2750                                 CERROR("%s rejected: o2iblnd no resources\n",
2751                                        libcfs_nid2str(peer_ni->ibp_nid));
2752                                 break;
2753
2754                         case IBLND_REJECT_FATAL:
2755                                 CERROR("%s rejected: o2iblnd fatal error\n",
2756                                        libcfs_nid2str(peer_ni->ibp_nid));
2757                                 break;
2758
2759                         default:
2760                                 CERROR("%s rejected: o2iblnd reason %d\n",
2761                                        libcfs_nid2str(peer_ni->ibp_nid),
2762                                        rej->ibr_why);
2763                                 break;
2764                         }
2765                         break;
2766                 }
2767                 /* fall through */
2768         default:
2769                 CNETERR("%s rejected: reason %d, size %d\n",
2770                         libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
2771                 break;
2772         }
2773
2774         kiblnd_connreq_done(conn, -ECONNREFUSED);
2775 }
2776
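/*
 * Validate the CONNACK the peer_ni returned in its CM private data on the
 * active side: unpack it, then check the message type, protocol version,
 * queue depth, max_frags and max message size against what we requested
 * before completing the connection.  Since the QP is already established
 * by the time this runs, failures are reported via ibc_comms_error rather
 * than by rejecting the connection.
 */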
2777 static void
2778 kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
2779 {
2780         kib_peer_ni_t    *peer_ni = conn->ibc_peer;
2781         struct lnet_ni *ni   = peer_ni->ibp_ni;
2782         kib_net_t     *net  = ni->ni_data;
2783         kib_msg_t     *msg  = priv;
2784         int            ver  = conn->ibc_version;
2785         int            rc   = kiblnd_unpack_msg(msg, priv_nob);
2786         unsigned long  flags;
2787
2788         LASSERT (net != NULL);
2789
2790         if (rc != 0) {
2791                 CERROR("Can't unpack connack from %s: %d\n",
2792                        libcfs_nid2str(peer_ni->ibp_nid), rc);
2793                 goto failed;
2794         }
2795
2796         if (msg->ibm_type != IBLND_MSG_CONNACK) {
2797                 CERROR("Unexpected message %d from %s\n",
2798                        msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid));
2799                 rc = -EPROTO;
2800                 goto failed;
2801         }
2802
2803         if (ver != msg->ibm_version) {
2804                 CERROR("%s replied with version %x, which differs from "
2805                        "the requested version %x\n",
2806                        libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver);
2807                 rc = -EPROTO;
2808                 goto failed;
2809         }
2810
2811         if (msg->ibm_u.connparams.ibcp_queue_depth >
2812             conn->ibc_queue_depth) {
2813                 CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
2814                        libcfs_nid2str(peer_ni->ibp_nid),
2815                        msg->ibm_u.connparams.ibcp_queue_depth,
2816                        conn->ibc_queue_depth);
2817                 rc = -EPROTO;
2818                 goto failed;
2819         }
2820
2821         if (msg->ibm_u.connparams.ibcp_max_frags >
2822             conn->ibc_max_frags) {
2823                 CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
2824                        libcfs_nid2str(peer_ni->ibp_nid),
2825                        msg->ibm_u.connparams.ibcp_max_frags,
2826                        conn->ibc_max_frags);
2827                 rc = -EPROTO;
2828                 goto failed;
2829         }
2830
2831         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
2832                 CERROR("%s max message size %d too big (%d max)\n",
2833                        libcfs_nid2str(peer_ni->ibp_nid),
2834                        msg->ibm_u.connparams.ibcp_max_msg_size,
2835                        IBLND_MSG_SIZE);
2836                 rc = -EPROTO;
2837                 goto failed;
2838         }
2839
2840         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2841         if (msg->ibm_dstnid == ni->ni_nid &&
2842             msg->ibm_dststamp == net->ibn_incarnation)
2843                 rc = 0;
2844         else
2845                 rc = -ESTALE;
2846         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2847
2848         if (rc != 0) {
2849                 CERROR("Bad connection reply from %s, rc = %d, "
2850                        "version: %x max_frags: %d\n",
2851                        libcfs_nid2str(peer_ni->ibp_nid), rc,
2852                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
2853                 goto failed;
2854         }
2855
2856         conn->ibc_incarnation      = msg->ibm_srcstamp;
2857         conn->ibc_credits          = msg->ibm_u.connparams.ibcp_queue_depth;
2858         conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
2859         conn->ibc_queue_depth      = msg->ibm_u.connparams.ibcp_queue_depth;
2860         conn->ibc_max_frags        = msg->ibm_u.connparams.ibcp_max_frags;
2861         LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
2862                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
2863
2864         kiblnd_connreq_done(conn, 0);
2865         return;
2866
2867  failed:
2868         /* NB My QP has already established itself, so I handle anything going
2869          * wrong here by setting ibc_comms_error.
2870          * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
2871          * immediately tears it down. */
2872
2873         LASSERT (rc != 0);
2874         conn->ibc_comms_error = rc;
2875         kiblnd_connreq_done(conn, 0);
2876 }
2877
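/*
 * Start an active connect: create the conn, pack a CONNREQ carrying our
 * queue depth, max_frags and max message size into the CM private data,
 * and call rdma_connect().  Once the conn is created it owns the cmid
 * (and the cmid's ref on the peer_ni), so we must return 0 from then on
 * to stop the CM callback destroying the cmid.
 */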
2878 static int
2879 kiblnd_active_connect (struct rdma_cm_id *cmid)
2880 {
2881         kib_peer_ni_t              *peer_ni = (kib_peer_ni_t *)cmid->context;
2882         kib_conn_t              *conn;
2883         kib_msg_t               *msg;
2884         struct rdma_conn_param   cp;
2885         int                      version;
2886         __u64                    incarnation;
2887         unsigned long            flags;
2888         int                      rc;
2889
2890         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
2891
2892         incarnation = peer_ni->ibp_incarnation;
2893         version     = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION :
2894                                                  peer_ni->ibp_version;
2895
2896         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
2897
2898         conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT,
2899                                   version);
2900         if (conn == NULL) {
2901                 kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM);
2902                 kiblnd_peer_decref(peer_ni); /* lose cmid's ref */
2903                 return -ENOMEM;
2904         }
2905
2906         /* conn "owns" cmid now, so I return success from here on to ensure the
2907          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
2908          * on peer_ni */
2909
2910         msg = &conn->ibc_connvars->cv_msg;
2911
2912         memset(msg, 0, sizeof(*msg));
2913         kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
2914         msg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
2915         msg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
2916         msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
2917
2918         kiblnd_pack_msg(peer_ni->ibp_ni, msg, version,
2919                         0, peer_ni->ibp_nid, incarnation);
2920
2921         memset(&cp, 0, sizeof(cp));
2922         cp.private_data        = msg;
2923         cp.private_data_len    = msg->ibm_nob;
2924         cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
2925         cp.initiator_depth     = 0;
2926         cp.flow_control        = 1;
2927         cp.retry_count         = *kiblnd_tunables.kib_retry_count;
2928         cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
2929
2930         LASSERT(cmid->context == (void *)conn);
2931         LASSERT(conn->ibc_cmid == cmid);
2932
2933         rc = rdma_connect(cmid, &cp);
2934         if (rc != 0) {
2935                 CERROR("Can't connect to %s: %d\n",
2936                        libcfs_nid2str(peer_ni->ibp_nid), rc);
2937                 kiblnd_connreq_done(conn, rc);
2938                 kiblnd_conn_decref(conn);
2939         }
2940
2941         return 0;
2942 }
2943
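/*
 * RDMA CM event dispatcher.  Note the cmid->context convention: it holds
 * the peer_ni while the address and route are being resolved, and the
 * conn once kiblnd_active_connect()/kiblnd_passive_connect() has run.
 * Returning non-zero from this callback makes the CM destroy the cmid.
 */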
2944 int
2945 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
2946 {
2947         kib_peer_ni_t  *peer_ni;
2948         kib_conn_t  *conn;
2949         int          rc;
2950
2951         switch (event->event) {
2952         default:
2953                 CERROR("Unexpected event: %d, status: %d\n",
2954                        event->event, event->status);
2955                 LBUG();
2956
2957         case RDMA_CM_EVENT_CONNECT_REQUEST:
2958                 /* destroy cmid on failure */
2959                 rc = kiblnd_passive_connect(cmid,
2960                                             (void *)KIBLND_CONN_PARAM(event),
2961                                             KIBLND_CONN_PARAM_LEN(event));
2962                 CDEBUG(D_NET, "connreq: %d\n", rc);
2963                 return rc;
2964
2965         case RDMA_CM_EVENT_ADDR_ERROR:
2966                 peer_ni = (kib_peer_ni_t *)cmid->context;
2967                 CNETERR("%s: ADDR ERROR %d\n",
2968                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
2969                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
2970                 kiblnd_peer_decref(peer_ni);
2971                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */
2972
2973         case RDMA_CM_EVENT_ADDR_RESOLVED:
2974                 peer_ni = (kib_peer_ni_t *)cmid->context;
2975
2976                 CDEBUG(D_NET, "%s Addr resolved: %d\n",
2977                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
2978
2979                 if (event->status != 0) {
2980                         CNETERR("Can't resolve address for %s: %d\n",
2981                                 libcfs_nid2str(peer_ni->ibp_nid), event->status);
2982                         rc = event->status;
2983                 } else {
2984                         rc = rdma_resolve_route(
2985                                 cmid, *kiblnd_tunables.kib_timeout * 1000);
2986                         if (rc == 0)
2987                                 return 0;
2988                         /* Can't initiate route resolution */
2989                         CERROR("Can't resolve route for %s: %d\n",
2990                                libcfs_nid2str(peer_ni->ibp_nid), rc);
2991                 }
2992                 kiblnd_peer_connect_failed(peer_ni, 1, rc);
2993                 kiblnd_peer_decref(peer_ni);
2994                 return rc;                      /* rc != 0 destroys cmid */
2995
2996         case RDMA_CM_EVENT_ROUTE_ERROR:
2997                 peer_ni = (kib_peer_ni_t *)cmid->context;
2998                 CNETERR("%s: ROUTE ERROR %d\n",
2999                         libcfs_nid2str(peer_ni->ibp_nid), event->status);
3000                 kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
3001                 kiblnd_peer_decref(peer_ni);
3002                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
3003
3004         case RDMA_CM_EVENT_ROUTE_RESOLVED:
3005                 peer_ni = (kib_peer_ni_t *)cmid->context;
3006                 CDEBUG(D_NET, "%s Route resolved: %d\n",
3007                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3008
3009                 if (event->status == 0)
3010                         return kiblnd_active_connect(cmid);
3011
3012                 CNETERR("Can't resolve route for %s: %d\n",
3013                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
3014                 kiblnd_peer_connect_failed(peer_ni, 1, event->status);
3015                 kiblnd_peer_decref(peer_ni);
3016                 return event->status;           /* rc != 0 destroys cmid */
3017
3018         case RDMA_CM_EVENT_UNREACHABLE:
3019                 conn = (kib_conn_t *)cmid->context;
3020                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3021                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3022                 CNETERR("%s: UNREACHABLE %d\n",
3023                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3024                 kiblnd_connreq_done(conn, -ENETDOWN);
3025                 kiblnd_conn_decref(conn);
3026                 return 0;
3027
3028         case RDMA_CM_EVENT_CONNECT_ERROR:
3029                 conn = (kib_conn_t *)cmid->context;
3030                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
3031                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
3032                 CNETERR("%s: CONNECT ERROR %d\n",
3033                         libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
3034                 kiblnd_connreq_done(conn, -ENOTCONN);
3035                 kiblnd_conn_decref(conn);
3036                 return 0;
3037
3038         case RDMA_CM_EVENT_REJECTED:
3039                 conn = (kib_conn_t *)cmid->context;
3040                 switch (conn->ibc_state) {
3041                 default:
3042                         LBUG();
3043
3044                 case IBLND_CONN_PASSIVE_WAIT:
3045                         CERROR("%s: REJECTED %d\n",
3046                                 libcfs_nid2str(conn->ibc_peer->ibp_nid),
3047                                 event->status);
3048                         kiblnd_connreq_done(conn, -ECONNRESET);
3049                         break;
3050
3051                 case IBLND_CONN_ACTIVE_CONNECT:
3052                         kiblnd_rejected(conn, event->status,
3053                                         (void *)KIBLND_CONN_PARAM(event),
3054                                         KIBLND_CONN_PARAM_LEN(event));
3055                         break;
3056                 }
3057                 kiblnd_conn_decref(conn);
3058                 return 0;
3059
3060         case RDMA_CM_EVENT_ESTABLISHED:
3061                 conn = (kib_conn_t *)cmid->context;
3062                 switch (conn->ibc_state) {
3063                 default:
3064                         LBUG();
3065
3066                 case IBLND_CONN_PASSIVE_WAIT:
3067                         CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
3068                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3069                         kiblnd_connreq_done(conn, 0);
3070                         break;
3071
3072                 case IBLND_CONN_ACTIVE_CONNECT:
3073                         CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
3074                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3075                         kiblnd_check_connreply(conn,
3076                                                (void *)KIBLND_CONN_PARAM(event),
3077                                                KIBLND_CONN_PARAM_LEN(event));
3078                         break;
3079                 }
3080                 /* net keeps its ref on conn! */
3081                 return 0;
3082
3083         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
3084                 CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
3085                 return 0;
3086
3087         case RDMA_CM_EVENT_DISCONNECTED:
3088                 conn = (kib_conn_t *)cmid->context;
3089                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
3090                         CERROR("%s DISCONNECTED\n",
3091                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
3092                         kiblnd_connreq_done(conn, -ECONNRESET);
3093                 } else {
3094                         kiblnd_close_conn(conn, 0);
3095                 }
3096                 kiblnd_conn_decref(conn);
3097                 cmid->context = NULL;
3098                 return 0;
3099
3100         case RDMA_CM_EVENT_DEVICE_REMOVAL:
3101                 LCONSOLE_ERROR_MSG(0x131,
3102                                    "Received notification of device removal\n"
3103                                    "Please shut down LNET to allow this to proceed\n");
3104                 /* Can't remove network from underneath LNET for now, so I have
3105                  * to ignore this */
3106                 return 0;
3107
3108         case RDMA_CM_EVENT_ADDR_CHANGE:
3109                 LCONSOLE_INFO("Physical link changed (e.g. HCA/port)\n");
3110                 return 0;
3111         }
3112 }
3113
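/*
 * Scan one of a conn's tx queues (caller holds ibc_lock) and return 1 if
 * any tx on it has passed its deadline, 0 otherwise.
 */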
3114 static int
3115 kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
3116 {
3117         kib_tx_t         *tx;
3118         struct list_head *ttmp;
3119
3120         list_for_each(ttmp, txs) {
3121                 tx = list_entry(ttmp, kib_tx_t, tx_list);
3122
3123                 if (txs != &conn->ibc_active_txs) {
3124                         LASSERT(tx->tx_queued);
3125                 } else {
3126                         LASSERT(!tx->tx_queued);
3127                         LASSERT(tx->tx_waiting || tx->tx_sending != 0);
3128                 }
3129
3130                 if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
3131                         CERROR("Timed out tx: %s, %lu seconds\n",
3132                                kiblnd_queue2str(conn, txs),
3133                                cfs_duration_sec(jiffies - tx->tx_deadline));
3134                         return 1;
3135                 }
3136         }
3137
3138         return 0;
3139 }
3140
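/* Return non-zero if any tx queued on, or active in, this conn has timed out */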
3141 static int
3142 kiblnd_conn_timed_out_locked(kib_conn_t *conn)
3143 {
3144         return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
3145                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
3146                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
3147                 kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
3148                 kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
3149 }
3150
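/*
 * Check every conn on one peer_ni hash bucket for timed-out txs and for
 * conns that need a NOOP: timed-out conns are closed with -ETIMEDOUT,
 * the others get a kiblnd_check_sends_locked() pass.
 */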
3151 static void
3152 kiblnd_check_conns (int idx)
3153 {
3154         struct list_head  closes = LIST_HEAD_INIT(closes);
3155         struct list_head  checksends = LIST_HEAD_INIT(checksends);
3156         struct list_head *peers = &kiblnd_data.kib_peers[idx];
3157         struct list_head *ptmp;
3158         kib_peer_ni_t    *peer_ni;
3159         kib_conn_t       *conn;
3160         struct list_head *ctmp;
3161         unsigned long     flags;
3162
3163         /* NB. We expect to have a look at all the peers and not find any
3164          * RDMAs to time out, so we just use a shared lock while we
3165          * take a look... */
3166         read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
3167
3168         list_for_each(ptmp, peers) {
3169                 peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
3170
3171                 list_for_each(ctmp, &peer_ni->ibp_conns) {
3172                         int timedout;
3173                         int sendnoop;
3174
3175                         conn = list_entry(ctmp, kib_conn_t, ibc_list);
3176
3177                         LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
3178
3179                         spin_lock(&conn->ibc_lock);
3180
3181                         sendnoop = kiblnd_need_noop(conn);
3182                         timedout = kiblnd_conn_timed_out_locked(conn);
3183                         if (!sendnoop && !timedout) {
3184                                 spin_unlock(&conn->ibc_lock);
3185                                 continue;
3186                         }
3187
3188                         if (timedout) {
3189                                 CERROR("Timed out RDMA with %s (%lu): "
3190                                        "c: %u, oc: %u, rc: %u\n",
3191                                        libcfs_nid2str(peer_ni->ibp_nid),
3192                                        cfs_duration_sec(cfs_time_current() -
3193                                                         peer_ni->ibp_last_alive),
3194                                        conn->ibc_credits,
3195                                        conn->ibc_outstanding_credits,
3196                                        conn->ibc_reserved_credits);
3197                                 list_add(&conn->ibc_connd_list, &closes);
3198                         } else {
3199                                 list_add(&conn->ibc_connd_list, &checksends);
3200                         }
3201                         /* +ref for 'closes' or 'checksends' */
3202                         kiblnd_conn_addref(conn);
3203
3204                         spin_unlock(&conn->ibc_lock);
3205                 }
3206         }
3207
3208         read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
3209
3210         /* Handle timeout by closing the whole
3211          * connection. We can only be sure RDMA activity
3212          * has ceased once the QP has been modified. */
3213         while (!list_empty(&closes)) {
3214                 conn = list_entry(closes.next,
3215                                   kib_conn_t, ibc_connd_list);
3216                 list_del(&conn->ibc_connd_list);
3217                 kiblnd_close_conn(conn, -ETIMEDOUT);
3218                 kiblnd_conn_decref(conn);
3219         }
3220
3221         /* In case we have enough credits to return via a
3222          * NOOP, but there were no non-blocking tx descs
3223          * free to do it last time... */
3224         while (!list_empty(&checksends)) {
3225                 conn = list_entry(checksends.next,
3226                                   kib_conn_t, ibc_connd_list);
3227                 list_del(&conn->ibc_connd_list);
3228
3229                 spin_lock(&conn->ibc_lock);
3230                 kiblnd_check_sends_locked(conn);
3231                 spin_unlock(&conn->ibc_lock);
3232
3233                 kiblnd_conn_decref(conn);
3234         }
3235 }
3236
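/* Runs in connd only: starts the CM disconnect, finalises the conn and
 * lets kiblnd_peer_notify() report the peer_ni's state. */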
3237 static void
3238 kiblnd_disconnect_conn (kib_conn_t *conn)
3239 {
3240         LASSERT (!in_interrupt());
3241         LASSERT (current == kiblnd_data.kib_connd);
3242         LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
3243
3244         rdma_disconnect(conn->ibc_cmid);
3245         kiblnd_finalise_conn(conn);
3246
3247         kiblnd_peer_notify(conn->ibc_peer);
3248 }
3249
3250 /*
3251  * High-water mark for reconnections to the same peer_ni; reconnection
3252  * attempts should be delayed after more than KIB_RECONN_HIGH_RACE tries.
3253  */
3254 #define KIB_RECONN_HIGH_RACE    10
3255 /*
3256  * Allow connd to take a break and handle other things after consecutive
3257  * reconnection attempts.
3258  */
3259 #define KIB_RECONN_BREAK        100
3260
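/*
 * Connection daemon: reaps zombie conns, runs CM disconnects, retries
 * reconnections (throttled by KIB_RECONN_HIGH_RACE/KIB_RECONN_BREAK) and
 * periodically sweeps the peer_ni table for RDMA timeouts.
 */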
3261 int
3262 kiblnd_connd (void *arg)
3263 {
3264         spinlock_t        *lock = &kiblnd_data.kib_connd_lock;
3265         wait_queue_t       wait;
3266         unsigned long      flags;
3267         kib_conn_t        *conn;
3268         int                timeout;
3269         int                i;
3270         int                dropped_lock;
3271         int                peer_index = 0;
3272         unsigned long      deadline = jiffies;
3273
3274         cfs_block_allsigs();
3275
3276         init_waitqueue_entry(&wait, current);
3277         kiblnd_data.kib_connd = current;
3278
3279         spin_lock_irqsave(lock, flags);
3280
3281         while (!kiblnd_data.kib_shutdown) {
3282                 int reconn = 0;
3283
3284                 dropped_lock = 0;
3285
3286                 if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
3287                         kib_peer_ni_t *peer_ni = NULL;
3288
3289                         conn = list_entry(kiblnd_data.kib_connd_zombies.next,
3290                                           kib_conn_t, ibc_list);
3291                         list_del(&conn->ibc_list);
3292                         if (conn->ibc_reconnect) {
3293                                 peer_ni = conn->ibc_peer;
3294                                 kiblnd_peer_addref(peer_ni);
3295                         }
3296
3297                         spin_unlock_irqrestore(lock, flags);
3298                         dropped_lock = 1;
3299
3300                         kiblnd_destroy_conn(conn, !peer_ni);
3301
3302                         spin_lock_irqsave(lock, flags);
3303                         if (!peer_ni)
3304                                 continue;
3305
3306                         conn->ibc_peer = peer_ni;
3307                         if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
3308                                 list_add_tail(&conn->ibc_list,
3309                                               &kiblnd_data.kib_reconn_list);
3310                         else
3311                                 list_add_tail(&conn->ibc_list,
3312                                               &kiblnd_data.kib_reconn_wait);
3313                 }
3314
3315                 if (!list_empty(&kiblnd_data.kib_connd_conns)) {
3316                         conn = list_entry(kiblnd_data.kib_connd_conns.next,
3317                                               kib_conn_t, ibc_list);
3318                         list_del(&conn->ibc_list);
3319
3320                         spin_unlock_irqrestore(lock, flags);
3321                         dropped_lock = 1;
3322
3323                         kiblnd_disconnect_conn(conn);
3324                         kiblnd_conn_decref(conn);
3325
3326                         spin_lock_irqsave(lock, flags);
3327                 }
3328
3329                 while (reconn < KIB_RECONN_BREAK) {
3330                         if (kiblnd_data.kib_reconn_sec !=
3331                             ktime_get_real_seconds()) {
3332                                 kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
3333                                 list_splice_init(&kiblnd_data.kib_reconn_wait,
3334                                                  &kiblnd_data.kib_reconn_list);
3335                         }
3336
3337                         if (list_empty(&kiblnd_data.kib_reconn_list))
3338                                 break;
3339
3340                         conn = list_entry(kiblnd_data.kib_reconn_list.next,
3341                                           kib_conn_t, ibc_list);
3342                         list_del(&conn->ibc_list);
3343
3344                         spin_unlock_irqrestore(lock, flags);
3345                         dropped_lock = 1;
3346
3347                         reconn += kiblnd_reconnect_peer(conn->ibc_peer);
3348                         kiblnd_peer_decref(conn->ibc_peer);
3349                         LIBCFS_FREE(conn, sizeof(*conn));
3350
3351                         spin_lock_irqsave(lock, flags);
3352                 }
3353
3354                 /* careful with the jiffy wrap... */
3355                 timeout = (int)(deadline - jiffies);
3356                 if (timeout <= 0) {
3357                         const int n = 4;
3358                         const int p = 1;
3359                         int       chunk = kiblnd_data.kib_peer_hash_size;
3360
3361                         spin_unlock_irqrestore(lock, flags);
3362                         dropped_lock = 1;
3363
3364                         /* Time to check for RDMA timeouts on a few more
3365                          * peers: I do checks every 'p' seconds on a
3366                          * proportion of the peer_ni table and I need to check
3367                          * every connection 'n' times within a timeout
3368                          * interval, to ensure I detect a timeout on any
3369                          * connection within (n+1)/n times the timeout
3370                          * interval. */
3371
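                        /* For example, assuming the default timeout of
                         * 50s (the "timeout" module parameter): chunk
                         * becomes hash_size * 4 / 50, the whole table is
                         * swept in ~timeout / n = 12.5s, and every conn
                         * is checked 4 times per timeout interval. */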
3372                         if (*kiblnd_tunables.kib_timeout > n * p)
3373                                 chunk = (chunk * n * p) /
3374                                         *kiblnd_tunables.kib_timeout;
3375                         if (chunk == 0)
3376                                 chunk = 1;
3377
3378                         for (i = 0; i < chunk; i++) {
3379                                 kiblnd_check_conns(peer_index);
3380                                 peer_index = (peer_index + 1) %
3381                                              kiblnd_data.kib_peer_hash_size;
3382                         }
3383
3384                         deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
3385                         spin_lock_irqsave(lock, flags);
3386                 }
3387
3388                 if (dropped_lock)
3389                         continue;
3390
3391                 /* Nothing to do for 'timeout' */
3392                 set_current_state(TASK_INTERRUPTIBLE);
3393                 add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3394                 spin_unlock_irqrestore(lock, flags);
3395
3396                 schedule_timeout(timeout);
3397
3398                 set_current_state(TASK_RUNNING);
3399                 remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
3400                 spin_lock_irqsave(lock, flags);
3401         }
3402
3403         spin_unlock_irqrestore(lock, flags);
3404
3405         kiblnd_thread_fini();
3406         return 0;
3407 }
3408
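/* Handle async QP events; only IB_EVENT_COMM_EST is acted on, anything
 * else is just logged as an error */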
3409 void
3410 kiblnd_qp_event(struct ib_event *event, void *arg)
3411 {
3412         kib_conn_t *conn = arg;
3413
3414         switch (event->event) {
3415         case IB_EVENT_COMM_EST:
3416                 CDEBUG(D_NET, "%s established\n",
3417                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
3418                 /* We received a packet while the connection isn't yet
3419                  * established; the handshake packet was probably lost, so
3420                  * it's safe to force the connection into established state */
3421                 rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
3422                 return;
3423
3424         default:
3425                 CERROR("%s: Async QP event type %d\n",
3426                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3427                 return;
3428         }
3429 }
3430
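/* Dispatch a work completion to the right handler according to the type
 * (FastReg, RDMA, TX or RX) encoded in its wr_id */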
3431 static void
3432 kiblnd_complete (struct ib_wc *wc)
3433 {
3434         switch (kiblnd_wreqid2type(wc->wr_id)) {
3435         default:
3436                 LBUG();
3437
3438         case IBLND_WID_MR:
3439                 if (wc->status != IB_WC_SUCCESS &&
3440                     wc->status != IB_WC_WR_FLUSH_ERR)
3441                         CNETERR("FastReg failed: %d\n", wc->status);
3442                 return;
3443
3444         case IBLND_WID_RDMA:
3445                 /* We only get RDMA completion notification if it fails.  All
3446                  * subsequent work items, including the final SEND will fail
3447                  * too.  However we can't print out any more info about the
3448                  * failing RDMA because 'tx' might be back on the idle list or
3449                  * even reused already if we didn't manage to post all our work
3450                  * items */
3451                 CNETERR("RDMA (tx: %p) failed: %d\n",
3452                         kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3453                 return;
3454
3455         case IBLND_WID_TX:
3456                 kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
3457                 return;
3458
3459         case IBLND_WID_RX:
3460                 kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
3461                                    wc->byte_len);
3462                 return;
3463         }
3464 }
3465
3466 void
3467 kiblnd_cq_completion(struct ib_cq *cq, void *arg)
3468 {
3469         /* NB I'm not allowed to schedule this conn once its refcount has
3470          * reached 0.  Since fundamentally I'm racing with scheduler threads
3471          * consuming my CQ I could be called after all completions have
3472          * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
3473          * and this CQ is about to be destroyed so I NOOP. */
3474         kib_conn_t              *conn = (kib_conn_t *)arg;
3475         struct kib_sched_info   *sched = conn->ibc_sched;
3476         unsigned long           flags;
3477
3478         LASSERT(cq == conn->ibc_cq);
3479
3480         spin_lock_irqsave(&sched->ibs_lock, flags);
3481
3482         conn->ibc_ready = 1;
3483
3484         if (!conn->ibc_scheduled &&
3485             (conn->ibc_nrx > 0 ||
3486              conn->ibc_nsends_posted > 0)) {
3487                 kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
3488                 conn->ibc_scheduled = 1;
3489                 list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
3490
3491                 if (waitqueue_active(&sched->ibs_waitq))
3492                         wake_up(&sched->ibs_waitq);
3493         }
3494
3495         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3496 }
3497
3498 void
3499 kiblnd_cq_event(struct ib_event *event, void *arg)
3500 {
3501         kib_conn_t *conn = arg;
3502
3503         CERROR("%s: async CQ event type %d\n",
3504                libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
3505 }
3506
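/*
 * Scheduler thread: binds to its CPU partition, then drains the CQs of
 * the conns queued on its kib_sched_info, dispatching completions via
 * kiblnd_complete() and re-arming CQ notification as it goes.  Yields
 * the CPU after IBLND_RESCHED consecutive busy loops.
 */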
3507 int
3508 kiblnd_scheduler(void *arg)
3509 {
3510         long                    id = (long)arg;
3511         struct kib_sched_info   *sched;
3512         kib_conn_t              *conn;
3513         wait_queue_t            wait;
3514         unsigned long           flags;
3515         struct ib_wc            wc;
3516         int                     did_something;
3517         int                     busy_loops = 0;
3518         int                     rc;
3519
3520         cfs_block_allsigs();
3521
3522         init_waitqueue_entry(&wait, current);
3523
3524         sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
3525
3526         rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
3527         if (rc != 0) {
3528                 CWARN("Unable to bind on CPU partition %d; please verify "
3529                       "that all CPUs are healthy and reload modules if "
3530                       "necessary, otherwise your system may be at risk of "
3531                       "low performance\n", sched->ibs_cpt);
3532         }
3533
3534         spin_lock_irqsave(&sched->ibs_lock, flags);
3535
3536         while (!kiblnd_data.kib_shutdown) {
3537                 if (busy_loops++ >= IBLND_RESCHED) {
3538                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3539
3540                         cond_resched();
3541                         busy_loops = 0;
3542
3543                         spin_lock_irqsave(&sched->ibs_lock, flags);
3544                 }
3545
3546                 did_something = 0;
3547
3548                 if (!list_empty(&sched->ibs_conns)) {
3549                         conn = list_entry(sched->ibs_conns.next,
3550                                               kib_conn_t, ibc_sched_list);
3551                         /* take over kib_sched_conns' ref on conn... */
3552                         LASSERT(conn->ibc_scheduled);
3553                         list_del(&conn->ibc_sched_list);
3554                         conn->ibc_ready = 0;
3555
3556                         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3557
3558                         wc.wr_id = IBLND_WID_INVAL;
3559
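                        /* Poll once; if the CQ is empty, re-arm the
                         * completion notification and poll again to
                         * close the race where a completion arrives
                         * between the first poll and ib_req_notify_cq() */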
3560                         rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3561                         if (rc == 0) {
3562                                 rc = ib_req_notify_cq(conn->ibc_cq,
3563                                                       IB_CQ_NEXT_COMP);
3564                                 if (rc < 0) {
3565                                         CWARN("%s: ib_req_notify_cq failed: %d, "
3566                                               "closing connection\n",
3567                                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
3568                                         kiblnd_close_conn(conn, -EIO);
3569                                         kiblnd_conn_decref(conn);
3570                                         spin_lock_irqsave(&sched->ibs_lock,
3571                                                               flags);
3572                                         continue;
3573                                 }
3574
3575                                 rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
3576                         }
3577
3578                         if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
3579                                 LCONSOLE_ERROR(
3580                                         "ib_poll_cq (rc: %d) returned invalid "
3581                                         "wr_id, opcode %d, status: %d, "
3582                                         "vendor_err: %d, conn: %s status: %d\n"
3583                                         "please upgrade the firmware and OFED, "
3584                                         "or contact your vendor.\n", rc,
3585                                         wc.opcode, wc.status, wc.vendor_err,
3586                                         libcfs_nid2str(conn->ibc_peer->ibp_nid),
3587                                         conn->ibc_state);
3588                                 rc = -EINVAL;
3589                         }
3590
3591                         if (rc < 0) {
3592                                 CWARN("%s: ib_poll_cq failed: %d, "
3593                                       "closing connection\n",
3594                                       libcfs_nid2str(conn->ibc_peer->ibp_nid),
3595                                       rc);
3596                                 kiblnd_close_conn(conn, -EIO);
3597                                 kiblnd_conn_decref(conn);
3598                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3599                                 continue;
3600                         }
3601
3602                         spin_lock_irqsave(&sched->ibs_lock, flags);
3603
3604                         if (rc != 0 || conn->ibc_ready) {
3605                                 /* There may be another completion waiting; get
3606                                  * another scheduler to check while I handle
3607                                  * this one... */
3608                                 /* +1 ref for sched_conns */
3609                                 kiblnd_conn_addref(conn);
3610                                 list_add_tail(&conn->ibc_sched_list,
3611                                                   &sched->ibs_conns);
3612                                 if (waitqueue_active(&sched->ibs_waitq))
3613                                         wake_up(&sched->ibs_waitq);
3614                         } else {
3615                                 conn->ibc_scheduled = 0;
3616                         }
3617
3618                         if (rc != 0) {
3619                                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3620                                 kiblnd_complete(&wc);
3621
3622                                 spin_lock_irqsave(&sched->ibs_lock, flags);
3623                         }
3624
3625                         kiblnd_conn_decref(conn); /* ...drop my ref from above */
3626                         did_something = 1;
3627                 }
3628
3629                 if (did_something)
3630                         continue;
3631
3632                 set_current_state(TASK_INTERRUPTIBLE);
3633                 add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
3634                 spin_unlock_irqrestore(&sched->ibs_lock, flags);
3635
3636                 schedule();
3637                 busy_loops = 0;
3638
3639                 remove_wait_queue(&sched->ibs_waitq, &wait);
3640                 set_current_state(TASK_RUNNING);
3641                 spin_lock_irqsave(&sched->ibs_lock, flags);
3642         }
3643
3644         spin_unlock_irqrestore(&sched->ibs_lock, flags);
3645
3646         kiblnd_thread_fini();
3647         return 0;
3648 }
3649
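/*
 * Failover thread (only started when the kib_dev_failover tunable is
 * set): retries kiblnd_dev_failover() on failed devices, backing off on
 * repeated failures, and periodically re-checks all devices in case a
 * bonding failover left us listening on the wrong HCA.
 */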
3650 int
3651 kiblnd_failover_thread(void *arg)
3652 {
3653         rwlock_t                *glock = &kiblnd_data.kib_global_lock;
3654         kib_dev_t               *dev;
3655         wait_queue_t            wait;
3656         unsigned long           flags;
3657         int                     rc;
3658
3659         LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
3660
3661         cfs_block_allsigs();
3662
3663         init_waitqueue_entry(&wait, current);
3664         write_lock_irqsave(glock, flags);
3665
3666         while (!kiblnd_data.kib_shutdown) {
3667                 int     do_failover = 0;
3668                 int     long_sleep;
3669
3670                 list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
3671                                     ibd_fail_list) {
3672                         if (cfs_time_before(cfs_time_current(),
3673                                             dev->ibd_next_failover))
3674                                 continue;
3675                         do_failover = 1;
3676                         break;
3677                 }
3678
3679                 if (do_failover) {
3680                         list_del_init(&dev->ibd_fail_list);
3681                         dev->ibd_failover = 1;
3682                         write_unlock_irqrestore(glock, flags);
3683
3684                         rc = kiblnd_dev_failover(dev);
3685
3686                         write_lock_irqsave(glock, flags);
3687
3688                         LASSERT (dev->ibd_failover);
3689                         dev->ibd_failover = 0;
3690                         if (rc >= 0) { /* Device is OK or failover succeeded */
3691                                 dev->ibd_next_failover = cfs_time_shift(3);
3692                                 continue;
3693                         }
3694
3695                         /* failed to failover, retry later */
3696                         dev->ibd_next_failover =
3697                                 cfs_time_shift(min(dev->ibd_failed_failover, 10));
3698                         if (kiblnd_dev_can_failover(dev)) {
3699                                 list_add_tail(&dev->ibd_fail_list,
3700                                               &kiblnd_data.kib_failed_devs);
3701                         }
3702
3703                         continue;
3704                 }
3705
3706                 /* long sleep if no more pending failovers */
3707                 long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
3708
3709                 set_current_state(TASK_INTERRUPTIBLE);
3710                 add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3711                 write_unlock_irqrestore(glock, flags);
3712
3713                 rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
3714                                                    cfs_time_seconds(1));
3715                 set_current_state(TASK_RUNNING);
3716                 remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
3717                 write_lock_irqsave(glock, flags);
3718
3719                 if (!long_sleep || rc != 0)
3720                         continue;
3721
3722                 /* After a long sleep, routinely check all active devices;
3723                  * we need this because if a dev has no active connections
3724                  * and no local SENDs, we may keep listening on the wrong
3725                  * HCA forever after a bonding failover */
3726                 list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
3727                         if (kiblnd_dev_can_failover(dev)) {
3728                                 list_add_tail(&dev->ibd_fail_list,
3729                                               &kiblnd_data.kib_failed_devs);
3730                         }
3731                 }
3732         }
3733
3734         write_unlock_irqrestore(glock, flags);
3735
3736         kiblnd_thread_fini();
3737         return 0;
3738 }