/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
 * Use is subject to license terms.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

void
kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
{
        lnet_msg_t *lntmsg[2];
        kib_net_t  *net = ni->ni_data;
        int         rc;
        int         i;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
        LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
        LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer response */
        LASSERT (tx->tx_pool != NULL);

        kiblnd_unmap_tx(ni, tx);

        /* tx may have up to 2 lnet msgs to finalise */
        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
        rc = tx->tx_status;

        if (tx->tx_conn != NULL) {
                LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);

                kiblnd_conn_decref(tx->tx_conn);
                tx->tx_conn = NULL;
        }

        tx->tx_nwrq = 0;
        tx->tx_status = 0;

        kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

        /* delay finalize until my descs have been freed */
        for (i = 0; i < 2; i++) {
                if (lntmsg[i] == NULL)
                        continue;

                lnet_finalize(ni, lntmsg[i], rc);
        }
}

void
kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
{
        kib_tx_t *tx;

        while (!list_empty (txlist)) {
                tx = list_entry (txlist->next, kib_tx_t, tx_list);

                list_del(&tx->tx_list);
                /* complete now */
                tx->tx_waiting = 0;
                tx->tx_status = status;
                kiblnd_tx_done(ni, tx);
        }
}

kib_tx_t *
kiblnd_get_idle_tx (lnet_ni_t *ni)
{
        kib_net_t        *net = (kib_net_t *)ni->ni_data;
        struct list_head *node;
        kib_tx_t         *tx;

        node = kiblnd_pool_alloc_node(&net->ibn_tx_ps.tps_poolset);
        if (node == NULL)
                return NULL;
        tx = container_of(node, kib_tx_t, tx_list);

        LASSERT (tx->tx_nwrq == 0);
        LASSERT (!tx->tx_queued);
        LASSERT (tx->tx_sending == 0);
        LASSERT (!tx->tx_waiting);
        LASSERT (tx->tx_status == 0);
        LASSERT (tx->tx_conn == NULL);
        LASSERT (tx->tx_lntmsg[0] == NULL);
        LASSERT (tx->tx_lntmsg[1] == NULL);
        LASSERT (tx->tx_u.pmr == NULL);
        LASSERT (tx->tx_nfrags == 0);

        return tx;
}

void
kiblnd_drop_rx (kib_rx_t *rx)
{
        kib_conn_t   *conn = rx->rx_conn;
        unsigned long flags;

        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
        LASSERT (conn->ibc_nrx > 0);
        conn->ibc_nrx--;
        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);

        kiblnd_conn_decref(conn);
}

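/* Repost a receive buffer.  'credit' says how the buffer should be accounted
 * once it is back on the QP: IBLND_POSTRX_NO_CREDIT reposts silently,
 * IBLND_POSTRX_PEER_CREDIT returns a credit to the peer in the next message
 * sent, and IBLND_POSTRX_RSRVD_CREDIT tops up the reserved-credit count that
 * feeds ibc_tx_queue_rsrvd in kiblnd_check_sends(). */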
int
kiblnd_post_rx (kib_rx_t *rx, int credit)
{
        kib_conn_t        *conn = rx->rx_conn;
        kib_net_t         *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr *bad_wrq = NULL;
        struct ib_mr      *mr;
        int                rc;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
                 credit == IBLND_POSTRX_PEER_CREDIT ||
                 credit == IBLND_POSTRX_RSRVD_CREDIT);

        mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
        LASSERT (mr != NULL);

        rx->rx_sge.lkey   = mr->lkey;
        rx->rx_sge.addr   = rx->rx_msgaddr;
        rx->rx_sge.length = IBLND_MSG_SIZE;

        rx->rx_wrq.next    = NULL;
        rx->rx_wrq.sg_list = &rx->rx_sge;
        rx->rx_wrq.num_sge = 1;
        rx->rx_wrq.wr_id   = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

        LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
        LASSERT (rx->rx_nob >= 0);              /* not posted */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return 0;
        }

        rx->rx_nob = -1;                        /* flag posted */

        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
        if (rc != 0) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        }

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                return rc;

        if (rc != 0) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return rc;
        }

        if (credit == IBLND_POSTRX_NO_CREDIT)
                return 0;

        spin_lock(&conn->ibc_lock);
        if (credit == IBLND_POSTRX_PEER_CREDIT)
                conn->ibc_outstanding_credits++;
        else
                conn->ibc_reserved_credits++;
        spin_unlock(&conn->ibc_lock);

        kiblnd_check_sends(conn);
        return 0;
}

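/* Every tx that expects a response carries a 64-bit cookie, which the peer
 * echoes back in the matching completion message.  This helper scans the
 * connection's active-tx list for the tx matching both the cookie and the
 * expected message type.  Caller must hold ibc_lock. */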
kib_tx_t *
kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
{
        struct list_head *tmp;

        list_for_each(tmp, &conn->ibc_active_txs) {
                kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);

                LASSERT (!tx->tx_queued);
                LASSERT (tx->tx_sending != 0 || tx->tx_waiting);

                if (tx->tx_cookie != cookie)
                        continue;

                if (tx->tx_waiting &&
                    tx->tx_msg->ibm_type == txtype)
                        return tx;

                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
                      tx->tx_waiting ? "" : "NOT ",
                      tx->tx_msg->ibm_type, txtype);
        }

        return NULL;
}

void
kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
{
        kib_tx_t  *tx;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
        int        idle;

        spin_lock(&conn->ibc_lock);

        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
        if (tx == NULL) {
                spin_unlock(&conn->ibc_lock);

                CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kiblnd_close_conn(conn, -EPROTO);
                return;
        }

        if (tx->tx_status == 0) {               /* success so far */
                if (status < 0) {               /* failed? */
                        tx->tx_status = status;
                } else if (txtype == IBLND_MSG_GET_REQ) {
                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
                }
        }

        tx->tx_waiting = 0;

        idle = !tx->tx_queued && (tx->tx_sending == 0);
        if (idle)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(ni, tx);
}

void
kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie)
{
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
        kib_tx_t  *tx = kiblnd_get_idle_tx(ni);

        if (tx == NULL) {
                CERROR("Can't get tx for completion %x for %s\n",
                       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
        kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));

        kiblnd_queue_tx(tx, conn);
}

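/* Demultiplex a received message.  Credits returned by the peer are banked
 * first (waking any blocked sends via kiblnd_check_sends()), then the message
 * is dispatched by type.  'post_credit' records how the rx buffer should be
 * reposted afterwards: a payload-bearing message handed up to LNet
 * (IMMEDIATE, PUT_REQ, GET_REQ) keeps its buffer until lnet_parse()
 * completes, so it is flagged IBLND_POSTRX_DONT_POST here and reposted later
 * from kiblnd_recv(). */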
void
kiblnd_handle_rx (kib_rx_t *rx)
{
        kib_msg_t  *msg = rx->rx_msg;
        kib_conn_t *conn = rx->rx_conn;
        lnet_ni_t  *ni = conn->ibc_peer->ibp_ni;
        int         credits = msg->ibm_credits;
        kib_tx_t   *tx;
        int         rc = 0;
        int         rc2;
        int         post_credit;

        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        CDEBUG (D_NET, "Received %x[%d] from %s\n",
                msg->ibm_type, credits, libcfs_nid2str(conn->ibc_peer->ibp_nid));

        if (credits != 0) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);

                if (conn->ibc_credits + credits >
                    IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
                        rc2 = conn->ibc_credits;
                        spin_unlock(&conn->ibc_lock);

                        CERROR("Bad credits from %s: %d + %d > %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               rc2, credits, IBLND_MSG_QUEUE_SIZE(conn->ibc_version));

                        kiblnd_close_conn(conn, -EPROTO);
                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
                        return;
                }

                conn->ibc_credits += credits;

                spin_unlock(&conn->ibc_lock);
                kiblnd_check_sends(conn);
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Bad IBLND message type %x from %s\n",
                       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_NO_CREDIT;
                rc = -EPROTO;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_IMMEDIATE:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
                                msg->ibm_srcnid, rx, 0);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_NAK:
                CWARN ("PUT_NACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_PUT_ACK:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;

                spin_lock(&conn->ibc_lock);
                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                        msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT (tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = 0;                /* overwrite PUT_REQ */

                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                tx->tx_waiting = 0;             /* clear waiting and queue atomically */
                kiblnd_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBLND_MSG_PUT_DONE:
                post_credit = IBLND_POSTRX_PEER_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_GET_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_GET_DONE:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kiblnd_close_conn(conn, rc);

        if (post_credit != IBLND_POSTRX_DONT_POST)
                kiblnd_post_rx(rx, post_credit);
}

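/* Completion handler for a receive work request, run when the CQ signals.
 * It validates the wire message (unpack, NID and incarnation stamps) before
 * handing it to kiblnd_handle_rx(); an rx that arrives while the connection
 * is still being established is parked on ibc_early_rxs and replayed later
 * by kiblnd_handle_early_rxs(). */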
void
kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
{
        kib_msg_t  *msg = rx->rx_msg;
        kib_conn_t *conn = rx->rx_conn;
        lnet_ni_t  *ni = conn->ibc_peer->ibp_ni;
        kib_net_t  *net = ni->ni_data;
        int         rc;
        int         err = -EIO;

        LASSERT (net != NULL);
        LASSERT (rx->rx_nob < 0);               /* was posted */
        rx->rx_nob = 0;                         /* isn't now */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
                goto ignore;

        if (status != IB_WC_SUCCESS) {
                CNETERR("Rx from %s failed: %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                goto failed;
        }

        LASSERT (nob >= 0);
        rx->rx_nob = nob;

        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
        if (rc != 0) {
                CERROR ("Error %d unpacking rx from %s\n",
                        rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
            msg->ibm_dstnid != ni->ni_nid ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != net->ibn_incarnation) {
                CERROR ("Stale rx from %s\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                err = -ESTALE;
                goto failed;
        }

        /* set time last known alive */
        kiblnd_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
                unsigned long  flags;

                write_lock_irqsave(g_lock, flags);
                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(g_lock, flags);
                        return;
                }
                write_unlock_irqrestore(g_lock, flags);
        }
        kiblnd_handle_rx(rx);
        return;

 failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kiblnd_close_conn(conn, err);
 ignore:
        kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
}

struct page *
kiblnd_kvaddr_to_page (unsigned long vaddr)
{
        struct page *page;

        if (vaddr >= VMALLOC_START &&
            vaddr < VMALLOC_END) {
                page = vmalloc_to_page ((void *)vaddr);
                LASSERT (page != NULL);
                return page;
        }
#ifdef CONFIG_HIGHMEM
        if (vaddr >= PKMAP_BASE &&
            vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
                /* No highmem kernel vaddrs here: highmem pages are only
                 * used for bulk (kiov) I/O */
                CERROR("find page for address in highmem\n");
                LBUG();
        }
#endif
        page = virt_to_page (vaddr);
        LASSERT (page != NULL);
        return page;
}

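/* Map an RDMA descriptor with an FMR when no single pre-registered MR covers
 * it: flatten the fragment list into an array of device-page-aligned
 * addresses, map them in one FMR operation, and collapse the descriptor back
 * into a single virtually-contiguous fragment of 'nob' bytes. */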
static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
        kib_dev_t *ibdev = net->ibn_dev;
        __u64     *pages = tx->tx_pages;
        int        npages;
        int        size;
        int        rc;
        int        i;

        for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
                for (size = 0; size <  rd->rd_frags[i].rf_nob;
                               size += ibdev->ibd_page_size) {
                        pages[npages ++] = (rd->rd_frags[i].rf_addr &
                                            ibdev->ibd_page_mask) + size;
                }
        }

        rc = kiblnd_fmr_pool_map(&net->ibn_fmr_ps, pages, npages, 0, &tx->tx_u.fmr);
        if (rc != 0) {
                CERROR ("Can't map %d pages: %d\n", npages, rc);
                return rc;
        }

        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
         * the rkey */
        rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
                                         tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
        rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
        rd->rd_frags[0].rf_nob   = nob;
        rd->rd_nfrags = 1;

        return 0;
}

static int
kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
        __u64 iova;
        int   rc;

        iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;

        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, rd, &iova, &tx->tx_u.pmr);
        if (rc != 0) {
                CERROR("Failed to create MR by phybuf: %d\n", rc);
                return rc;
        }

        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
         * the rkey */
        rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey :
                                         tx->tx_u.pmr->pmr_mr->lkey;

        rd->rd_frags[0].rf_addr = iova;
        rd->rd_frags[0].rf_nob  = nob;
        rd->rd_nfrags = 1;

        return 0;
}

void
kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
        kib_net_t *net = ni->ni_data;

        LASSERT (net != NULL);

        if (net->ibn_with_fmr && tx->tx_u.fmr.fmr_pfmr != NULL) {
                kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
                tx->tx_u.fmr.fmr_pfmr = NULL;
        } else if (net->ibn_with_pmr && tx->tx_u.pmr != NULL) {
                kiblnd_pmr_pool_unmap(tx->tx_u.pmr);
                tx->tx_u.pmr = NULL;
        }

        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                tx->tx_nfrags = 0;
        }
}

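/* DMA-map the tx fragments and pick a memory registration strategy: a
 * pre-mapped whole-memory MR if one covers the descriptor, else FMR, else
 * PMR.  When 'rd' is not tx_rd the descriptor describes the local sink of a
 * peer's RDMA, so it is mapped DMA_FROM_DEVICE and keyed with the rkey. */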
int
kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
              kib_rdma_desc_t *rd, int nfrags)
{
        kib_net_t    *net = ni->ni_data;
        struct ib_mr *mr  = NULL;
        __u32         nob;
        int           i;

        /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
         * RDMA sink */
        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        tx->tx_nfrags = nfrags;

        rd->rd_nfrags =
                kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
                                  tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);

        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
                nob += rd->rd_frags[i].rf_nob;
        }

        /* looking for pre-mapping MR */
        mr = kiblnd_find_rd_dma_mr(net, rd);
        if (mr != NULL) {
                /* found pre-mapping MR */
                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
                return 0;
        }

        if (net->ibn_with_fmr)
                return kiblnd_fmr_map_tx(net, tx, rd, nob);
        else if (net->ibn_with_pmr)
                return kiblnd_pmr_map_tx(net, tx, rd, nob);

        return -EINVAL;
}

int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                    unsigned int niov, struct iovec *iov, int offset, int nob)
{
        kib_net_t          *net = ni->ni_data;
        struct page        *page;
        struct scatterlist *sg;
        unsigned long       vaddr;
        int                 fragnob;
        int                 page_offset;

        LASSERT (nob > 0);
        LASSERT (niov > 0);
        LASSERT (net != NULL);

        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                niov--;
                iov++;
                LASSERT (niov > 0);
        }

        sg = tx->tx_frags;
        do {
                LASSERT (niov > 0);

                vaddr = ((unsigned long)iov->iov_base) + offset;
                page_offset = vaddr & (PAGE_SIZE - 1);
                page = kiblnd_kvaddr_to_page(vaddr);
                if (page == NULL) {
                        CERROR ("Can't find page\n");
                        return -EFAULT;
                }

                fragnob = min((int)(iov->iov_len - offset), nob);
                fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);

                sg_set_page(sg, page, fragnob, page_offset);
                sg++;

                if (offset + fragnob < iov->iov_len) {
                        offset += fragnob;
                } else {
                        offset = 0;
                        iov++;
                        niov--;
                }
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

/* Count how many scatterlist fragments the kiov will need, mirroring the
 * fragment walk in kiblnd_setup_rd_kiov() below */
static int
get_kiov_length (int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
        int fragnob;
        int count = 0;

        do {
                LASSERT (nkiov > 0);

                fragnob = min((int)(kiov->kiov_len - offset), nob);

                count++;
                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return count;
}

int
kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
        kib_net_t          *net = ni->ni_data;
        struct scatterlist *sg;
        int                 fragnob;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT (nob > 0);
        LASSERT (nkiov > 0);
        LASSERT (net != NULL);

        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                nkiov--;
                kiov++;
                LASSERT (nkiov > 0);
        }

        sg = tx->tx_frags;
        tx->tx_nfrags = get_kiov_length(nkiov, kiov, offset, nob);
        sg_init_table(sg, tx->tx_nfrags);
        do {
                LASSERT (nkiov > 0);

                fragnob = min((int)(kiov->kiov_len - offset), nob);

                sg_set_page(sg, kiov->kiov_page, fragnob,
                            kiov->kiov_offset + offset);
                sg++;

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

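/* Post a single queued tx on the connection's QP.  Called and returns with
 * ibc_lock held (the lock is dropped briefly around tx completion).  Returns
 * -EAGAIN, leaving the tx queued, in three flow-control cases: the QP
 * already has IBLND_CONCURRENT_SENDS work requests posted; the tx needs a
 * send credit and none remain; or only one credit remains and it must be
 * kept for returning outstanding credits to the peer (on OOB-incapable
 * protocol versions this avoids a credit deadlock). */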
int
kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
{
        kib_msg_t         *msg = tx->tx_msg;
        kib_peer_t        *peer = conn->ibc_peer;
        int                ver = conn->ibc_version;
        int                rc;
        int                done;
        struct ib_send_wr *bad_wrq;

        LASSERT (tx->tx_queued);
        /* We rely on this for QP sizing */
        LASSERT (tx->tx_nwrq > 0);
        LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));

        LASSERT (credit == 0 || credit == 1);
        LASSERT (conn->ibc_outstanding_credits >= 0);
        LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
        LASSERT (conn->ibc_credits >= 0);
        LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));

        if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&   /* last credit reserved for */
            conn->ibc_outstanding_credits == 0) { /* giving back credits */
                CDEBUG(D_NET, "%s: not using last credit\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        /* NB don't drop ibc_lock before bumping tx_sending */
        list_del(&tx->tx_list);
        tx->tx_queued = 0;

        if (msg->ibm_type == IBLND_MSG_NOOP &&
            (!kiblnd_send_noop(conn) ||     /* redundant NOOP */
             (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                /* OK to drop when posted enough NOOPs, since
                 * kiblnd_check_sends will queue NOOP again when
                 * posted NOOPs complete */
                spin_unlock(&conn->ibc_lock);
                kiblnd_tx_done(peer->ibp_ni, tx);
                spin_lock(&conn->ibc_lock);
                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_noops_posted);
                return 0;
        }

        kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
                        peer->ibp_nid, conn->ibc_incarnation);

        conn->ibc_credits -= credit;
        conn->ibc_outstanding_credits = 0;
        conn->ibc_nsends_posted++;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted++;

        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
         * and then re-queued here.  It's (just) possible that
         * tx_sending is non-zero if we've not done the tx_complete()
         * from the first send; hence the ++ rather than = below. */
        tx->tx_sending++;
        list_add(&tx->tx_list, &conn->ibc_active_txs);

        /* I'm still holding ibc_lock! */
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
                rc = -ECONNABORTED;
        else
                rc = ib_post_send(conn->ibc_cmid->qp,
                                  tx->tx_wrq, &bad_wrq);
        conn->ibc_last_send = jiffies;

        if (rc == 0)
                return 0;

        /* NB credits are transferred in the actual
         * message, which can only be the last work item */
        conn->ibc_credits += credit;
        conn->ibc_outstanding_credits += msg->ibm_credits;
        conn->ibc_nsends_posted--;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        tx->tx_status = rc;
        tx->tx_waiting = 0;
        tx->tx_sending--;

        done = (tx->tx_sending == 0);
        if (done)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                CERROR("Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer->ibp_nid));
        else
                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer->ibp_nid));

        kiblnd_close_conn(conn, rc);

        if (done)
                kiblnd_tx_done(peer->ibp_ni, tx);

        spin_lock(&conn->ibc_lock);

        return -EIO;
}

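/* Drain the connection's send queues as far as credits and QP space allow.
 * Reserved credits move txs from ibc_tx_queue_rsrvd onto the normal queue
 * first; a NOOP is then queued if credits need returning and nothing else is
 * going; finally credit-free txs (ibc_tx_queue_nocred) take priority over
 * credit-consuming ones until kiblnd_post_tx_locked() says stop. */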
void
kiblnd_check_sends (kib_conn_t *conn)
{
        int        ver = conn->ibc_version;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
        kib_tx_t  *tx;

        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                CDEBUG(D_NET, "%s too soon\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        spin_lock(&conn->ibc_lock);

        LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT (conn->ibc_reserved_credits >= 0);

        while (conn->ibc_reserved_credits > 0 &&
               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
                                kib_tx_t, tx_list);
                list_del(&tx->tx_list);
                list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
        }

        if (kiblnd_send_noop(conn)) {
                spin_unlock(&conn->ibc_lock);

                tx = kiblnd_get_idle_tx(ni);
                if (tx != NULL)
                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

                spin_lock(&conn->ibc_lock);
                if (tx != NULL)
                        kiblnd_queue_tx_locked(tx, conn);
        }

        kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */

        for (;;) {
                int credit;

                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
                                        kib_tx_t, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_queue.next,
                                        kib_tx_t, tx_list);
                } else
                        break;

                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                        break;
        }

        spin_unlock(&conn->ibc_lock);

        kiblnd_conn_decref(conn); /* ...until here */
}

void
kiblnd_tx_complete (kib_tx_t *tx, int status)
{
        int         failed = (status != IB_WC_SUCCESS);
        kib_conn_t *conn = tx->tx_conn;
        int         idle;

        LASSERT (tx->tx_sending > 0);

        if (failed) {
                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CNETERR("Tx -> %s cookie "LPX64
                                " sending %d waiting %d: failed %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
                                status);

                kiblnd_close_conn(conn, -EIO);
        } else {
                kiblnd_peer_alive(conn->ibc_peer);
        }

        spin_lock(&conn->ibc_lock);

        /* I could be racing with rdma completion.  Whoever makes 'tx' idle
         * gets to free it, which also drops its ref on 'conn'. */

        tx->tx_sending--;
        conn->ibc_nsends_posted--;
        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        if (failed) {
                tx->tx_waiting = 0;             /* don't wait for peer */
                tx->tx_status = -EIO;
        }

        idle = (tx->tx_sending == 0) &&         /* This is the final callback */
               !tx->tx_waiting &&               /* Not waiting for peer */
               !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
        if (idle)
                list_del(&tx->tx_list);

        kiblnd_conn_addref(conn);               /* 1 ref for me.... */

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);

        kiblnd_check_sends(conn);

        kiblnd_conn_decref(conn);               /* ...until here */
}

void
kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
        kib_net_t         *net = ni->ni_data;
        struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
        struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
        int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
        struct ib_mr      *mr;

        LASSERT (net != NULL);
        LASSERT (tx->tx_nwrq >= 0);
        LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
        LASSERT (nob <= IBLND_MSG_SIZE);

        kiblnd_init_msg(tx->tx_msg, type, body_nob);

        mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
        LASSERT (mr != NULL);

        sge->lkey   = mr->lkey;
        sge->addr   = tx->tx_msgaddr;
        sge->length = nob;

        memset(wrq, 0, sizeof(*wrq));

        wrq->next       = NULL;
        wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
        wrq->sg_list    = sge;
        wrq->num_sge    = 1;
        wrq->opcode     = IB_WR_SEND;
        wrq->send_flags = IB_SEND_SIGNALED;

        tx->tx_nwrq++;
}

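/* Build the work-request chain for an RDMA WRITE of 'resid' bytes from my
 * source descriptor into the peer's descriptor: one IB_WR_RDMA_WRITE per
 * overlapping fragment pair, with the GET_DONE/PUT_DONE completion message
 * appended as the final (credit-carrying) send.  Returns the number of
 * bytes to transfer, or a negative errno if the fragment lists can't be
 * reconciled. */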
int
kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
                  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
        kib_msg_t         *ibmsg = tx->tx_msg;
        kib_rdma_desc_t   *srcrd = tx->tx_rd;
        struct ib_sge     *sge = &tx->tx_sge[0];
        struct ib_send_wr *wrq = &tx->tx_wrq[0];
        int                rc  = resid;
        int                srcidx;
        int                dstidx;
        int                wrknob;

        LASSERT (!in_interrupt());
        LASSERT (tx->tx_nwrq == 0);
        LASSERT (type == IBLND_MSG_GET_DONE ||
                 type == IBLND_MSG_PUT_DONE);

        srcidx = dstidx = 0;

        while (resid > 0) {
                if (srcidx >= srcrd->rd_nfrags) {
                        CERROR("Src buffer exhausted: %d frags\n", srcidx);
                        rc = -EPROTO;
                        break;
                }

                if (dstidx == dstrd->rd_nfrags) {
                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                        rc = -EPROTO;
                        break;
                }

                if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
                        CERROR("RDMA too fragmented for %s (%d): "
                               "%d/%d src %d/%d dst frags\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               IBLND_RDMA_FRAGS(conn->ibc_version),
                               srcidx, srcrd->rd_nfrags,
                               dstidx, dstrd->rd_nfrags);
                        rc = -EMSGSIZE;
                        break;
                }

                wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
                                 kiblnd_rd_frag_size(dstrd, dstidx)), resid);

                sge = &tx->tx_sge[tx->tx_nwrq];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                sge->length = wrknob;

                wrq = &tx->tx_wrq[tx->tx_nwrq];

                wrq->next       = wrq + 1;
                wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
                wrq->sg_list    = sge;
                wrq->num_sge    = 1;
                wrq->opcode     = IB_WR_RDMA_WRITE;
                wrq->send_flags = 0;

                wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
                wrq->wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);

                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);

                resid -= wrknob;

                tx->tx_nwrq++;
        }

        if (rc < 0)                             /* no RDMA if completing with failure */
                tx->tx_nwrq = 0;

        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
                           type, sizeof (kib_completion_msg_t));

        return rc;
}

void
kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
{
        struct list_head *q;

        LASSERT (tx->tx_nwrq > 0);              /* work items set up */
        LASSERT (!tx->tx_queued);               /* not queued for sending already */
        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        tx->tx_queued = 1;
        tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);

        if (tx->tx_conn == NULL) {
                kiblnd_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
        } else {
                /* PUT_DONE first attached to conn as a PUT_REQ */
                LASSERT (tx->tx_conn == conn);
                LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
        }

        switch (tx->tx_msg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_PUT_REQ:
        case IBLND_MSG_GET_REQ:
                q = &conn->ibc_tx_queue_rsrvd;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                q = &conn->ibc_tx_queue_nocred;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        q = &conn->ibc_tx_queue_nocred;
                else
                        q = &conn->ibc_tx_queue;
                break;

        case IBLND_MSG_IMMEDIATE:
                q = &conn->ibc_tx_queue;
                break;
        }

        list_add_tail(&tx->tx_list, q);
}

void
kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
{
        spin_lock(&conn->ibc_lock);
        kiblnd_queue_tx_locked(tx, conn);
        spin_unlock(&conn->ibc_lock);

        kiblnd_check_sends(conn);
}

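/* Initiate an active connection: create an RDMA CM ID and kick off address
 * resolution towards the peer's IPoIB address (the IP is encoded in the
 * NID).  The rest of the handshake (route resolution, connect, CONNACK)
 * continues asynchronously in kiblnd_cm_callback. */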
void
kiblnd_connect_peer (kib_peer_t *peer)
{
        struct rdma_cm_id *cmid;
        kib_dev_t         *dev;
        kib_net_t         *net = peer->ibp_ni->ni_data;
        struct sockaddr_in srcaddr;
        struct sockaddr_in dstaddr;
        int                rc;

        LASSERT (net != NULL);
        LASSERT (peer->ibp_connecting > 0);

        cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP);
        if (IS_ERR(cmid)) {
                CERROR("Can't create CMID for %s: %ld\n",
                       libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
                rc = PTR_ERR(cmid);
                goto failed;
        }

        dev = net->ibn_dev;
        memset(&srcaddr, 0, sizeof(srcaddr));
        srcaddr.sin_family = AF_INET;
        srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);

        memset(&dstaddr, 0, sizeof(dstaddr));
        dstaddr.sin_family = AF_INET;
        dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
        dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));

        kiblnd_peer_addref(peer);               /* cmid's ref */

        rc = rdma_resolve_addr(cmid,
                               (struct sockaddr *)&srcaddr,
                               (struct sockaddr *)&dstaddr,
                               *kiblnd_tunables.kib_timeout * 1000);
        if (rc == 0) {
                LASSERT (cmid->device != NULL);
                CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
                       libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
                       HIPQUAD(dev->ibd_ifip), cmid->device->name);
                return;
        }

        /* Can't initiate address resolution: */
        CERROR("Can't resolve addr for %s: %d\n",
               libcfs_nid2str(peer->ibp_nid), rc);

        kiblnd_peer_decref(peer);               /* cmid's ref */
        rdma_destroy_id(cmid);
 failed:
        kiblnd_peer_connect_failed(peer, 1, rc);
}

void
kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
{
        kib_peer_t    *peer;
        kib_peer_t    *peer2;
        kib_conn_t    *conn;
        rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
        unsigned long  flags;
        int            rc;

        /* If I get here, I've committed to send, so I complete the tx with
         * failure on any problems */

        LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
        LASSERT (tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */

        /* First time, just use a read lock since I expect to find my peer
         * connected */
        read_lock_irqsave(g_lock, flags);

        peer = kiblnd_find_peer_locked(nid);
        if (peer != NULL && !list_empty(&peer->ibp_conns)) {
                /* Found a peer with an established connection */
                conn = kiblnd_get_conn_locked(peer);
                kiblnd_conn_addref(conn); /* 1 ref for me... */

                read_unlock_irqrestore(g_lock, flags);

                if (tx != NULL)
                        kiblnd_queue_tx(tx, conn);
                kiblnd_conn_decref(conn); /* ...to here */
                return;
        }

        read_unlock(g_lock);
        /* Re-try with a write lock */
        write_lock(g_lock);

        peer = kiblnd_find_peer_locked(nid);
        if (peer != NULL) {
                if (list_empty(&peer->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
                        LASSERT (peer->ibp_connecting != 0 ||
                                 peer->ibp_accepting != 0);
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
                        write_unlock_irqrestore(g_lock, flags);
                } else {
                        conn = kiblnd_get_conn_locked(peer);
                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        write_unlock_irqrestore(g_lock, flags);

                        if (tx != NULL)
                                kiblnd_queue_tx(tx, conn);
                        kiblnd_conn_decref(conn); /* ...to here */
                }
                return;
        }

        write_unlock_irqrestore(g_lock, flags);

        /* Allocate a peer ready to add to the peer table and retry */
        rc = kiblnd_create_peer(ni, &peer, nid);
        if (rc != 0) {
                CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
                if (tx != NULL) {
                        tx->tx_status = -EHOSTUNREACH;
                        tx->tx_waiting = 0;
                        kiblnd_tx_done(ni, tx);
                }
                return;
        }

        write_lock_irqsave(g_lock, flags);

        peer2 = kiblnd_find_peer_locked(nid);
        if (peer2 != NULL) {
                if (list_empty(&peer2->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
                        LASSERT (peer2->ibp_connecting != 0 ||
                                 peer2->ibp_accepting != 0);
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue);
                        write_unlock_irqrestore(g_lock, flags);
                } else {
                        conn = kiblnd_get_conn_locked(peer2);
                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        write_unlock_irqrestore(g_lock, flags);

                        if (tx != NULL)
                                kiblnd_queue_tx(tx, conn);
                        kiblnd_conn_decref(conn); /* ...to here */
                }

                kiblnd_peer_decref(peer);
                return;
        }

        /* Brand new peer */
        LASSERT (peer->ibp_connecting == 0);
        peer->ibp_connecting = 1;

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);

        if (tx != NULL)
                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);

        kiblnd_peer_addref(peer);
        list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

        write_unlock_irqrestore(g_lock, flags);

        kiblnd_connect_peer(peer);
        kiblnd_peer_decref(peer);
}

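/* LND API send entry point.  Message-to-wire strategy: ACKs and anything
 * that fits in IBLND_MSG_SIZE go as IMMEDIATE; a large GET sends GET_REQ
 * carrying the sink descriptor so the peer can RDMA the reply straight in;
 * a large PUT/REPLY sends PUT_REQ and waits for PUT_ACK (sink descriptor)
 * before RDMAing the payload and completing with PUT_DONE. */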
int
kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
        lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
        int               type = lntmsg->msg_type;
        lnet_process_id_t target = lntmsg->msg_target;
        int               target_is_router = lntmsg->msg_target_is_router;
        int               routing = lntmsg->msg_routing;
        unsigned int      payload_niov = lntmsg->msg_niov;
        struct iovec     *payload_iov = lntmsg->msg_iov;
        lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
        unsigned int      payload_offset = lntmsg->msg_offset;
        unsigned int      payload_nob = lntmsg->msg_len;
        kib_msg_t        *ibmsg;
        kib_tx_t         *tx;
        int               nob;
        int               rc;

        /* NB 'private' is different depending on what we're sending.... */

        CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= LNET_MAX_IOV);

        /* Thread context */
        LASSERT (!in_interrupt());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        switch (type) {
        default:
                LBUG();
                return (-EIO);

        case LNET_MSG_ACK:
                LASSERT (payload_nob == 0);
                break;

        case LNET_MSG_GET:
                if (routing || target_is_router)
                        break;                  /* send IMMEDIATE */

                /* is the REPLY message too small for RDMA? */
                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
                if (nob <= IBLND_MSG_SIZE)
                        break;                  /* send IMMEDIATE */

                tx = kiblnd_get_idle_tx(ni);
                if (tx == NULL) {
                        CERROR("Can't allocate txd for GET to %s\n",
                               libcfs_nid2str(target.nid));
                        return -ENOMEM;
                }

                ibmsg = tx->tx_msg;

                if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
                        rc = kiblnd_setup_rd_iov(ni, tx,
                                                 &ibmsg->ibm_u.get.ibgm_rd,
                                                 lntmsg->msg_md->md_niov,
                                                 lntmsg->msg_md->md_iov.iov,
                                                 0, lntmsg->msg_md->md_length);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx,
                                                  &ibmsg->ibm_u.get.ibgm_rd,
                                                  lntmsg->msg_md->md_niov,
                                                  lntmsg->msg_md->md_iov.kiov,
                                                  0, lntmsg->msg_md->md_length);
                if (rc != 0) {
                        CERROR("Can't setup GET sink for %s: %d\n",
                               libcfs_nid2str(target.nid), rc);
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
                ibmsg->ibm_u.get.ibgm_hdr = *hdr;

                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);

                tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
                if (tx->tx_lntmsg[1] == NULL) {
                        CERROR("Can't create reply for GET -> %s\n",
                               libcfs_nid2str(target.nid));
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
                tx->tx_waiting = 1;             /* waiting for GET_DONE */
                kiblnd_launch_tx(ni, tx, target.nid);
                return 0;

        case LNET_MSG_REPLY:
        case LNET_MSG_PUT:
                /* Is the payload small enough not to need RDMA? */
                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
                if (nob <= IBLND_MSG_SIZE)
                        break;                  /* send IMMEDIATE */

                tx = kiblnd_get_idle_tx(ni);
                if (tx == NULL) {
                        CERROR("Can't allocate %s txd for %s\n",
                               type == LNET_MSG_PUT ? "PUT" : "REPLY",
                               libcfs_nid2str(target.nid));
                        return -ENOMEM;
                }

                if (payload_kiov == NULL)
                        rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
                                                 payload_niov, payload_iov,
                                                 payload_offset, payload_nob);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
                                                  payload_niov, payload_kiov,
                                                  payload_offset, payload_nob);
                if (rc != 0) {
                        CERROR("Can't setup PUT src for %s: %d\n",
                               libcfs_nid2str(target.nid), rc);
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                ibmsg = tx->tx_msg;
                ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
                ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));

                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
                tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
                kiblnd_launch_tx(ni, tx, target.nid);
                return 0;
        }

        /* send IMMEDIATE */

        LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
                 <= IBLND_MSG_SIZE);

        tx = kiblnd_get_idle_tx(ni);
        if (tx == NULL) {
                CERROR ("Can't send %d to %s: tx descs exhausted\n",
                        type, libcfs_nid2str(target.nid));
                return -ENOMEM;
        }

        ibmsg = tx->tx_msg;
        ibmsg->ibm_u.immediate.ibim_hdr = *hdr;

        if (payload_kiov != NULL)
                lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
                                    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                    payload_niov, payload_kiov,
                                    payload_offset, payload_nob);
        else
                lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
                                   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                   payload_niov, payload_iov,
                                   payload_offset, payload_nob);

        nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);

        tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
        kiblnd_launch_tx(ni, tx, target.nid);
        return 0;
}

void
kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
{
        lnet_process_id_t target = lntmsg->msg_target;
        unsigned int      niov = lntmsg->msg_niov;
        struct iovec     *iov = lntmsg->msg_iov;
        lnet_kiov_t      *kiov = lntmsg->msg_kiov;
        unsigned int      offset = lntmsg->msg_offset;
        unsigned int      nob = lntmsg->msg_len;
        kib_tx_t         *tx;
        int               rc;

        tx = kiblnd_get_idle_tx(ni);
        if (tx == NULL) {
                CERROR("Can't get tx for REPLY to %s\n",
                       libcfs_nid2str(target.nid));
                goto failed_0;
        }

        if (nob == 0)
                rc = 0;
        else if (kiov == NULL)
                rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
                                         niov, iov, offset, nob);
        else
                rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
                                          niov, kiov, offset, nob);

        if (rc != 0) {
                CERROR("Can't setup GET src for %s: %d\n",
                       libcfs_nid2str(target.nid), rc);
                goto failed_1;
        }

        rc = kiblnd_init_rdma(rx->rx_conn, tx,
                              IBLND_MSG_GET_DONE, nob,
                              &rx->rx_msg->ibm_u.get.ibgm_rd,
                              rx->rx_msg->ibm_u.get.ibgm_cookie);
        if (rc < 0) {
                CERROR("Can't setup rdma for GET from %s: %d\n",
                       libcfs_nid2str(target.nid), rc);
                goto failed_1;
        }

        if (nob == 0) {
                /* No RDMA: local completion may happen now! */
                lnet_finalize(ni, lntmsg, 0);
        } else {
                /* RDMA: lnet_finalize(lntmsg) when it
                 * completes */
                tx->tx_lntmsg[0] = lntmsg;
        }

        kiblnd_queue_tx(tx, rx->rx_conn);
        return;

 failed_1:
        kiblnd_tx_done(ni, tx);
 failed_0:
        lnet_finalize(ni, lntmsg, -EIO);
}

int
kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
             unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
             unsigned int offset, unsigned int mlen, unsigned int rlen)
{
        kib_rx_t   *rx = private;
        kib_msg_t  *rxmsg = rx->rx_msg;
        kib_conn_t *conn = rx->rx_conn;
        kib_tx_t   *tx;
        kib_msg_t  *txmsg;
        int         nob;
        int         post_credit = IBLND_POSTRX_PEER_CREDIT;
        int         rc = 0;

        LASSERT (mlen <= rlen);
        LASSERT (!in_interrupt());
        /* Either all pages or all vaddrs */
        LASSERT (!(kiov != NULL && iov != NULL));

        switch (rxmsg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_IMMEDIATE:
                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
                if (nob > rx->rx_nob) {
                        CERROR ("Immediate message from %s too big: %d(%d)\n",
                                libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
                                nob, rx->rx_nob);
                        rc = -EPROTO;
                        break;
                }

                if (kiov != NULL)
                        lnet_copy_flat2kiov(niov, kiov, offset,
                                            IBLND_MSG_SIZE, rxmsg,
                                            offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                            mlen);
                else
                        lnet_copy_flat2iov(niov, iov, offset,
                                           IBLND_MSG_SIZE, rxmsg,
                                           offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                           mlen);
                lnet_finalize (ni, lntmsg, 0);
                break;

        case IBLND_MSG_PUT_REQ:
                if (mlen == 0) {
                        lnet_finalize(ni, lntmsg, 0);
                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
                                               rxmsg->ibm_u.putreq.ibprm_cookie);
                        break;
                }

                tx = kiblnd_get_idle_tx(ni);
                if (tx == NULL) {
                        CERROR("Can't allocate tx for %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        /* Not replying will break the connection */
                        rc = -ENOMEM;
                        break;
                }

                txmsg = tx->tx_msg;
                if (kiov == NULL)
                        rc = kiblnd_setup_rd_iov(ni, tx,
                                                 &txmsg->ibm_u.putack.ibpam_rd,
                                                 niov, iov, offset, mlen);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx,
                                                  &txmsg->ibm_u.putack.ibpam_rd,
                                                  niov, kiov, offset, mlen);
                if (rc != 0) {
                        CERROR("Can't setup PUT sink for %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                        kiblnd_tx_done(ni, tx);
                        /* tell peer it's over */
                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
                                               rxmsg->ibm_u.putreq.ibprm_cookie);
                        break;
                }

                nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
                txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
                txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;

                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);

                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
                tx->tx_waiting = 1;             /* waiting for PUT_DONE */
                kiblnd_queue_tx(tx, conn);

                /* reposted buffer reserved for PUT_DONE */
                post_credit = IBLND_POSTRX_NO_CREDIT;
                break;

        case IBLND_MSG_GET_REQ:
                if (lntmsg != NULL) {
                        /* Optimized GET; RDMA lntmsg's payload */
                        kiblnd_reply(ni, rx, lntmsg);
                } else {
                        /* GET didn't match anything */
                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
                                               -ENODATA,
                                               rxmsg->ibm_u.get.ibgm_cookie);
                }
                break;
        }

        kiblnd_post_rx(rx, post_credit);
        return rc;
}

int
kiblnd_thread_start (int (*fn)(void *arg), void *arg)
{
        long pid = kernel_thread (fn, arg, 0);

        if (pid < 0)
                return ((int)pid);

        atomic_inc (&kiblnd_data.kib_nthreads);
        return (0);
}

void
kiblnd_thread_fini (void)
{
        atomic_dec (&kiblnd_data.kib_nthreads);
}

void
kiblnd_peer_alive (kib_peer_t *peer)
{
        /* This is racy, but everyone's only writing cfs_time_current() */
        peer->ibp_last_alive = cfs_time_current();
        mb();
}

void
kiblnd_peer_notify (kib_peer_t *peer)
{
        int           error = 0;
        cfs_time_t    last_alive = 0;
        unsigned long flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (list_empty(&peer->ibp_conns) &&
            peer->ibp_accepting == 0 &&
            peer->ibp_connecting == 0 &&
            peer->ibp_error != 0) {
                error = peer->ibp_error;
                peer->ibp_error = 0;

                last_alive = peer->ibp_last_alive;
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        if (error != 0)
                lnet_notify(peer->ibp_ni,
                            peer->ibp_nid, 0, last_alive);
}

void
kiblnd_close_conn_locked (kib_conn_t *conn, int error)
{
        /* This just does the immediate housekeeping.  'error' is zero for a
         * normal shutdown which can happen only after the connection has been
         * established.  If the connection is established, schedule the
         * connection to be finished off by the connd.  Otherwise the connd is
         * already dealing with it (either to set it up or tear it down).
         * Caller holds kib_global_lock exclusively in irq context */
        unsigned long flags;
        kib_peer_t   *peer = conn->ibc_peer;

        LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (error != 0 && conn->ibc_comms_error == 0)
                conn->ibc_comms_error = error;

        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
                return;                         /* already being handled */

        if (error == 0 &&
            list_empty(&conn->ibc_tx_queue) &&
            list_empty(&conn->ibc_tx_queue_rsrvd) &&
            list_empty(&conn->ibc_tx_queue_nocred) &&
            list_empty(&conn->ibc_active_txs)) {
                CDEBUG(D_NET, "closing conn to %s\n",
                       libcfs_nid2str(peer->ibp_nid));
        } else {
                CNETERR("Closing conn to %s: error %d%s%s%s%s\n",
                        libcfs_nid2str(peer->ibp_nid), error,
                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
                                "" : "(sending_rsrvd)",
                        list_empty(&conn->ibc_tx_queue_nocred) ?
                                "" : "(sending_nocred)",
                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
        }

        list_del(&conn->ibc_list);
        /* connd (see below) takes over ibc_list's ref */

        if (list_empty (&peer->ibp_conns) &&    /* no more conns */
            kiblnd_peer_active(peer)) {         /* still in peer table */
                kiblnd_unlink_peer_locked(peer);

                /* set/clear error on last conn */
                peer->ibp_error = conn->ibc_comms_error;
        }

        kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);

        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);

        list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
        wake_up (&kiblnd_data.kib_connd_waitq);

        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}

void
kiblnd_close_conn (kib_conn_t *conn, int error)
{
        unsigned long flags;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        kiblnd_close_conn_locked(conn, error);

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}

void
kiblnd_handle_early_rxs(kib_conn_t *conn)
{
        unsigned long flags;
        kib_rx_t     *rx;

        LASSERT (!in_interrupt());
        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        while (!list_empty(&conn->ibc_early_rxs)) {
                rx = list_entry(conn->ibc_early_rxs.next,
                                kib_rx_t, rx_list);
                list_del(&rx->rx_list);
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

                kiblnd_handle_rx(rx);

                write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}

void
kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
{
        LIST_HEAD        (zombies);
        struct list_head *tmp;
        struct list_head *nxt;
        kib_tx_t         *tx;

        spin_lock(&conn->ibc_lock);

        list_for_each_safe (tmp, nxt, txs) {
                tx = list_entry (tmp, kib_tx_t, tx_list);

                if (txs == &conn->ibc_active_txs) {
                        LASSERT (!tx->tx_queued);
                        LASSERT (tx->tx_waiting ||
                                 tx->tx_sending != 0);
                } else {
                        LASSERT (tx->tx_queued);
                }

                tx->tx_status = -ECONNABORTED;
                tx->tx_waiting = 0;

                if (tx->tx_sending == 0) {
                        tx->tx_queued = 0;
                        list_del (&tx->tx_list);
                        list_add (&tx->tx_list, &zombies);
                }
        }

        spin_unlock(&conn->ibc_lock);

        kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
                           &zombies, -ECONNABORTED);
}

void
kiblnd_finalise_conn (kib_conn_t *conn)
{
        LASSERT (!in_interrupt());
        LASSERT (conn->ibc_state > IBLND_CONN_INIT);

        kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);

        /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
         * for connections that didn't get as far as being connected, because
         * rdma_disconnect() does this for free. */
        kiblnd_abort_receives(conn);

        /* Complete all tx descs not waiting for sends to complete.
         * NB we should be safe from RDMA now that the QP has changed state */

        kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
        kiblnd_abort_txs(conn, &conn->ibc_active_txs);

        kiblnd_handle_early_rxs(conn);
}

void
kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
{
        LIST_HEAD    (zombies);
        unsigned long flags;

        LASSERT (error != 0);
        LASSERT (!in_interrupt());

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (active) {
                LASSERT (peer->ibp_connecting > 0);
                peer->ibp_connecting--;
        } else {
                LASSERT (peer->ibp_accepting > 0);
                peer->ibp_accepting--;
        }

        if (peer->ibp_connecting != 0 ||
            peer->ibp_accepting != 0) {
                /* another connection attempt under way... */
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
                return;
        }

        if (list_empty(&peer->ibp_conns)) {
                /* Take peer's blocked transmits to complete with error */
                list_add(&zombies, &peer->ibp_tx_queue);
                list_del_init(&peer->ibp_tx_queue);

                if (kiblnd_peer_active(peer))
                        kiblnd_unlink_peer_locked(peer);

                peer->ibp_error = error;
        } else {
                /* Can't have blocked transmits if there are connections */
                LASSERT (list_empty(&peer->ibp_tx_queue));
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_peer_notify(peer);

        if (list_empty (&zombies))
                return;

        CNETERR("Deleting messages for %s: connection failed\n",
                libcfs_nid2str(peer->ibp_nid));

        kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
}

void
kiblnd_connreq_done(kib_conn_t *conn, int status)
{
        kib_peer_t       *peer = conn->ibc_peer;
        kib_tx_t         *tx;
        struct list_head  txs;
        unsigned long     flags;
        int               active;

        active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

        CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
               libcfs_nid2str(peer->ibp_nid), active,
               conn->ibc_version, status);

        LASSERT (!in_interrupt());
        LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
                  peer->ibp_connecting > 0) ||
                 (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
                  peer->ibp_accepting > 0));

        LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
        conn->ibc_connvars = NULL;

        if (status != 0) {
                /* failed to establish connection */
                kiblnd_peer_connect_failed(peer, active, status);
                kiblnd_finalise_conn(conn);
                return;
        }

        /* connection established */
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        conn->ibc_last_send = jiffies;
        kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
        kiblnd_peer_alive(peer);

        /* Add conn to peer's list and nuke any dangling conns from a different
         * peer instance... */
        kiblnd_conn_addref(conn);               /* +1 ref for ibc_list */
        list_add(&conn->ibc_list, &peer->ibp_conns);
        if (active)
                peer->ibp_connecting--;
        else
                peer->ibp_accepting--;

        if (peer->ibp_version == 0) {
                peer->ibp_version     = conn->ibc_version;
                peer->ibp_incarnation = conn->ibc_incarnation;
        }

        if (peer->ibp_version     != conn->ibc_version ||
            peer->ibp_incarnation != conn->ibc_incarnation) {
                kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
                                                conn->ibc_incarnation);
                peer->ibp_version     = conn->ibc_version;
                peer->ibp_incarnation = conn->ibc_incarnation;
        }

        /* grab pending txs while I have the lock */
        list_add(&txs, &peer->ibp_tx_queue);
        list_del_init(&peer->ibp_tx_queue);

        if (!kiblnd_peer_active(peer) ||        /* peer has been deleted */
            conn->ibc_comms_error != 0) {       /* error has happened already */
                lnet_ni_t *ni = peer->ibp_ni;

                /* start to shut down connection */
                kiblnd_close_conn_locked(conn, -ECONNABORTED);
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

                kiblnd_txlist_done(ni, &txs, -ECONNABORTED);

                return;
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        /* Schedule blocked txs */
        spin_lock (&conn->ibc_lock);
        while (!list_empty (&txs)) {
                tx = list_entry (txs.next, kib_tx_t, tx_list);
                list_del(&tx->tx_list);

                kiblnd_queue_tx_locked(tx, conn);
        }
        spin_unlock (&conn->ibc_lock);

        kiblnd_check_sends(conn);

        /* schedule blocked rxs */
        kiblnd_handle_early_rxs(conn);
}

void
kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
{
        int rc;

        rc = rdma_reject(cmid, rej, sizeof(*rej));

        if (rc != 0)
                CWARN("Error %d sending reject\n", rc);
}

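/* Handle an incoming connection request from the CM listener.  The request
 * is rejected (with a reason the peer can act on) unless the magic/version,
 * target NID, incarnation stamp and connection parameters (queue depth, max
 * frags, message size) all match what this node speaks; a connection race
 * with our own active connect is tie-broken in favour of the higher NID. */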
int
kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
        rwlock_t              *g_lock = &kiblnd_data.kib_global_lock;
        kib_msg_t             *reqmsg = priv;
        kib_msg_t             *ackmsg;
        kib_dev_t             *ibdev;
        kib_peer_t            *peer;
        kib_peer_t            *peer2;
        kib_conn_t            *conn;
        lnet_ni_t             *ni  = NULL;
        kib_net_t             *net = NULL;
        lnet_nid_t             nid;
        struct rdma_conn_param cp;
        kib_rej_t              rej;
        int                    version = IBLND_MSG_VERSION;
        unsigned long          flags;
        int                    rc;

        LASSERT (!in_interrupt());

        /* cmid inherits 'context' from the corresponding listener id */
        ibdev = (kib_dev_t *)cmid->context;
        LASSERT (ibdev != NULL);

        memset(&rej, 0, sizeof(rej));
        rej.ibr_magic                = IBLND_MSG_MAGIC;
        rej.ibr_why                  = IBLND_REJECT_FATAL;
        rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;

        if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
                CERROR("Short connection request\n");
                goto failed;
        }

        /* Future protocol version compatibility support!  If the
         * o2iblnd-specific protocol changes, or when LNET unifies
         * protocols over all LNDs, the initial connection will
         * negotiate a protocol version.  I trap this here to avoid
         * console errors; the reject tells the peer which protocol I
         * speak. */
        if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
            reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
                goto failed;
        if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
            reqmsg->ibm_version != IBLND_MSG_VERSION &&
            reqmsg->ibm_version != IBLND_MSG_VERSION_1)
                goto failed;
        if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
                goto failed;

        rc = kiblnd_unpack_msg(reqmsg, priv_nob);
        if (rc != 0) {
                CERROR("Can't parse connection request: %d\n", rc);
                goto failed;
        }

        nid = reqmsg->ibm_srcnid;
        ni  = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));

        if (ni != NULL) {
                net = (kib_net_t *)ni->ni_data;
                rej.ibr_incarnation = net->ibn_incarnation;
        }

        if (ni == NULL ||                       /* no matching net */
            ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
            net->ibn_dev != ibdev) {            /* wrong device */
                CERROR("Can't accept %s on %s (%s:%d:%u.%u.%u.%u): "
                       "bad dst nid %s\n", libcfs_nid2str(nid),
                       ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
                       ibdev->ibd_ifname, ibdev->ibd_nnets,
                       HIPQUAD(ibdev->ibd_ifip),
                       libcfs_nid2str(reqmsg->ibm_dstnid));

                goto failed;
        }

        /* check time stamp as soon as possible */
        if (reqmsg->ibm_dststamp != 0 &&
            reqmsg->ibm_dststamp != net->ibn_incarnation) {
                CWARN("Stale connection request\n");
                rej.ibr_why = IBLND_REJECT_CONN_STALE;
                goto failed;
        }

        /* I can accept peer's version */
        version = reqmsg->ibm_version;

        if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
                CERROR("Unexpected connreq msg type: %x from %s\n",
                       reqmsg->ibm_type, libcfs_nid2str(nid));
                goto failed;
        }

        if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
            IBLND_MSG_QUEUE_SIZE(version)) {
                CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
                       libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
                       IBLND_MSG_QUEUE_SIZE(version));

                if (version == IBLND_MSG_VERSION)
                        rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;

                goto failed;
        }

        if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
            IBLND_RDMA_FRAGS(version)) {
                CERROR("Can't accept %s(version %x): "
                       "incompatible max_frags %d (%d wanted)\n",
                       libcfs_nid2str(nid), version,
                       reqmsg->ibm_u.connparams.ibcp_max_frags,
                       IBLND_RDMA_FRAGS(version));

                if (version == IBLND_MSG_VERSION)
                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;

                goto failed;
        }

        if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
                CERROR("Can't accept %s: message size %d too big (%d max)\n",
                       libcfs_nid2str(nid),
                       reqmsg->ibm_u.connparams.ibcp_max_msg_size,
                       IBLND_MSG_SIZE);
                goto failed;
        }

        /* assume 'nid' is a new peer; create */
        rc = kiblnd_create_peer(ni, &peer, nid);
        if (rc != 0) {
                CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                goto failed;
        }

        write_lock_irqsave(g_lock, flags);

        peer2 = kiblnd_find_peer_locked(nid);
        if (peer2 != NULL) {
                if (peer2->ibp_version == 0) {
                        peer2->ibp_version     = version;
                        peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
                }

                /* not the guy I've talked with */
                if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
                    peer2->ibp_version     != version) {
                        kiblnd_close_peer_conns_locked(peer2, -ESTALE);
                        write_unlock_irqrestore(g_lock, flags);

                        CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
                              libcfs_nid2str(nid), peer2->ibp_version, version);

                        kiblnd_peer_decref(peer);
                        rej.ibr_why = IBLND_REJECT_CONN_STALE;
                        goto failed;
                }

                /* tie-break connection race in favour of the higher NID */
                if (peer2->ibp_connecting != 0 &&
                    nid < ni->ni_nid) {
                        write_unlock_irqrestore(g_lock, flags);

                        CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));

                        kiblnd_peer_decref(peer);
                        rej.ibr_why = IBLND_REJECT_CONN_RACE;
                        goto failed;
                }

                peer2->ibp_accepting++;
                kiblnd_peer_addref(peer2);

                write_unlock_irqrestore(g_lock, flags);
                kiblnd_peer_decref(peer);
                peer = peer2;
        } else {
                /* Brand new peer */
                LASSERT (peer->ibp_accepting == 0);
                LASSERT (peer->ibp_version == 0 &&
                         peer->ibp_incarnation == 0);

                peer->ibp_accepting   = 1;
                peer->ibp_version     = version;
                peer->ibp_incarnation = reqmsg->ibm_srcstamp;

                /* I have a ref on ni that prevents it being shutdown */
                LASSERT (net->ibn_shutdown == 0);

                kiblnd_peer_addref(peer);
                list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

                write_unlock_irqrestore(g_lock, flags);
        }

        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
        if (conn == NULL) {
                kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
                kiblnd_peer_decref(peer);
                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                goto failed;
        }

        /* conn now "owns" cmid, so I return success from here on to ensure the
         * CM callback doesn't destroy cmid. */

        conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
        conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
                 <= IBLND_RX_MSGS(version));

        ackmsg = &conn->ibc_connvars->cv_msg;
        memset(ackmsg, 0, sizeof(*ackmsg));

        kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
                        sizeof(ackmsg->ibm_u.connparams));
        ackmsg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
        ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
        ackmsg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);

        kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);

        memset(&cp, 0, sizeof(cp));
        cp.private_data        = ackmsg;
        cp.private_data_len    = ackmsg->ibm_nob;
        cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
        cp.initiator_depth     = 0;
        cp.flow_control        = 1;
        cp.retry_count         = *kiblnd_tunables.kib_retry_count;
        cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;

        CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));

        rc = rdma_accept(cmid, &cp);
        if (rc != 0) {
                CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
                rej.ibr_version = version;
                rej.ibr_why     = IBLND_REJECT_FATAL;

                kiblnd_reject(cmid, &rej);
                kiblnd_connreq_done(conn, rc);
                kiblnd_conn_decref(conn);
        }

        lnet_ni_decref(ni);
        return 0;

 failed:
        if (ni != NULL)
                lnet_ni_decref(ni);

        rej.ibr_version             = version;
        rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
        rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
        kiblnd_reject(cmid, &rej);

        return -ECONNREFUSED;
}

void
kiblnd_reconnect (kib_conn_t *conn, int version,
                  __u64 incarnation, int why, kib_connparams_t *cp)
{
        kib_peer_t    *peer = conn->ibc_peer;
        char          *reason;
        int            retry = 0;
        unsigned long  flags;

        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
        LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        /* retry connection if it's still needed and no other connection
         * attempts (active or passive) are in progress */
        if (!list_empty(&peer->ibp_tx_queue) &&
            peer->ibp_connecting == 1 &&
            peer->ibp_accepting == 0) {
                retry = 1;
                peer->ibp_connecting++;

                peer->ibp_version     = version;
                peer->ibp_incarnation = incarnation;
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        if (!retry)
                return;

        switch (why) {
        default:
                reason = "Unknown";
                break;
        case IBLND_REJECT_CONN_STALE:
                reason = "stale";
                break;

        case IBLND_REJECT_CONN_RACE:
                reason = "conn race";
                break;

        case IBLND_REJECT_CONN_UNCOMPAT:
                reason = "version negotiation";
                break;
        }
        CNETERR("%s: retrying (%s), %x, %x, "
                "queue_dep: %d, max_frag: %d, msg_size: %d\n",
                libcfs_nid2str(peer->ibp_nid),
                reason, IBLND_MSG_VERSION, version,
                cp != NULL ? cp->ibcp_queue_depth  : IBLND_MSG_QUEUE_SIZE(version),
                cp != NULL ? cp->ibcp_max_frags    : IBLND_RDMA_FRAGS(version),
                cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);

        kiblnd_connect_peer(peer);
}
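/* A typical negotiation round (illustrative sketch only; the version values
 * 0x12/0x11 are assumed examples): a v2 node sends a CONNREQ with
 * IBLND_MSG_VERSION (0x12) to a v1-only peer; the peer rejects with FATAL and
 * version 0x11; kiblnd_rejected() below maps that to CONN_UNCOMPAT and
 * kiblnd_reconnect() retries with the peer's advertised version, so the
 * second CONNREQ is wire-compatible. */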
void
kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
{
        kib_peer_t *peer = conn->ibc_peer;

        LASSERT (!in_interrupt());
        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

        switch (reason) {
        case IB_CM_REJ_STALE_CONN:
                kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
                                 IBLND_REJECT_CONN_STALE, NULL);
                break;

        case IB_CM_REJ_INVALID_SERVICE_ID:
                CNETERR("%s rejected: no listener at %d\n",
                        libcfs_nid2str(peer->ibp_nid),
                        *kiblnd_tunables.kib_service);
                break;

        case IB_CM_REJ_CONSUMER_DEFINED:
                if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
                        kib_rej_t        *rej         = priv;
                        kib_connparams_t *cp          = NULL;
                        int               flip        = 0;
                        __u64             incarnation = -1;
                        /* NB. default incarnation is -1 because:
                         * a) V1 will ignore dst incarnation in connreq.
                         * b) V2 will provide incarnation while rejecting me,
                         *    so -1 will be overwritten.
                         *
                         * If I try to connect to a V1 peer with the V2
                         * protocol, it rejects me and is then upgraded to V2.
                         * I know nothing about the upgrade and retry with V1;
                         * the upgraded V2 peer can tell from
                         * incarnation == -1 that I'm trying to talk to the
                         * old guy, and rejects me accordingly. */
                        if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
                            rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
                                __swab32s(&rej->ibr_magic);
                                __swab16s(&rej->ibr_version);
                                flip = 1;
                        }
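                        /* Illustration (editorial, magic value assumed): on a
                         * little-endian host, a big-endian peer's magic
                         * 0x0be91b91 arrives as 0x911be90b, i.e. exactly
                         * __swab32(IBLND_MSG_MAGIC).  Seeing the swapped
                         * magic tells us the peer's byte order differs, so
                         * every multi-byte field in the reject blob must be
                         * swabbed back to native order before use. */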
                        if (priv_nob >= sizeof(kib_rej_t) &&
                            rej->ibr_version > IBLND_MSG_VERSION_1) {
                                /* priv_nob is always 148 in current versions
                                 * of OFED, so we still need to check version.
                                 * (see the define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
                                cp = &rej->ibr_cp;

                                if (flip) {
                                        __swab64s(&rej->ibr_incarnation);
                                        __swab16s(&cp->ibcp_queue_depth);
                                        __swab16s(&cp->ibcp_max_frags);
                                        __swab32s(&cp->ibcp_max_msg_size);
                                }

                                incarnation = rej->ibr_incarnation;
                        }
                        if (rej->ibr_magic != IBLND_MSG_MAGIC &&
                            rej->ibr_magic != LNET_PROTO_MAGIC) {
                                CERROR("%s rejected: consumer defined fatal error\n",
                                       libcfs_nid2str(peer->ibp_nid));
                                break;
                        }

                        if (rej->ibr_version != IBLND_MSG_VERSION &&
                            rej->ibr_version != IBLND_MSG_VERSION_1) {
                                CERROR("%s rejected: o2iblnd version %x error\n",
                                       libcfs_nid2str(peer->ibp_nid),
                                       rej->ibr_version);
                                break;
                        }

                        if (rej->ibr_why     == IBLND_REJECT_FATAL &&
                            rej->ibr_version == IBLND_MSG_VERSION_1) {
                                CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
                                       libcfs_nid2str(peer->ibp_nid), rej->ibr_version);

                                if (conn->ibc_version != IBLND_MSG_VERSION_1)
                                        rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
                        }
                        switch (rej->ibr_why) {
                        case IBLND_REJECT_CONN_RACE:
                        case IBLND_REJECT_CONN_STALE:
                        case IBLND_REJECT_CONN_UNCOMPAT:
                                kiblnd_reconnect(conn, rej->ibr_version,
                                                 incarnation, rej->ibr_why, cp);
                                break;

                        case IBLND_REJECT_MSG_QUEUE_SIZE:
                                CERROR("%s rejected: incompatible message queue depth %d, %d\n",
                                       libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth,
                                       IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
                                break;

                        case IBLND_REJECT_RDMA_FRAGS:
                                CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
                                       libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags,
                                       IBLND_RDMA_FRAGS(conn->ibc_version));
                                break;

                        case IBLND_REJECT_NO_RESOURCES:
                                CERROR("%s rejected: o2iblnd no resources\n",
                                       libcfs_nid2str(peer->ibp_nid));
                                break;

                        case IBLND_REJECT_FATAL:
                                CERROR("%s rejected: o2iblnd fatal error\n",
                                       libcfs_nid2str(peer->ibp_nid));
                                break;

                        default:
                                CERROR("%s rejected: o2iblnd reason %d\n",
                                       libcfs_nid2str(peer->ibp_nid),
                                       rej->ibr_why);
                                break;
                        }
                        break;
                }
                /* fall through */
        default:
                CNETERR("%s rejected: reason %d, size %d\n",
                        libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
                break;
        }

        kiblnd_connreq_done(conn, -ECONNREFUSED);
}
void
kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
{
        kib_peer_t    *peer = conn->ibc_peer;
        lnet_ni_t     *ni   = peer->ibp_ni;
        kib_net_t     *net  = ni->ni_data;
        kib_msg_t     *msg  = priv;
        int            ver  = conn->ibc_version;
        int            rc   = kiblnd_unpack_msg(msg, priv_nob);
        unsigned long  flags;

        LASSERT (net != NULL);

        if (rc != 0) {
                CERROR("Can't unpack connack from %s: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc);
                goto failed;
        }

        if (msg->ibm_type != IBLND_MSG_CONNACK) {
                CERROR("Unexpected message %d from %s\n",
                       msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
                rc = -EPROTO;
                goto failed;
        }

        if (ver != msg->ibm_version) {
                CERROR("%s replied version %x is different from "
                       "requested version %x\n",
                       libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
                rc = -EPROTO;
                goto failed;
        }
        if (msg->ibm_u.connparams.ibcp_queue_depth !=
            IBLND_MSG_QUEUE_SIZE(ver)) {
                CERROR("%s has incompatible queue depth %d (%d wanted)\n",
                       libcfs_nid2str(peer->ibp_nid),
                       msg->ibm_u.connparams.ibcp_queue_depth,
                       IBLND_MSG_QUEUE_SIZE(ver));
                rc = -EPROTO;
                goto failed;
        }

        if (msg->ibm_u.connparams.ibcp_max_frags !=
            IBLND_RDMA_FRAGS(ver)) {
                CERROR("%s has incompatible max_frags %d (%d wanted)\n",
                       libcfs_nid2str(peer->ibp_nid),
                       msg->ibm_u.connparams.ibcp_max_frags,
                       IBLND_RDMA_FRAGS(ver));
                rc = -EPROTO;
                goto failed;
        }

        if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
                CERROR("%s max message size %d too big (%d max)\n",
                       libcfs_nid2str(peer->ibp_nid),
                       msg->ibm_u.connparams.ibcp_max_msg_size,
                       IBLND_MSG_SIZE);
                rc = -EPROTO;
                goto failed;
        }
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        if (msg->ibm_dstnid == ni->ni_nid &&
            msg->ibm_dststamp == net->ibn_incarnation)
                rc = 0;
        else
                rc = -ESTALE;
        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        if (rc != 0) {
                CERROR("Bad connection reply from %s, rc = %d, "
                       "version: %x max_frags: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc,
                       msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
                goto failed;
        }

        conn->ibc_incarnation      = msg->ibm_srcstamp;
        conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(ver);
        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits +
                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(ver));

        kiblnd_connreq_done(conn, 0);
        return;
 failed:
        /* NB My QP has already established itself, so I handle anything going
         * wrong here by setting ibc_comms_error.
         * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
         * immediately tears it down. */

        conn->ibc_comms_error = rc;
        kiblnd_connreq_done(conn, 0);
}
int
kiblnd_active_connect (struct rdma_cm_id *cmid)
{
        kib_peer_t              *peer = (kib_peer_t *)cmid->context;
        kib_conn_t              *conn;
        kib_msg_t               *msg;
        struct rdma_conn_param   cp;
        int                      version;
        __u64                    incarnation;
        unsigned long            flags;
        int                      rc;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        incarnation = peer->ibp_incarnation;
        version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
                                                 peer->ibp_version;

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
        if (conn == NULL) {
                kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
                kiblnd_peer_decref(peer);       /* lose cmid's ref */
                return -ENOMEM;
        }

        /* conn "owns" cmid now, so I return success from here on to ensure the
         * CM callback doesn't destroy cmid. conn also takes over cmid's ref
         * on 'peer' */
        msg = &conn->ibc_connvars->cv_msg;

        memset(msg, 0, sizeof(*msg));
        kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
        msg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
        msg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
        msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;

        kiblnd_pack_msg(peer->ibp_ni, msg, version,
                        0, peer->ibp_nid, incarnation);

        memset(&cp, 0, sizeof(cp));
        cp.private_data        = msg;
        cp.private_data_len    = msg->ibm_nob;
        cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
        cp.initiator_depth     = 0;
        cp.flow_control        = 1;
        cp.retry_count         = *kiblnd_tunables.kib_retry_count;
        cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;
        LASSERT(cmid->context == (void *)conn);
        LASSERT(conn->ibc_cmid == cmid);

        rc = rdma_connect(cmid, &cp);
        if (rc != 0) {
                CERROR("Can't connect to %s: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc);
                kiblnd_connreq_done(conn, rc);
                kiblnd_conn_decref(conn);
        }

        return 0;
}
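/* Sketch of the event flow the callback below drives (editorial summary, not
 * from the original source).  Active side: rdma_resolve_addr() ->
 * ADDR_RESOLVED -> rdma_resolve_route() -> ROUTE_RESOLVED ->
 * kiblnd_active_connect() -> ESTABLISHED (validate the CONNACK) or REJECTED
 * (possibly reconnect).  Passive side: CONNECT_REQUEST ->
 * kiblnd_passive_connect()/rdma_accept() -> ESTABLISHED.  Returning non-zero
 * from the callback makes the RDMA CM destroy cmid, so any path on which a
 * conn has taken ownership of cmid must return 0. */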
int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
        kib_peer_t  *peer;
        kib_conn_t  *conn;
        int          rc;

        switch (event->event) {
        default:
                CERROR("Unexpected event: %d, status: %d\n",
                       event->event, event->status);
                LBUG();

        case RDMA_CM_EVENT_CONNECT_REQUEST:
                /* destroy cmid on failure */
                rc = kiblnd_passive_connect(cmid,
                                            (void *)KIBLND_CONN_PARAM(event),
                                            KIBLND_CONN_PARAM_LEN(event));
                CDEBUG(D_NET, "connreq: %d\n", rc);
                return rc;

        case RDMA_CM_EVENT_ADDR_ERROR:
                peer = (kib_peer_t *)cmid->context;
                CNETERR("%s: ADDR ERROR %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
                kiblnd_peer_decref(peer);
                return -EHOSTUNREACH;           /* rc != 0 destroys cmid */
        case RDMA_CM_EVENT_ADDR_RESOLVED:
                peer = (kib_peer_t *)cmid->context;

                CDEBUG(D_NET, "%s Addr resolved: %d\n",
                       libcfs_nid2str(peer->ibp_nid), event->status);

                if (event->status != 0) {
                        CNETERR("Can't resolve address for %s: %d\n",
                                libcfs_nid2str(peer->ibp_nid), event->status);
                        rc = event->status;
                } else {
                        rc = rdma_resolve_route(
                                cmid, *kiblnd_tunables.kib_timeout * 1000);
                        if (rc == 0)
                                return 0;
                        /* Can't initiate route resolution */
                        CERROR("Can't resolve route for %s: %d\n",
                               libcfs_nid2str(peer->ibp_nid), rc);
                }
                kiblnd_peer_connect_failed(peer, 1, rc);
                kiblnd_peer_decref(peer);
                return rc;                      /* rc != 0 destroys cmid */
        case RDMA_CM_EVENT_ROUTE_ERROR:
                peer = (kib_peer_t *)cmid->context;
                CNETERR("%s: ROUTE ERROR %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
                kiblnd_peer_decref(peer);
                return -EHOSTUNREACH;           /* rc != 0 destroys cmid */

        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                peer = (kib_peer_t *)cmid->context;
                CDEBUG(D_NET, "%s Route resolved: %d\n",
                       libcfs_nid2str(peer->ibp_nid), event->status);

                if (event->status == 0)
                        return kiblnd_active_connect(cmid);

                CNETERR("Can't resolve route for %s: %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, event->status);
                kiblnd_peer_decref(peer);
                return event->status;           /* rc != 0 destroys cmid */
        case RDMA_CM_EVENT_UNREACHABLE:
                conn = (kib_conn_t *)cmid->context;
                LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                        conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
                CNETERR("%s: UNREACHABLE %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
                kiblnd_connreq_done(conn, -ENETDOWN);
                kiblnd_conn_decref(conn);
                return 0;

        case RDMA_CM_EVENT_CONNECT_ERROR:
                conn = (kib_conn_t *)cmid->context;
                LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                        conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
                CNETERR("%s: CONNECT ERROR %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
                kiblnd_connreq_done(conn, -ENOTCONN);
                kiblnd_conn_decref(conn);
                return 0;
        case RDMA_CM_EVENT_REJECTED:
                conn = (kib_conn_t *)cmid->context;
                switch (conn->ibc_state) {
                default:
                        LBUG();

                case IBLND_CONN_PASSIVE_WAIT:
                        CERROR("%s: REJECTED %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               event->status);
                        kiblnd_connreq_done(conn, -ECONNRESET);
                        break;

                case IBLND_CONN_ACTIVE_CONNECT:
                        kiblnd_rejected(conn, event->status,
                                        (void *)KIBLND_CONN_PARAM(event),
                                        KIBLND_CONN_PARAM_LEN(event));
                        break;
                }
                kiblnd_conn_decref(conn);
                return 0;
        case RDMA_CM_EVENT_ESTABLISHED:
                conn = (kib_conn_t *)cmid->context;
                switch (conn->ibc_state) {
                default:
                        LBUG();

                case IBLND_CONN_PASSIVE_WAIT:
                        CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        kiblnd_connreq_done(conn, 0);
                        break;

                case IBLND_CONN_ACTIVE_CONNECT:
                        CDEBUG(D_NET, "ESTABLISHED (active): %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        kiblnd_check_connreply(conn,
                                               (void *)KIBLND_CONN_PARAM(event),
                                               KIBLND_CONN_PARAM_LEN(event));
                        break;
                }
                /* net keeps its ref on conn! */
                return 0;
#ifdef HAVE_OFED_RDMA_CMEV_TIMEWAIT_EXIT
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
                return 0;
#endif
        case RDMA_CM_EVENT_DISCONNECTED:
                conn = (kib_conn_t *)cmid->context;
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        CERROR("%s DISCONNECTED\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        kiblnd_connreq_done(conn, -ECONNRESET);
                } else {
                        kiblnd_close_conn(conn, 0);
                }
                kiblnd_conn_decref(conn);
                cmid->context = NULL;
                return 0;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                LCONSOLE_ERROR_MSG(0x131,
                                   "Received notification of device removal\n"
                                   "Please shutdown LNET to allow this to proceed\n");
                /* Can't remove network from underneath LNET for now, so I have
                 * to ignore this */
                return 0;

#ifdef HAVE_OFED_RDMA_CMEV_ADDRCHANGE
        case RDMA_CM_EVENT_ADDR_CHANGE:
                LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
                return 0;
#endif
        }
}
int
kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
{
        kib_tx_t          *tx;
        struct list_head  *ttmp;
        int                timed_out = 0;

        spin_lock(&conn->ibc_lock);

        list_for_each (ttmp, txs) {
                tx = list_entry (ttmp, kib_tx_t, tx_list);

                if (txs != &conn->ibc_active_txs) {
                        LASSERT (tx->tx_queued);
                } else {
                        LASSERT (!tx->tx_queued);
                        LASSERT (tx->tx_waiting || tx->tx_sending != 0);
                }

                if (time_after_eq (jiffies, tx->tx_deadline)) {
                        timed_out = 1;
                        CERROR("Timed out tx: %s, %lu seconds\n",
                               kiblnd_queue2str(conn, txs),
                               cfs_duration_sec(jiffies - tx->tx_deadline));
                        break;
                }
        }

        spin_unlock(&conn->ibc_lock);
        return timed_out;
}
int
kiblnd_conn_timed_out (kib_conn_t *conn)
{
        return  kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
                kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
                kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
                kiblnd_check_txs(conn, &conn->ibc_active_txs);
}
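/* NB (editorial): tx_deadline is stamped when the tx is first queued --
 * elsewhere in this file it is set along the lines of
 *
 *         tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
 *
 * so kiblnd_conn_timed_out() declares a connection dead as soon as any tx on
 * any of its four queues has been waiting longer than the kib_timeout
 * tunable. */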
void
kiblnd_check_conns (int idx)
{
        struct list_head  *peers = &kiblnd_data.kib_peers[idx];
        struct list_head  *ptmp;
        kib_peer_t        *peer;
        kib_conn_t        *conn;
        struct list_head  *ctmp;
        unsigned long      flags;

 again:
        /* NB. We expect to have a look at all the peers and not find any
         * rdmas to time out, so we just use a shared lock while we
         * take a look... */
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        list_for_each (ptmp, peers) {
                peer = list_entry (ptmp, kib_peer_t, ibp_list);

                list_for_each (ctmp, &peer->ibp_conns) {
                        conn = list_entry (ctmp, kib_conn_t, ibc_list);

                        LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);

                        /* In case we have enough credits to return via a
                         * NOOP, but there were no non-blocking tx descs
                         * free to do it last time... */
                        kiblnd_check_sends(conn);
                        if (!kiblnd_conn_timed_out(conn))
                                continue;

                        /* Handle timeout by closing the whole connection.  We
                         * can only be sure RDMA activity has ceased once the
                         * QP has been modified. */

                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);

                        CERROR("Timed out RDMA with %s (%lu)\n",
                               libcfs_nid2str(peer->ibp_nid),
                               cfs_duration_sec(cfs_time_current() -
                                                peer->ibp_last_alive));

                        kiblnd_close_conn(conn, -ETIMEDOUT);
                        kiblnd_conn_decref(conn); /* ...until here */

                        /* start again now I've dropped the lock */
                        goto again;
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
void
kiblnd_disconnect_conn (kib_conn_t *conn)
{
        LASSERT (!in_interrupt());
        LASSERT (current == kiblnd_data.kib_connd);
        LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);

        rdma_disconnect(conn->ibc_cmid);
        kiblnd_finalise_conn(conn);

        kiblnd_peer_notify(conn->ibc_peer);
}
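/* NB (editorial): disconnect and teardown are funnelled through the single
 * connd thread, so rdma_disconnect() is never issued from interrupt context
 * nor concurrently for the same conn -- which is exactly what the LASSERTs
 * above enforce. */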
int
kiblnd_connd (void *arg)
{
        wait_queue_t       wait;
        unsigned long      flags;
        kib_conn_t        *conn;
        int                timeout;
        int                i;
        int                dropped_lock;
        int                peer_index = 0;
        unsigned long      deadline = jiffies;

        cfs_daemonize ("kiblnd_connd");
        cfs_block_allsigs ();

        init_waitqueue_entry (&wait, current);
        kiblnd_data.kib_connd = current;

        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);

        while (!kiblnd_data.kib_shutdown) {

                dropped_lock = 0;

                if (!list_empty (&kiblnd_data.kib_connd_zombies)) {
                        conn = list_entry (kiblnd_data.kib_connd_zombies.next,
                                           kib_conn_t, ibc_list);
                        list_del(&conn->ibc_list);

                        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
                        dropped_lock = 1;

                        kiblnd_destroy_conn(conn);

                        spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
                }

                if (!list_empty (&kiblnd_data.kib_connd_conns)) {
                        conn = list_entry (kiblnd_data.kib_connd_conns.next,
                                           kib_conn_t, ibc_list);
                        list_del(&conn->ibc_list);

                        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
                        dropped_lock = 1;

                        kiblnd_disconnect_conn(conn);
                        kiblnd_conn_decref(conn);

                        spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
                }
                /* careful with the jiffy wrap... */
                timeout = (int)(deadline - jiffies);
                if (timeout <= 0) {
                        const int n = 4;
                        const int p = 1;
                        int       chunk = kiblnd_data.kib_peer_hash_size;

                        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
                        dropped_lock = 1;

                        /* Time to check for RDMA timeouts on a few more
                         * peers: I do checks every 'p' seconds on a
                         * proportion of the peer table and I need to check
                         * every connection 'n' times within a timeout
                         * interval, to ensure I detect a timeout on any
                         * connection within (n+1)/n times the timeout
                         * interval. */

                        if (*kiblnd_tunables.kib_timeout > n * p)
                                chunk = (chunk * n * p) /
                                        *kiblnd_tunables.kib_timeout;
                        if (chunk == 0)
                                chunk = 1;
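                        /* Worked example (illustrative numbers only): with a
                         * peer hash of 101 buckets, n = 4, p = 1 and a 50s
                         * kib_timeout, chunk = (101 * 4 * 1) / 50 = 8, so 8
                         * buckets are scanned per wakeup and the whole table
                         * roughly every 13s -- i.e. each conn is checked
                         * about n times per timeout interval, as the comment
                         * above requires. */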
                        for (i = 0; i < chunk; i++) {
                                kiblnd_check_conns(peer_index);
                                peer_index = (peer_index + 1) %
                                             kiblnd_data.kib_peer_hash_size;
                        }

                        deadline += p * HZ;
                        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
                }

                if (dropped_lock)
                        continue;
                /* Nothing to do for 'timeout' */
                set_current_state (TASK_INTERRUPTIBLE);
                add_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
                spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);

                schedule_timeout (timeout);

                set_current_state (TASK_RUNNING);
                remove_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
                spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
        }

        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);

        kiblnd_thread_fini();
        return 0;
}
void
kiblnd_qp_event(struct ib_event *event, void *arg)
{
        kib_conn_t *conn = arg;

        switch (event->event) {
        case IB_EVENT_COMM_EST:
                CDEBUG(D_NET, "%s established\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;

        default:
                CERROR("%s: Async QP event type %d\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
                return;
        }
}
void
kiblnd_complete (struct ib_wc *wc)
{
        switch (kiblnd_wreqid2type(wc->wr_id)) {
        default:
                LBUG();

        case IBLND_WID_RDMA:
                /* We only get RDMA completion notification if it fails.  All
                 * subsequent work items, including the final SEND will fail
                 * too.  However we can't print out any more info about the
                 * failing RDMA because 'tx' might be back on the idle list or
                 * even reused already if we didn't manage to post all our work
                 * items */
                CNETERR("RDMA (tx: %p) failed: %d\n",
                        kiblnd_wreqid2ptr(wc->wr_id), wc->status);
                return;

        case IBLND_WID_TX:
                kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
                return;

        case IBLND_WID_RX:
                kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
                                   wc->byte_len);
                return;
        }
}
void
kiblnd_cq_completion (struct ib_cq *cq, void *arg)
{
        /* NB I'm not allowed to schedule this conn once its refcount has
         * reached 0.  Since fundamentally I'm racing with scheduler threads
         * consuming my CQ I could be called after all completions have
         * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
         * and this CQ is about to be destroyed so I NOOP. */
        kib_conn_t     *conn = (kib_conn_t *)arg;
        unsigned long   flags;

        LASSERT (cq == conn->ibc_cq);

        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);

        conn->ibc_ready = 1;

        if (!conn->ibc_scheduled &&
            (conn->ibc_nrx > 0 ||
             conn->ibc_nsends_posted > 0)) {
                kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
                conn->ibc_scheduled = 1;
                list_add_tail(&conn->ibc_sched_list,
                              &kiblnd_data.kib_sched_conns);
                wake_up(&kiblnd_data.kib_sched_waitq);
        }

        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
}
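/* How ibc_ready/ibc_scheduled interact (editorial sketch): ibc_ready means
 * "this CQ has fired since a scheduler last looked"; ibc_scheduled means "the
 * conn is queued on kib_sched_conns or a scheduler still holds its ref".  The
 * callback above always sets ibc_ready but only queues the conn when no
 * scheduler has it; kiblnd_scheduler() below re-queues the conn itself when
 * it finds ibc_ready set again after polling, so completions signalled while
 * a poll is in flight are never lost. */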
void
kiblnd_cq_event(struct ib_event *event, void *arg)
{
        kib_conn_t *conn = arg;

        CERROR("%s: async CQ event type %d\n",
               libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
}
int
kiblnd_scheduler(void *arg)
{
        long            id = (long)arg;
        wait_queue_t    wait;
        unsigned long   flags;
        kib_conn_t     *conn;
        struct ib_wc    wc;
        char            name[16];
        int             rc;
        int             did_something;
        int             busy_loops = 0;

        snprintf(name, sizeof(name), "kiblnd_sd_%02ld", id);
        cfs_daemonize(name);
        cfs_block_allsigs();

        init_waitqueue_entry(&wait, current);

        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);

        while (!kiblnd_data.kib_shutdown) {
                if (busy_loops++ >= IBLND_RESCHED) {
                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                               flags);

                        cfs_cond_resched();
                        busy_loops = 0;

                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
                }

                did_something = 0;

                if (!list_empty(&kiblnd_data.kib_sched_conns)) {
                        conn = list_entry(kiblnd_data.kib_sched_conns.next,
                                          kib_conn_t, ibc_sched_list);
                        /* take over kib_sched_conns' ref on conn... */
                        LASSERT(conn->ibc_scheduled);
                        list_del(&conn->ibc_sched_list);
                        conn->ibc_ready = 0;

                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                               flags);
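                        /* Editorial note on the sequence below: poll once; if
                         * the CQ is empty, re-arm it with ib_req_notify_cq()
                         * and then poll AGAIN.  A completion that slips in
                         * after the empty poll but before the re-arm would
                         * otherwise raise no callback and never be noticed;
                         * the second poll closes that window. */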
                        rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                        if (rc == 0) {
                                rc = ib_req_notify_cq(conn->ibc_cq,
                                                      IB_CQ_NEXT_COMP);
                                if (rc < 0) {
                                        CWARN("%s: ib_req_notify_cq failed: %d, "
                                              "closing connection\n",
                                              libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                                        kiblnd_close_conn(conn, -EIO);
                                        kiblnd_conn_decref(conn);
                                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
                                        continue;
                                }

                                rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                        }

                        if (rc < 0) {
                                CWARN("%s: ib_poll_cq failed: %d, "
                                      "closing connection\n",
                                      libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                                kiblnd_close_conn(conn, -EIO);
                                kiblnd_conn_decref(conn);
                                spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
                                continue;
                        }
                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
                                          flags);

                        if (rc != 0 || conn->ibc_ready) {
                                /* There may be another completion waiting; get
                                 * another scheduler to check while I handle
                                 * this one... */
                                kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
                                list_add_tail(&conn->ibc_sched_list,
                                              &kiblnd_data.kib_sched_conns);
                                wake_up(&kiblnd_data.kib_sched_waitq);
                        } else {
                                conn->ibc_scheduled = 0;
                        }

                        if (rc != 0) {
                                spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                                       flags);

                                kiblnd_complete(&wc);

                                spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
                                                  flags);
                        }

                        kiblnd_conn_decref(conn); /* ...drop my ref from above */
                        did_something = 1;
                }

                if (did_something)
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
                spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);

                schedule();
                busy_loops = 0;

                remove_wait_queue(&kiblnd_data.kib_sched_waitq, &wait);
                set_current_state(TASK_RUNNING);
                spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
        }

        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);

        kiblnd_thread_fini();
        return 0;
}