/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd_cb.c
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */

#include "o2iblnd.h"

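/* Protocol overview (see the handlers below): every message returns receive
 * credits to its sender in ibm_credits; a sender consumes one credit per
 * message and stalls when none remain (kiblnd_post_tx_locked).  Small
 * payloads travel inline as IBLND_MSG_IMMEDIATE, while large PUTs and GETs
 * exchange descriptors (PUT_REQ/PUT_ACK/PUT_DONE, GET_REQ/GET_DONE) so the
 * bulk data moves by RDMA write into the peer's pre-mapped buffers. */
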
void
kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
{
        lnet_msg_t *lntmsg[2];
        kib_net_t  *net = ni->ni_data;
        int         rc;
        int         i;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (!tx->tx_queued);               /* mustn't be queued for sending */
        LASSERT (tx->tx_sending == 0);          /* mustn't be awaiting sent callback */
        LASSERT (!tx->tx_waiting);              /* mustn't be awaiting peer response */
        LASSERT (tx->tx_pool != NULL);

        kiblnd_unmap_tx(ni, tx);

        /* tx may have up to 2 lnet msgs to finalise */
        lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
        lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
        rc = tx->tx_status;

        if (tx->tx_conn != NULL) {
                LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);

                kiblnd_conn_decref(tx->tx_conn);
                tx->tx_conn = NULL;
        }

        tx->tx_nwrq = 0;
        tx->tx_status = 0;

        kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);

        /* delay finalize until my descs have been freed */
        for (i = 0; i < 2; i++) {
                if (lntmsg[i] == NULL)
                        continue;

                lnet_finalize(ni, lntmsg[i], rc);
        }
}

void
kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
{
        kib_tx_t *tx;

        while (!list_empty (txlist)) {
                tx = list_entry (txlist->next, kib_tx_t, tx_list);

                list_del(&tx->tx_list);
                tx->tx_waiting = 0;             /* complete now */
                tx->tx_status = status;
                kiblnd_tx_done(ni, tx);
        }
}

kib_tx_t *
kiblnd_get_idle_tx (lnet_ni_t *ni)
{
        kib_net_t        *net = (kib_net_t *)ni->ni_data;
        struct list_head *node;
        kib_tx_t         *tx;

        node = kiblnd_pool_alloc_node(&net->ibn_tx_ps.tps_poolset);
        if (node == NULL)
                return NULL;
        tx = container_of(node, kib_tx_t, tx_list);

        LASSERT (tx->tx_nwrq == 0);
        LASSERT (!tx->tx_queued);
        LASSERT (tx->tx_sending == 0);
        LASSERT (!tx->tx_waiting);
        LASSERT (tx->tx_status == 0);
        LASSERT (tx->tx_conn == NULL);
        LASSERT (tx->tx_lntmsg[0] == NULL);
        LASSERT (tx->tx_lntmsg[1] == NULL);
        LASSERT (tx->tx_u.pmr == NULL);
        LASSERT (tx->tx_nfrags == 0);

        return tx;
}

void
kiblnd_drop_rx (kib_rx_t *rx)
{
        kib_conn_t   *conn = rx->rx_conn;
        unsigned long flags;

        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
        LASSERT (conn->ibc_nrx > 0);
        conn->ibc_nrx--;
        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);

        kiblnd_conn_decref(conn);
}

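/* A consumed receive buffer must be re-posted, and how its implicit credit
 * is accounted depends on the caller: IBLND_POSTRX_NO_CREDIT returns
 * nothing, IBLND_POSTRX_PEER_CREDIT returns the peer's message credit, and
 * IBLND_POSTRX_RSRVD_CREDIT returns a credit reserved for completion
 * messages (see kiblnd_post_rx below). */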
int
kiblnd_post_rx (kib_rx_t *rx, int credit)
{
        kib_conn_t         *conn = rx->rx_conn;
        kib_net_t          *net = conn->ibc_peer->ibp_ni->ni_data;
        struct ib_recv_wr  *bad_wrq = NULL;
        struct ib_mr       *mr;
        int                 rc;

        LASSERT (net != NULL);
        LASSERT (!in_interrupt());
        LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
                 credit == IBLND_POSTRX_PEER_CREDIT ||
                 credit == IBLND_POSTRX_RSRVD_CREDIT);

        mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
        LASSERT (mr != NULL);

        rx->rx_sge.lkey   = mr->lkey;
        rx->rx_sge.addr   = rx->rx_msgaddr;
        rx->rx_sge.length = IBLND_MSG_SIZE;

        rx->rx_wrq.next    = NULL;
        rx->rx_wrq.sg_list = &rx->rx_sge;
        rx->rx_wrq.num_sge = 1;
        rx->rx_wrq.wr_id   = kiblnd_ptr2wreqid(rx, IBLND_WID_RX);

        LASSERT (conn->ibc_state >= IBLND_CONN_INIT);
        LASSERT (rx->rx_nob >= 0);              /* not posted */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED) {
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return 0;
        }

        rx->rx_nob = -1;                        /* flag posted */

        rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
        if (rc != 0) {
                CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
                rx->rx_nob = 0;
        }

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
                return rc;

        if (rc != 0) {
                kiblnd_close_conn(conn, rc);
                kiblnd_drop_rx(rx);             /* No more posts for this rx */
                return rc;
        }

        if (credit == IBLND_POSTRX_NO_CREDIT)
                return 0;

        spin_lock(&conn->ibc_lock);
        if (credit == IBLND_POSTRX_PEER_CREDIT)
                conn->ibc_outstanding_credits++;
        else
                conn->ibc_reserved_credits++;
        spin_unlock(&conn->ibc_lock);

        kiblnd_check_sends(conn);
        return 0;
}

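/* Every tx awaiting a peer response carries a unique cookie; completion
 * messages echo it back, so the matching tx is found by scanning
 * ibc_active_txs under ibc_lock. */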
kib_tx_t *
kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
{
        struct list_head *tmp;

        list_for_each(tmp, &conn->ibc_active_txs) {
                kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);

                LASSERT (!tx->tx_queued);
                LASSERT (tx->tx_sending != 0 || tx->tx_waiting);

                if (tx->tx_cookie != cookie)
                        continue;

                if (tx->tx_waiting &&
                    tx->tx_msg->ibm_type == txtype)
                        return tx;

                CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
                      tx->tx_waiting ? "" : "NOT ",
                      tx->tx_msg->ibm_type, txtype);
                return NULL;
        }
        return NULL;
}

void
kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
{
        kib_tx_t   *tx;
        lnet_ni_t  *ni = conn->ibc_peer->ibp_ni;
        int         idle;

        spin_lock(&conn->ibc_lock);

        tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
        if (tx == NULL) {
                spin_unlock(&conn->ibc_lock);

                CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
                      txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                kiblnd_close_conn(conn, -EPROTO);
                return;
        }

        if (tx->tx_status == 0) {               /* success so far */
                if (status < 0)                 /* failed? */
                        tx->tx_status = status;
                else if (txtype == IBLND_MSG_GET_REQ)
                        lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
        }

        tx->tx_waiting = 0;

        idle = !tx->tx_queued && (tx->tx_sending == 0);
        if (idle)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(ni, tx);
}

void
kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie)
{
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
        kib_tx_t  *tx = kiblnd_get_idle_tx(ni);

        if (tx == NULL) {
                CERROR("Can't get tx for completion %x for %s\n",
                       type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        tx->tx_msg->ibm_u.completion.ibcm_status = status;
        tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
        kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));

        kiblnd_queue_tx(tx, conn);
}

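/* kiblnd_handle_rx() dispatches a received message: it first banks any
 * piggybacked credits (closing the connection on a credit protocol
 * violation), then switches on message type, and finally re-posts the
 * receive buffer unless the rx was handed to LNET via lnet_parse() or
 * reserved for a pending PUT_DONE. */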
void
kiblnd_handle_rx (kib_rx_t *rx)
{
        kib_msg_t    *msg = rx->rx_msg;
        kib_conn_t   *conn = rx->rx_conn;
        lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
        int           credits = msg->ibm_credits;
        kib_tx_t     *tx;
        int           rc = 0;
        int           rc2;
        int           post_credit;

        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        CDEBUG (D_NET, "Received %x[%d] from %s\n",
                msg->ibm_type, credits, libcfs_nid2str(conn->ibc_peer->ibp_nid));

        if (credits != 0) {
                /* Have I received credits that will let me send? */
                spin_lock(&conn->ibc_lock);

                if (conn->ibc_credits + credits >
                    IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
                        rc2 = conn->ibc_credits;
                        spin_unlock(&conn->ibc_lock);

                        CERROR("Bad credits from %s: %d + %d > %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               rc2, credits,
                               IBLND_MSG_QUEUE_SIZE(conn->ibc_version));

                        kiblnd_close_conn(conn, -EPROTO);
                        kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
                        return;
                }

                conn->ibc_credits += credits;

                spin_unlock(&conn->ibc_lock);
                kiblnd_check_sends(conn);
        }

        switch (msg->ibm_type) {
        default:
                CERROR("Bad IBLND message type %x from %s\n",
                       msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_NO_CREDIT;
                rc = -EPROTO;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        post_credit = IBLND_POSTRX_NO_CREDIT;
                else
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_IMMEDIATE:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
                                msg->ibm_srcnid, rx, 0);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_PUT_NAK:
                CWARN ("PUT_NACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_PUT_ACK:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;

                spin_lock(&conn->ibc_lock);
                tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
                                        msg->ibm_u.putack.ibpam_src_cookie);
                if (tx != NULL)
                        list_del(&tx->tx_list);
                spin_unlock(&conn->ibc_lock);

                if (tx == NULL) {
                        CERROR("Unmatched PUT_ACK from %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        rc = -EPROTO;
                        break;
                }

                LASSERT (tx->tx_waiting);
                /* CAVEAT EMPTOR: I could be racing with tx_complete, but...
                 * (a) I can overwrite tx_msg since my peer has received it!
                 * (b) tx_waiting set tells tx_complete() it's not done. */

                tx->tx_nwrq = 0;                /* overwrite PUT_REQ */

                rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
                                       kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
                                       &msg->ibm_u.putack.ibpam_rd,
                                       msg->ibm_u.putack.ibpam_dst_cookie);
                if (rc2 < 0)
                        CERROR("Can't setup rdma for PUT to %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);

                spin_lock(&conn->ibc_lock);
                tx->tx_waiting = 0;     /* clear waiting and queue atomically */
                kiblnd_queue_tx_locked(tx, conn);
                spin_unlock(&conn->ibc_lock);
                break;

        case IBLND_MSG_PUT_DONE:
                post_credit = IBLND_POSTRX_PEER_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_PUT_ACK,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;

        case IBLND_MSG_GET_REQ:
                post_credit = IBLND_POSTRX_DONT_POST;
                rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
                                msg->ibm_srcnid, rx, 1);
                if (rc < 0)                     /* repost on error */
                        post_credit = IBLND_POSTRX_PEER_CREDIT;
                break;

        case IBLND_MSG_GET_DONE:
                post_credit = IBLND_POSTRX_RSRVD_CREDIT;
                kiblnd_handle_completion(conn, IBLND_MSG_GET_REQ,
                                         msg->ibm_u.completion.ibcm_status,
                                         msg->ibm_u.completion.ibcm_cookie);
                break;
        }

        if (rc < 0)                             /* protocol error */
                kiblnd_close_conn(conn, rc);

        if (post_credit != IBLND_POSTRX_DONT_POST)
                kiblnd_post_rx(rx, post_credit);
}

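/* rx_nob doubles as state: negative means the buffer is posted, zero means
 * idle.  Receives that race with connection establishment are stashed on
 * ibc_early_rxs (re-checked under the global write lock) and replayed later
 * by kiblnd_handle_early_rxs(). */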
void
kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
{
        kib_msg_t    *msg = rx->rx_msg;
        kib_conn_t   *conn = rx->rx_conn;
        lnet_ni_t    *ni = conn->ibc_peer->ibp_ni;
        kib_net_t    *net = ni->ni_data;
        int           rc;
        int           err = -EIO;

        LASSERT (net != NULL);
        LASSERT (rx->rx_nob < 0);               /* was posted */
        rx->rx_nob = 0;                         /* isn't now */

        if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
                goto ignore;

        if (status != IB_WC_SUCCESS) {
                CNETERR("Rx from %s failed: %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
                goto failed;
        }

        LASSERT (nob >= 0);
        rx->rx_nob = nob;

        rc = kiblnd_unpack_msg(msg, rx->rx_nob);
        if (rc != 0) {
                CERROR ("Error %d unpacking rx from %s\n",
                        rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
                goto failed;
        }

        if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
            msg->ibm_dstnid != ni->ni_nid ||
            msg->ibm_srcstamp != conn->ibc_incarnation ||
            msg->ibm_dststamp != net->ibn_incarnation) {
                CERROR ("Stale rx from %s\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
                err = -ESTALE;
                goto failed;
        }

        /* set time last known alive */
        kiblnd_peer_alive(conn->ibc_peer);

        /* racing with connection establishment/teardown! */

        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
                unsigned long  flags;

                write_lock_irqsave(g_lock, flags);
                /* must check holding global lock to eliminate race */
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
                        write_unlock_irqrestore(g_lock, flags);
                        return;
                }
                write_unlock_irqrestore(g_lock, flags);
        }
        kiblnd_handle_rx(rx);
        return;

 failed:
        CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
        kiblnd_close_conn(conn, err);
 ignore:
        kiblnd_drop_rx(rx);                     /* Don't re-post rx. */
}

struct page *
kiblnd_kvaddr_to_page (unsigned long vaddr)
{
        struct page *page;

        if (vaddr >= VMALLOC_START &&
            vaddr < VMALLOC_END) {
                page = vmalloc_to_page ((void *)vaddr);
                LASSERT (page != NULL);
                return page;
        }
#ifdef CONFIG_HIGHMEM
        if (vaddr >= PKMAP_BASE &&
            vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
                /* No highmem kernel virtual addresses here: highmem pages
                 * are only used for bulk (kiov) I/O */
                CERROR("find page for address in highmem\n");
                LBUG();
        }
#endif
        page = virt_to_page (vaddr);
        LASSERT (page != NULL);
        return page;
}

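/* FMR mapping: the fragment list is flattened into an array of
 * device-page-aligned addresses, mapped as one FMR, and then collapsed into
 * a single virtually contiguous fragment (rd_nfrags == 1). */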
static int
kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
        kib_dev_t *ibdev = net->ibn_dev;
        __u64     *pages = tx->tx_pages;
        int        npages;
        int        size;
        int        rc;
        int        i;

        for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
                for (size = 0; size <  rd->rd_frags[i].rf_nob;
                               size += ibdev->ibd_page_size) {
                        pages[npages ++] = (rd->rd_frags[i].rf_addr &
                                            ibdev->ibd_page_mask) + size;
                }
        }

        rc = kiblnd_fmr_pool_map(&net->ibn_fmr_ps, pages, npages, 0, &tx->tx_u.fmr);
        if (rc != 0) {
                CERROR ("Can't map %d pages: %d\n", npages, rc);
                return rc;
        }

        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
         * the rkey */
        rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey :
                                         tx->tx_u.fmr.fmr_pfmr->fmr->lkey;
        rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
        rd->rd_frags[0].rf_nob   = nob;
        rd->rd_nfrags = 1;

        return 0;
}

static int
kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
{
        __u64 iova;
        int   rc;

        iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;

        rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, rd, &iova, &tx->tx_u.pmr);
        if (rc != 0) {
                CERROR("Failed to create MR by phybuf: %d\n", rc);
                return rc;
        }

        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
         * the rkey */
        rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey :
                                         tx->tx_u.pmr->pmr_mr->lkey;

        rd->rd_nfrags = 1;
        rd->rd_frags[0].rf_addr = iova;
        rd->rd_frags[0].rf_nob  = nob;

        return 0;
}

void
kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
        kib_net_t *net = ni->ni_data;

        LASSERT (net != NULL);

        if (net->ibn_with_fmr && tx->tx_u.fmr.fmr_pfmr != NULL) {
                kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status);
                tx->tx_u.fmr.fmr_pfmr = NULL;
        } else if (net->ibn_with_pmr && tx->tx_u.pmr != NULL) {
                kiblnd_pmr_pool_unmap(tx->tx_u.pmr);
                tx->tx_u.pmr = NULL;
        }

        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
                                    tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
                tx->tx_nfrags = 0;
        }
}

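/* kiblnd_map_tx() DMA-maps the scatterlist and then picks a registration
 * strategy in the order the code below tries them: a pre-mapped global MR
 * if one covers the fragments, else FMR, else PMR. */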
int
kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
              kib_rdma_desc_t *rd, int nfrags)
{
        kib_net_t    *net = ni->ni_data;
        struct ib_mr *mr  = NULL;
        __u32         nob;
        int           i;

        /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
         * RDMA sink */
        tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        tx->tx_nfrags = nfrags;

        rd->rd_nfrags =
                kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
                                  tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);

        for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
                rd->rd_frags[i].rf_nob  = kiblnd_sg_dma_len(
                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
                rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
                        net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
                nob += rd->rd_frags[i].rf_nob;
        }

        /* looking for pre-mapping MR */
        mr = kiblnd_find_rd_dma_mr(net, rd);
        if (mr != NULL) {
                /* found pre-mapping MR */
                rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
                return 0;
        }

        if (net->ibn_with_fmr)
                return kiblnd_fmr_map_tx(net, tx, rd, nob);
        else if (net->ibn_with_pmr)
                return kiblnd_pmr_map_tx(net, tx, rd, nob);

        return -EINVAL;
}

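/* The two setup_rd variants below build the tx scatterlist from either
 * kernel virtual addresses (iov) or pages (kiov), clipping each fragment at
 * a page boundary before handing off to kiblnd_map_tx(). */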
int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                    unsigned int niov, struct iovec *iov, int offset, int nob)
{
        kib_net_t          *net = ni->ni_data;
        struct page        *page;
        struct scatterlist *sg;
        unsigned long       vaddr;
        int                 fragnob;
        int                 page_offset;

        LASSERT (nob > 0);
        LASSERT (niov > 0);
        LASSERT (net != NULL);

        while (offset >= iov->iov_len) {
                offset -= iov->iov_len;
                niov--;
                iov++;
                LASSERT (niov > 0);
        }

        sg = tx->tx_frags;
        do {
                LASSERT (niov > 0);

                vaddr = ((unsigned long)iov->iov_base) + offset;
                page_offset = vaddr & (PAGE_SIZE - 1);
                page = kiblnd_kvaddr_to_page(vaddr);
                if (page == NULL) {
                        CERROR ("Can't find page\n");
                        return -EFAULT;
                }

                fragnob = min((int)(iov->iov_len - offset), nob);
                fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);

                sg_set_page(sg, page, fragnob, page_offset);
                sg++;

                if (offset + fragnob < iov->iov_len) {
                        offset += fragnob;
                } else {
                        offset = 0;
                        iov++;
                        niov--;
                }
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

int
kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
                      int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
        kib_net_t          *net = ni->ni_data;
        struct scatterlist *sg;
        int                 fragnob;

        CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);

        LASSERT (nob > 0);
        LASSERT (nkiov > 0);
        LASSERT (net != NULL);

        while (offset >= kiov->kiov_len) {
                offset -= kiov->kiov_len;
                nkiov--;
                kiov++;
                LASSERT (nkiov > 0);
        }

        sg = tx->tx_frags;
        do {
                LASSERT (nkiov > 0);

                fragnob = min((int)(kiov->kiov_len - offset), nob);

                memset(sg, 0, sizeof(*sg));
                sg_set_page(sg, kiov->kiov_page, fragnob,
                            kiov->kiov_offset + offset);
                sg++;

                offset = 0;
                kiov++;
                nkiov--;
                nob -= fragnob;
        } while (nob > 0);

        return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}

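/* A queued tx may be posted only if (1) the concurrent-send limit hasn't
 * been reached, (2) a send credit is available when one is needed, and
 * (3) posting wouldn't consume the last credit while owing none back: that
 * last credit is kept so outstanding credits can always be returned to the
 * peer (see kiblnd_post_tx_locked below). */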
int
kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
{
        kib_msg_t         *msg = tx->tx_msg;
        kib_peer_t        *peer = conn->ibc_peer;
        int                ver = conn->ibc_version;
        int                rc;
        int                done;
        struct ib_send_wr *bad_wrq;

        LASSERT (tx->tx_queued);
        /* We rely on this for QP sizing */
        LASSERT (tx->tx_nwrq > 0);
        LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));

        LASSERT (credit == 0 || credit == 1);
        LASSERT (conn->ibc_outstanding_credits >= 0);
        LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
        LASSERT (conn->ibc_credits >= 0);
        LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));

        if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && conn->ibc_credits == 0) {   /* no credits */
                CDEBUG(D_NET, "%s: no credits\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
            conn->ibc_credits == 1 &&           /* last credit reserved for */
            conn->ibc_outstanding_credits == 0) { /* giving back credits */
                CDEBUG(D_NET, "%s: not using last credit\n",
                       libcfs_nid2str(peer->ibp_nid));
                return -EAGAIN;
        }

        /* NB don't drop ibc_lock before bumping tx_sending */
        list_del(&tx->tx_list);
        tx->tx_queued = 0;

        if (msg->ibm_type == IBLND_MSG_NOOP &&
            (!kiblnd_send_noop(conn) ||         /* redundant NOOP */
             (IBLND_OOB_CAPABLE(ver) &&         /* posted enough NOOP */
              conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
                /* OK to drop when posted enough NOOPs, since
                 * kiblnd_check_sends will queue NOOP again when
                 * posted NOOPs complete */
                spin_unlock(&conn->ibc_lock);
                kiblnd_tx_done(peer->ibp_ni, tx);
                spin_lock(&conn->ibc_lock);
                CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
                       libcfs_nid2str(peer->ibp_nid),
                       conn->ibc_noops_posted);
                return 0;
        }

        kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
                        peer->ibp_nid, conn->ibc_incarnation);

        conn->ibc_credits -= credit;
        conn->ibc_outstanding_credits = 0;
        conn->ibc_nsends_posted++;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted++;

        /* CAVEAT EMPTOR!  This tx could be the PUT_DONE of an RDMA
         * PUT.  If so, it was first queued here as a PUT_REQ, sent and
         * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
         * and then re-queued here.  It's (just) possible that
         * tx_sending is non-zero if we've not done the tx_complete()
         * from the first send; hence the ++ rather than = below. */
        tx->tx_sending++;
        list_add(&tx->tx_list, &conn->ibc_active_txs);

        /* I'm still holding ibc_lock! */
        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
                rc = -ECONNABORTED;
        else
                rc = ib_post_send(conn->ibc_cmid->qp,
                                  tx->tx_wrq, &bad_wrq);
        conn->ibc_last_send = jiffies;

        if (rc == 0)
                return 0;

        /* NB credits are transferred in the actual
         * message, which can only be the last work item */
        conn->ibc_credits += credit;
        conn->ibc_outstanding_credits += msg->ibm_credits;
        conn->ibc_nsends_posted--;
        if (msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        tx->tx_status = rc;
        tx->tx_waiting = 0;
        tx->tx_sending--;

        done = (tx->tx_sending == 0);
        if (done)
                list_del(&tx->tx_list);

        spin_unlock(&conn->ibc_lock);

        if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                CERROR("Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer->ibp_nid));
        else
                CDEBUG(D_NET, "Error %d posting transmit to %s\n",
                       rc, libcfs_nid2str(peer->ibp_nid));

        kiblnd_close_conn(conn, rc);

        if (done)
                kiblnd_tx_done(peer->ibp_ni, tx);

        spin_lock(&conn->ibc_lock);

        return -EIO;
}

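/* kiblnd_check_sends() drains the send queues: reserved-credit txs are
 * promoted while reserved credits last, a NOOP is queued when credits need
 * returning with nothing else to send, and the no-credit queue is serviced
 * ahead of the credit-consuming one. */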
void
kiblnd_check_sends (kib_conn_t *conn)
{
        int        ver = conn->ibc_version;
        lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
        kib_tx_t  *tx;

        /* Don't send anything until after the connection is established */
        if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                CDEBUG(D_NET, "%s too soon\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;
        }

        spin_lock(&conn->ibc_lock);

        LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT (conn->ibc_reserved_credits >= 0);

        while (conn->ibc_reserved_credits > 0 &&
               !list_empty(&conn->ibc_tx_queue_rsrvd)) {
                tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
                                kib_tx_t, tx_list);
                list_del(&tx->tx_list);
                list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
                conn->ibc_reserved_credits--;
        }

        if (kiblnd_send_noop(conn)) {
                spin_unlock(&conn->ibc_lock);

                tx = kiblnd_get_idle_tx(ni);
                if (tx != NULL)
                        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);

                spin_lock(&conn->ibc_lock);
                if (tx != NULL)
                        kiblnd_queue_tx_locked(tx, conn);
        }

        kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */

        for (;;) {
                int credit;

                if (!list_empty(&conn->ibc_tx_queue_nocred)) {
                        credit = 0;
                        tx = list_entry(conn->ibc_tx_queue_nocred.next,
                                        kib_tx_t, tx_list);
                } else if (!list_empty(&conn->ibc_tx_queue)) {
                        credit = 1;
                        tx = list_entry(conn->ibc_tx_queue.next,
                                        kib_tx_t, tx_list);
                } else
                        break;

                if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
                        break;
        }

        spin_unlock(&conn->ibc_lock);

        kiblnd_conn_decref(conn); /* ...until here */
}

void
kiblnd_tx_complete (kib_tx_t *tx, int status)
{
        int         failed = (status != IB_WC_SUCCESS);
        kib_conn_t *conn = tx->tx_conn;
        int         idle;

        LASSERT (tx->tx_sending > 0);

        if (failed) {
                if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
                        CNETERR("Tx -> %s cookie "LPX64
                                " sending %d waiting %d: failed %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
                                status);

                kiblnd_close_conn(conn, -EIO);
        } else {
                kiblnd_peer_alive(conn->ibc_peer);
        }

        spin_lock(&conn->ibc_lock);

        /* I could be racing with rdma completion.  Whoever makes 'tx' idle
         * gets to free it, which also drops its ref on 'conn'. */

        tx->tx_sending--;
        conn->ibc_nsends_posted--;
        if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP)
                conn->ibc_noops_posted--;

        if (failed) {
                tx->tx_waiting = 0;             /* don't wait for peer */
                tx->tx_status = -EIO;
        }

        idle = (tx->tx_sending == 0) &&         /* This is the final callback */
               !tx->tx_waiting &&               /* Not waiting for peer */
               !tx->tx_queued;                  /* Not re-queued (PUT_DONE) */
        if (idle)
                list_del(&tx->tx_list);

        kiblnd_conn_addref(conn);               /* 1 ref for me.... */

        spin_unlock(&conn->ibc_lock);

        if (idle)
                kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);

        kiblnd_check_sends(conn);

        kiblnd_conn_decref(conn);               /* ...until here */
}

void
kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
        kib_net_t         *net = ni->ni_data;
        struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
        struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
        int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
        struct ib_mr      *mr;

        LASSERT (net != NULL);
        LASSERT (tx->tx_nwrq >= 0);
        LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
        LASSERT (nob <= IBLND_MSG_SIZE);

        kiblnd_init_msg(tx->tx_msg, type, body_nob);

        mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
        LASSERT (mr != NULL);

        sge->lkey   = mr->lkey;
        sge->addr   = tx->tx_msgaddr;
        sge->length = nob;

        memset(wrq, 0, sizeof(*wrq));

        wrq->next       = NULL;
        wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
        wrq->sg_list    = sge;
        wrq->num_sge    = 1;
        wrq->opcode     = IB_WR_SEND;
        wrq->send_flags = IB_SEND_SIGNALED;

        tx->tx_nwrq++;
}

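/* kiblnd_init_rdma() pairs source and destination fragment lists, emitting
 * one IB_WR_RDMA_WRITE work request per overlapping chunk and chaining them
 * with wrq->next; the completion message appended by kiblnd_init_tx_msg()
 * is the final, signalled work request and is the one that carries the
 * returned credits. */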
int
kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
                  int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
        kib_msg_t         *ibmsg = tx->tx_msg;
        kib_rdma_desc_t   *srcrd = tx->tx_rd;
        struct ib_sge     *sge = &tx->tx_sge[0];
        struct ib_send_wr *wrq = &tx->tx_wrq[0];
        int                rc  = resid;
        int                srcidx;
        int                dstidx;
        int                wrknob;

        LASSERT (!in_interrupt());
        LASSERT (tx->tx_nwrq == 0);
        LASSERT (type == IBLND_MSG_GET_DONE ||
                 type == IBLND_MSG_PUT_DONE);

        srcidx = dstidx = 0;

        while (resid > 0) {
                if (srcidx >= srcrd->rd_nfrags) {
                        CERROR("Src buffer exhausted: %d frags\n", srcidx);
                        rc = -EPROTO;
                        break;
                }

                if (dstidx == dstrd->rd_nfrags) {
                        CERROR("Dst buffer exhausted: %d frags\n", dstidx);
                        rc = -EPROTO;
                        break;
                }

                if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
                        CERROR("RDMA too fragmented for %s (%d): "
                               "%d/%d src %d/%d dst frags\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid),
                               IBLND_RDMA_FRAGS(conn->ibc_version),
                               srcidx, srcrd->rd_nfrags,
                               dstidx, dstrd->rd_nfrags);
                        rc = -EMSGSIZE;
                        break;
                }

                wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
                                 kiblnd_rd_frag_size(dstrd, dstidx)), resid);

                sge = &tx->tx_sge[tx->tx_nwrq];
                sge->addr   = kiblnd_rd_frag_addr(srcrd, srcidx);
                sge->lkey   = kiblnd_rd_frag_key(srcrd, srcidx);
                sge->length = wrknob;

                wrq = &tx->tx_wrq[tx->tx_nwrq];

                wrq->next       = wrq + 1;
                wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
                wrq->sg_list    = sge;
                wrq->num_sge    = 1;
                wrq->opcode     = IB_WR_RDMA_WRITE;
                wrq->send_flags = 0;

                wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
                wrq->wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);

                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);

                resid -= wrknob;

                tx->tx_nwrq++;
                wrq++;
                sge++;
        }

        if (rc < 0)                             /* no RDMA if completing with failure */
                tx->tx_nwrq = 0;

        ibmsg->ibm_u.completion.ibcm_status = rc;
        ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
        kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
                           type, sizeof (kib_completion_msg_t));

        return rc;
}

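/* Message type determines the queue: PUT_REQ/GET_REQ wait for a reserved
 * credit, completion messages travel credit-free, and IMMEDIATE (and
 * pre-OOB NOOPs) consume a normal send credit. */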
void
kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
{
        struct list_head *q;

        LASSERT (tx->tx_nwrq > 0);              /* work items set up */
        LASSERT (!tx->tx_queued);               /* not queued for sending already */
        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        tx->tx_queued = 1;
        tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);

        if (tx->tx_conn == NULL) {
                kiblnd_conn_addref(conn);
                tx->tx_conn = conn;
                LASSERT (tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE);
        } else {
                /* PUT_DONE first attached to conn as a PUT_REQ */
                LASSERT (tx->tx_conn == conn);
                LASSERT (tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
        }

        switch (tx->tx_msg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_PUT_REQ:
        case IBLND_MSG_GET_REQ:
                q = &conn->ibc_tx_queue_rsrvd;
                break;

        case IBLND_MSG_PUT_NAK:
        case IBLND_MSG_PUT_ACK:
        case IBLND_MSG_PUT_DONE:
        case IBLND_MSG_GET_DONE:
                q = &conn->ibc_tx_queue_nocred;
                break;

        case IBLND_MSG_NOOP:
                if (IBLND_OOB_CAPABLE(conn->ibc_version))
                        q = &conn->ibc_tx_queue_nocred;
                else
                        q = &conn->ibc_tx_queue;
                break;

        case IBLND_MSG_IMMEDIATE:
                q = &conn->ibc_tx_queue;
                break;
        }

        list_add_tail(&tx->tx_list, q);
}

void
kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
{
        spin_lock(&conn->ibc_lock);
        kiblnd_queue_tx_locked(tx, conn);
        spin_unlock(&conn->ibc_lock);

        kiblnd_check_sends(conn);
}

void
kiblnd_connect_peer (kib_peer_t *peer)
{
        struct rdma_cm_id  *cmid;
        kib_dev_t          *dev;
        kib_net_t          *net = peer->ibp_ni->ni_data;
        struct sockaddr_in  srcaddr;
        struct sockaddr_in  dstaddr;
        int                 rc;

        LASSERT (net != NULL);
        LASSERT (peer->ibp_connecting > 0);

        cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP);
        if (IS_ERR(cmid)) {
                CERROR("Can't create CMID for %s: %ld\n",
                       libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
                rc = PTR_ERR(cmid);
                goto failed;
        }

        dev = net->ibn_dev;
        memset(&srcaddr, 0, sizeof(srcaddr));
        srcaddr.sin_family = AF_INET;
        srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip);

        memset(&dstaddr, 0, sizeof(dstaddr));
        dstaddr.sin_family = AF_INET;
        dstaddr.sin_port = htons(*kiblnd_tunables.kib_service);
        dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid));

        kiblnd_peer_addref(peer);               /* cmid's ref */

        rc = rdma_resolve_addr(cmid,
                               (struct sockaddr *)&srcaddr,
                               (struct sockaddr *)&dstaddr,
                               *kiblnd_tunables.kib_timeout * 1000);
        if (rc == 0) {
                LASSERT (cmid->device != NULL);
                CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
                       libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
                       HIPQUAD(dev->ibd_ifip), cmid->device->name);
                return;
        }

        /* Can't initiate address resolution:  */
        CERROR("Can't resolve addr for %s: %d\n",
               libcfs_nid2str(peer->ibp_nid), rc);

        kiblnd_peer_decref(peer);               /* cmid's ref */
        rdma_destroy_id(cmid);
 failed:
        kiblnd_peer_connect_failed(peer, 1, rc);
}

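/* kiblnd_launch_tx() looks up the peer optimistically under the read lock
 * (the common, connected case), then retries under the write lock before
 * creating a new peer, so concurrent launches can't add the same peer to
 * the table twice. */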
void
kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
{
        kib_peer_t    *peer;
        kib_peer_t    *peer2;
        kib_conn_t    *conn;
        rwlock_t      *g_lock = &kiblnd_data.kib_global_lock;
        unsigned long  flags;
        int            rc;

        /* If I get here, I've committed to send, so I complete the tx with
         * failure on any problems */

        LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
        LASSERT (tx == NULL || tx->tx_nwrq > 0);     /* work items have been set up */

        /* First time, just use a read lock since I expect to find my peer
         * connected */
        read_lock_irqsave(g_lock, flags);

        peer = kiblnd_find_peer_locked(nid);
        if (peer != NULL && !list_empty(&peer->ibp_conns)) {
                /* Found a peer with an established connection */
                conn = kiblnd_get_conn_locked(peer);
                kiblnd_conn_addref(conn); /* 1 ref for me... */

                read_unlock_irqrestore(g_lock, flags);

                if (tx != NULL)
                        kiblnd_queue_tx(tx, conn);
                kiblnd_conn_decref(conn); /* ...to here */
                return;
        }

        read_unlock(g_lock);
        /* Re-try with a write lock */
        write_lock(g_lock);

        peer = kiblnd_find_peer_locked(nid);
        if (peer != NULL) {
                if (list_empty(&peer->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
                        LASSERT (peer->ibp_connecting != 0 ||
                                 peer->ibp_accepting != 0);
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
                        write_unlock_irqrestore(g_lock, flags);
                } else {
                        conn = kiblnd_get_conn_locked(peer);
                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        write_unlock_irqrestore(g_lock, flags);

                        if (tx != NULL)
                                kiblnd_queue_tx(tx, conn);
                        kiblnd_conn_decref(conn); /* ...to here */
                }
                return;
        }

        write_unlock_irqrestore(g_lock, flags);

        /* Allocate a peer ready to add to the peer table and retry */
        rc = kiblnd_create_peer(ni, &peer, nid);
        if (rc != 0) {
                CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
                if (tx != NULL) {
                        tx->tx_status = -EHOSTUNREACH;
                        tx->tx_waiting = 0;
                        kiblnd_tx_done(ni, tx);
                }
                return;
        }

        write_lock_irqsave(g_lock, flags);

        peer2 = kiblnd_find_peer_locked(nid);
        if (peer2 != NULL) {
                if (list_empty(&peer2->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
                        LASSERT (peer2->ibp_connecting != 0 ||
                                 peer2->ibp_accepting != 0);
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue);
                        write_unlock_irqrestore(g_lock, flags);
                } else {
                        conn = kiblnd_get_conn_locked(peer2);
                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        write_unlock_irqrestore(g_lock, flags);

                        if (tx != NULL)
                                kiblnd_queue_tx(tx, conn);
                        kiblnd_conn_decref(conn); /* ...to here */
                }

                kiblnd_peer_decref(peer);
                return;
        }

        /* Brand new peer */
        LASSERT (peer->ibp_connecting == 0);
        peer->ibp_connecting = 1;

        /* always called with a ref on ni, which prevents ni being shutdown */
        LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);

        if (tx != NULL)
                list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);

        kiblnd_peer_addref(peer);
        list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

        write_unlock_irqrestore(g_lock, flags);

        kiblnd_connect_peer(peer);
        kiblnd_peer_decref(peer);
}

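/* kiblnd_send() maps LNET messages onto the wire protocol: anything that
 * fits in IBLND_MSG_SIZE goes IMMEDIATE; a larger PUT/REPLY payload sends a
 * PUT_REQ and waits for PUT_ACK, and a large GET reply is RDMA'd directly
 * via a GET_REQ carrying the sink descriptor. */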
int
kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
        lnet_hdr_t       *hdr = &lntmsg->msg_hdr;
        int               type = lntmsg->msg_type;
        lnet_process_id_t target = lntmsg->msg_target;
        int               target_is_router = lntmsg->msg_target_is_router;
        int               routing = lntmsg->msg_routing;
        unsigned int      payload_niov = lntmsg->msg_niov;
        struct iovec     *payload_iov = lntmsg->msg_iov;
        lnet_kiov_t      *payload_kiov = lntmsg->msg_kiov;
        unsigned int      payload_offset = lntmsg->msg_offset;
        unsigned int      payload_nob = lntmsg->msg_len;
        kib_msg_t        *ibmsg;
        kib_tx_t         *tx;
        int               nob;
        int               rc;

        /* NB 'private' is different depending on what we're sending.... */

        CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));

        LASSERT (payload_nob == 0 || payload_niov > 0);
        LASSERT (payload_niov <= LNET_MAX_IOV);

        /* Thread context */
        LASSERT (!in_interrupt());
        /* payload is either all vaddrs or all pages */
        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

        switch (type) {
        default:
                LBUG();
                return (-EIO);

        case LNET_MSG_ACK:
                LASSERT (payload_nob == 0);
                break;

        case LNET_MSG_GET:
                if (routing || target_is_router)
                        break;                  /* send IMMEDIATE */

                /* is the REPLY message too small for RDMA? */
                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
                if (nob <= IBLND_MSG_SIZE)
                        break;                  /* send IMMEDIATE */

                tx = kiblnd_get_idle_tx(ni);
                if (tx == NULL) {
                        CERROR("Can't allocate txd for GET to %s\n",
                               libcfs_nid2str(target.nid));
                        return -ENOMEM;
                }

                ibmsg = tx->tx_msg;

                if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
                        rc = kiblnd_setup_rd_iov(ni, tx,
                                                 &ibmsg->ibm_u.get.ibgm_rd,
                                                 lntmsg->msg_md->md_niov,
                                                 lntmsg->msg_md->md_iov.iov,
                                                 0, lntmsg->msg_md->md_length);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx,
                                                  &ibmsg->ibm_u.get.ibgm_rd,
                                                  lntmsg->msg_md->md_niov,
                                                  lntmsg->msg_md->md_iov.kiov,
                                                  0, lntmsg->msg_md->md_length);
                if (rc != 0) {
                        CERROR("Can't setup GET sink for %s: %d\n",
                               libcfs_nid2str(target.nid), rc);
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
                ibmsg->ibm_u.get.ibgm_hdr = *hdr;

                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);

                tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
                if (tx->tx_lntmsg[1] == NULL) {
                        CERROR("Can't create reply for GET -> %s\n",
                               libcfs_nid2str(target.nid));
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg[0,1] on completion */
                tx->tx_waiting = 1;             /* waiting for GET_DONE */
                kiblnd_launch_tx(ni, tx, target.nid);
                return 0;

        case LNET_MSG_REPLY:
        case LNET_MSG_PUT:
                /* Is the payload small enough not to need RDMA? */
                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
                if (nob <= IBLND_MSG_SIZE)
                        break;                  /* send IMMEDIATE */

                tx = kiblnd_get_idle_tx(ni);
                if (tx == NULL) {
                        CERROR("Can't allocate %s txd for %s\n",
                               type == LNET_MSG_PUT ? "PUT" : "REPLY",
                               libcfs_nid2str(target.nid));
                        return -ENOMEM;
                }

                if (payload_kiov == NULL)
                        rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
                                                 payload_niov, payload_iov,
                                                 payload_offset, payload_nob);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
                                                  payload_niov, payload_kiov,
                                                  payload_offset, payload_nob);
                if (rc != 0) {
                        CERROR("Can't setup PUT src for %s: %d\n",
                               libcfs_nid2str(target.nid), rc);
                        kiblnd_tx_done(ni, tx);
                        return -EIO;
                }

                ibmsg = tx->tx_msg;
                ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
                ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));

                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
                tx->tx_waiting = 1;             /* waiting for PUT_{ACK,NAK} */
                kiblnd_launch_tx(ni, tx, target.nid);
                return 0;
        }

        /* send IMMEDIATE */

        LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
                 <= IBLND_MSG_SIZE);

        tx = kiblnd_get_idle_tx(ni);
        if (tx == NULL) {
                CERROR ("Can't send %d to %s: tx descs exhausted\n",
                        type, libcfs_nid2str(target.nid));
                return -ENOMEM;
        }

        ibmsg = tx->tx_msg;
        ibmsg->ibm_u.immediate.ibim_hdr = *hdr;

        if (payload_kiov != NULL)
                lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
                                    offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                    payload_niov, payload_kiov,
                                    payload_offset, payload_nob);
        else
                lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
                                   offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                   payload_niov, payload_iov,
                                   payload_offset, payload_nob);

        nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
        kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);

        tx->tx_lntmsg[0] = lntmsg;              /* finalise lntmsg on completion */
        kiblnd_launch_tx(ni, tx, target.nid);
        return 0;
}

void
kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
{
        lnet_process_id_t target = lntmsg->msg_target;
        unsigned int      niov = lntmsg->msg_niov;
        struct iovec     *iov = lntmsg->msg_iov;
        lnet_kiov_t      *kiov = lntmsg->msg_kiov;
        unsigned int      offset = lntmsg->msg_offset;
        unsigned int      nob = lntmsg->msg_len;
        kib_tx_t         *tx;
        int               rc;

        tx = kiblnd_get_idle_tx(ni);
        if (tx == NULL) {
                CERROR("Can't get tx for REPLY to %s\n",
                       libcfs_nid2str(target.nid));
                goto failed_0;
        }

        if (nob == 0)
                rc = 0;
        else if (kiov == NULL)
                rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
                                         niov, iov, offset, nob);
        else
                rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
                                          niov, kiov, offset, nob);

        if (rc != 0) {
                CERROR("Can't setup GET src for %s: %d\n",
                       libcfs_nid2str(target.nid), rc);
                goto failed_1;
        }

        rc = kiblnd_init_rdma(rx->rx_conn, tx,
                              IBLND_MSG_GET_DONE, nob,
                              &rx->rx_msg->ibm_u.get.ibgm_rd,
                              rx->rx_msg->ibm_u.get.ibgm_cookie);
        if (rc < 0) {
                CERROR("Can't setup rdma for GET from %s: %d\n",
                       libcfs_nid2str(target.nid), rc);
                goto failed_1;
        }

        if (nob == 0) {
                /* No RDMA: local completion may happen now! */
                lnet_finalize(ni, lntmsg, 0);
        } else {
                /* RDMA: lnet_finalize(lntmsg) when it
                 * completes */
                tx->tx_lntmsg[0] = lntmsg;
        }

        kiblnd_queue_tx(tx, rx->rx_conn);
        return;

 failed_1:
        kiblnd_tx_done(ni, tx);
 failed_0:
        lnet_finalize(ni, lntmsg, -EIO);
}

int
kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
             unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
             unsigned int offset, unsigned int mlen, unsigned int rlen)
{
        kib_rx_t    *rx = private;
        kib_msg_t   *rxmsg = rx->rx_msg;
        kib_conn_t  *conn = rx->rx_conn;
        kib_tx_t    *tx;
        kib_msg_t   *txmsg;
        int          nob;
        int          post_credit = IBLND_POSTRX_PEER_CREDIT;
        int          rc = 0;

        LASSERT (mlen <= rlen);
        LASSERT (!in_interrupt());
        /* Either all pages or all vaddrs */
        LASSERT (!(kiov != NULL && iov != NULL));

        switch (rxmsg->ibm_type) {
        default:
                LBUG();

        case IBLND_MSG_IMMEDIATE:
                nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
                if (nob > rx->rx_nob) {
                        CERROR ("Immediate message from %s too big: %d(%d)\n",
                                libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
                                nob, rx->rx_nob);
                        rc = -EPROTO;
                        break;
                }

                if (kiov != NULL)
                        lnet_copy_flat2kiov(niov, kiov, offset,
                                            IBLND_MSG_SIZE, rxmsg,
                                            offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                            mlen);
                else
                        lnet_copy_flat2iov(niov, iov, offset,
                                           IBLND_MSG_SIZE, rxmsg,
                                           offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
                                           mlen);
                lnet_finalize (ni, lntmsg, 0);
                break;

        case IBLND_MSG_PUT_REQ:
                if (mlen == 0) {
                        lnet_finalize(ni, lntmsg, 0);
                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
                                               rxmsg->ibm_u.putreq.ibprm_cookie);
                        break;
                }

                tx = kiblnd_get_idle_tx(ni);
                if (tx == NULL) {
                        CERROR("Can't allocate tx for %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        /* Not replying will break the connection */
                        rc = -ENOMEM;
                        break;
                }

                txmsg = tx->tx_msg;
                if (kiov == NULL)
                        rc = kiblnd_setup_rd_iov(ni, tx,
                                                 &txmsg->ibm_u.putack.ibpam_rd,
                                                 niov, iov, offset, mlen);
                else
                        rc = kiblnd_setup_rd_kiov(ni, tx,
                                                  &txmsg->ibm_u.putack.ibpam_rd,
                                                  niov, kiov, offset, mlen);
                if (rc != 0) {
                        CERROR("Can't setup PUT sink for %s: %d\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                        kiblnd_tx_done(ni, tx);
                        /* tell peer it's over */
                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
                                               rxmsg->ibm_u.putreq.ibprm_cookie);
                        break;
                }

                nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
                txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
                txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;

                kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);

                tx->tx_lntmsg[0] = lntmsg;      /* finalise lntmsg on completion */
                tx->tx_waiting = 1;             /* waiting for PUT_DONE */
                kiblnd_queue_tx(tx, conn);

                /* reposted buffer reserved for PUT_DONE */
                post_credit = IBLND_POSTRX_NO_CREDIT;
                break;

        case IBLND_MSG_GET_REQ:
                if (lntmsg != NULL) {
                        /* Optimized GET; RDMA lntmsg's payload */
                        kiblnd_reply(ni, rx, lntmsg);
                } else {
                        /* GET didn't match anything */
                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_GET_DONE,
                                               -ENODATA,
                                               rxmsg->ibm_u.get.ibgm_cookie);
                }
                break;
        }

        kiblnd_post_rx(rx, post_credit);
        return rc;
}

int
kiblnd_thread_start (int (*fn)(void *arg), void *arg)
{
        long pid = kernel_thread (fn, arg, 0);

        if (pid < 0)
                return ((int)pid);

        atomic_inc (&kiblnd_data.kib_nthreads);
        return (0);
}

void
kiblnd_thread_fini (void)
{
        atomic_dec (&kiblnd_data.kib_nthreads);
}

void
kiblnd_peer_alive (kib_peer_t *peer)
{
        /* This is racy, but everyone's only writing cfs_time_current() */
        peer->ibp_last_alive = cfs_time_current();
        mb();
}

void
kiblnd_peer_notify (kib_peer_t *peer)
{
        int           error = 0;
        cfs_time_t    last_alive = 0;
        unsigned long flags;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (list_empty(&peer->ibp_conns) &&
            peer->ibp_accepting == 0 &&
            peer->ibp_connecting == 0 &&
            peer->ibp_error != 0) {
                error = peer->ibp_error;
                peer->ibp_error = 0;

                last_alive = peer->ibp_last_alive;
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        if (error != 0)
                lnet_notify(peer->ibp_ni,
                            peer->ibp_nid, 0, last_alive);
}

void
kiblnd_close_conn_locked (kib_conn_t *conn, int error)
{
        /* This just does the immediate housekeeping.  'error' is zero for a
         * normal shutdown which can happen only after the connection has been
         * established.  If the connection is established, schedule the
         * connection to be finished off by the connd.  Otherwise the connd is
         * already dealing with it (either to set it up or tear it down).
         * Caller holds kib_global_lock exclusively in irq context */
        unsigned long flags;
        kib_peer_t   *peer = conn->ibc_peer;

        LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        if (error != 0 && conn->ibc_comms_error == 0)
                conn->ibc_comms_error = error;

        if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
                return; /* already being handled */

        if (error == 0 &&
            list_empty(&conn->ibc_tx_queue) &&
            list_empty(&conn->ibc_tx_queue_rsrvd) &&
            list_empty(&conn->ibc_tx_queue_nocred) &&
            list_empty(&conn->ibc_active_txs)) {
                CDEBUG(D_NET, "closing conn to %s\n",
                       libcfs_nid2str(peer->ibp_nid));
        } else {
                CNETERR("Closing conn to %s: error %d%s%s%s%s\n",
                        libcfs_nid2str(peer->ibp_nid), error,
                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
                                "" : "(sending_rsrvd)",
                        list_empty(&conn->ibc_tx_queue_nocred) ?
                                "" : "(sending_nocred)",
                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
        }

        list_del(&conn->ibc_list);
        /* connd (see below) takes over ibc_list's ref */

        if (list_empty (&peer->ibp_conns) &&    /* no more conns */
            kiblnd_peer_active(peer)) {         /* still in peer table */
                kiblnd_unlink_peer_locked(peer);

                /* set/clear error on last conn */
                peer->ibp_error = conn->ibc_comms_error;
        }

        kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);

        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);

        list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
        wake_up (&kiblnd_data.kib_connd_waitq);

        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}

void
kiblnd_close_conn (kib_conn_t *conn, int error)
{
        unsigned long flags;

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        kiblnd_close_conn_locked(conn, error);

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}

void
kiblnd_handle_early_rxs(kib_conn_t *conn)
{
        unsigned long flags;
        kib_rx_t     *rx;

        LASSERT (!in_interrupt());
        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        while (!list_empty(&conn->ibc_early_rxs)) {
                rx = list_entry(conn->ibc_early_rxs.next,
                                kib_rx_t, rx_list);
                list_del(&rx->rx_list);
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

                kiblnd_handle_rx(rx);

                write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }
        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}

void
kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
{
        LIST_HEAD        (zombies);
        struct list_head *tmp;
        struct list_head *nxt;
        kib_tx_t         *tx;

        spin_lock(&conn->ibc_lock);

        list_for_each_safe (tmp, nxt, txs) {
                tx = list_entry (tmp, kib_tx_t, tx_list);

                if (txs == &conn->ibc_active_txs) {
                        LASSERT (!tx->tx_queued);
                        LASSERT (tx->tx_waiting ||
                                 tx->tx_sending != 0);
                } else {
                        LASSERT (tx->tx_queued);
                }

                tx->tx_status = -ECONNABORTED;
                tx->tx_waiting = 0;

                if (tx->tx_sending == 0) {
                        tx->tx_queued = 0;
                        list_del (&tx->tx_list);
                        list_add (&tx->tx_list, &zombies);
                }
        }

        spin_unlock(&conn->ibc_lock);

        kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
                           &zombies, -ECONNABORTED);
}

void
kiblnd_finalise_conn (kib_conn_t *conn)
{
        LASSERT (!in_interrupt());
        LASSERT (conn->ibc_state > IBLND_CONN_INIT);

        kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);

        /* abort_receives moves QP state to IB_QPS_ERR.  This is only required
         * for connections that didn't get as far as being connected, because
         * rdma_disconnect() does this for free. */
        kiblnd_abort_receives(conn);

        /* Complete all tx descs not waiting for sends to complete.
         * NB we should be safe from RDMA now that the QP has changed state */

        kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
        kiblnd_abort_txs(conn, &conn->ibc_active_txs);

        kiblnd_handle_early_rxs(conn);
}

void
kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
{
        LIST_HEAD    (zombies);
        unsigned long flags;

        LASSERT (error != 0);
        LASSERT (!in_interrupt());

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        if (active) {
                LASSERT (peer->ibp_connecting > 0);
                peer->ibp_connecting--;
        } else {
                LASSERT (peer->ibp_accepting > 0);
                peer->ibp_accepting--;
        }

        if (peer->ibp_connecting != 0 ||
            peer->ibp_accepting != 0) {
                /* another connection attempt under way... */
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
                return;
        }

        if (list_empty(&peer->ibp_conns)) {
                /* Take peer's blocked transmits to complete with error */
                list_add(&zombies, &peer->ibp_tx_queue);
                list_del_init(&peer->ibp_tx_queue);

                if (kiblnd_peer_active(peer))
                        kiblnd_unlink_peer_locked(peer);

                peer->ibp_error = error;
        } else {
                /* Can't have blocked transmits if there are connections */
                LASSERT (list_empty(&peer->ibp_tx_queue));
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        kiblnd_peer_notify(peer);

        if (list_empty (&zombies))
                return;

        CNETERR("Deleting messages for %s: connection failed\n",
                libcfs_nid2str(peer->ibp_nid));

        kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
}

void
kiblnd_connreq_done(kib_conn_t *conn, int status)
{
        kib_peer_t       *peer = conn->ibc_peer;
        kib_tx_t         *tx;
        struct list_head  txs;
        unsigned long     flags;
        int               active;

        active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

        CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
               libcfs_nid2str(peer->ibp_nid), active,
               conn->ibc_version, status);

        LASSERT (!in_interrupt());
        LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
                  peer->ibp_connecting > 0) ||
                 (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
                  peer->ibp_accepting > 0));

        LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
        conn->ibc_connvars = NULL;

        if (status != 0) {
                /* failed to establish connection */
                kiblnd_peer_connect_failed(peer, active, status);
                kiblnd_finalise_conn(conn);
                return;
        }

        /* connection established */
        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        conn->ibc_last_send = jiffies;
        kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
        kiblnd_peer_alive(peer);

        /* Add conn to peer's list and nuke any dangling conns from a different
         * peer instance... */
        kiblnd_conn_addref(conn);               /* +1 ref for ibc_list */
        list_add(&conn->ibc_list, &peer->ibp_conns);
        if (active)
                peer->ibp_connecting--;
        else
                peer->ibp_accepting--;

        if (peer->ibp_version == 0) {
                peer->ibp_version     = conn->ibc_version;
                peer->ibp_incarnation = conn->ibc_incarnation;
        }

        if (peer->ibp_version     != conn->ibc_version ||
            peer->ibp_incarnation != conn->ibc_incarnation) {
                kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
                                                conn->ibc_incarnation);
                peer->ibp_version     = conn->ibc_version;
                peer->ibp_incarnation = conn->ibc_incarnation;
        }

        /* grab pending txs while I have the lock */
        list_add(&txs, &peer->ibp_tx_queue);
        list_del_init(&peer->ibp_tx_queue);

        if (!kiblnd_peer_active(peer) ||        /* peer has been deleted */
            conn->ibc_comms_error != 0) {       /* error has happened already */
                lnet_ni_t *ni = peer->ibp_ni;

                /* start to shut down connection */
                kiblnd_close_conn_locked(conn, -ECONNABORTED);
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

                kiblnd_txlist_done(ni, &txs, -ECONNABORTED);

                return;
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        /* Schedule blocked txs */
        spin_lock (&conn->ibc_lock);
        while (!list_empty (&txs)) {
                tx = list_entry (txs.next, kib_tx_t, tx_list);
                list_del(&tx->tx_list);

                kiblnd_queue_tx_locked(tx, conn);
        }
        spin_unlock (&conn->ibc_lock);

        kiblnd_check_sends(conn);

        /* schedule blocked rxs */
        kiblnd_handle_early_rxs(conn);
}

void
kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
{
        int rc;

        rc = rdma_reject(cmid, rej, sizeof(*rej));

        if (rc != 0)
                CWARN("Error %d sending reject\n", rc);
}

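/* Passive connection establishment: validate the connection request (magic,
 * version, stamps, queue depth, max frags, message size), find or create
 * the peer, resolve connection races, then accept with a CONNACK in the
 * private data; any failure falls through to a structured reject carrying
 * the parameters this side wanted. */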
static int
kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
        rwlock_t              *g_lock = &kiblnd_data.kib_global_lock;
        kib_msg_t             *reqmsg = priv;
        kib_msg_t             *ackmsg;
        kib_dev_t             *ibdev;
        kib_peer_t            *peer;
        kib_peer_t            *peer2;
        kib_conn_t            *conn;
        lnet_ni_t             *ni  = NULL;
        kib_net_t             *net = NULL;
        lnet_nid_t             nid;
        struct rdma_conn_param cp;
        kib_rej_t              rej;
        int                    version = IBLND_MSG_VERSION;
        unsigned long          flags;
        int                    rc;

        LASSERT (!in_interrupt());

        /* cmid inherits 'context' from the corresponding listener id */
        ibdev = (kib_dev_t *)cmid->context;
        LASSERT (ibdev != NULL);

        memset(&rej, 0, sizeof(rej));
        rej.ibr_magic                = IBLND_MSG_MAGIC;
        rej.ibr_why                  = IBLND_REJECT_FATAL;
        rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;

        if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
                CERROR("Short connection request\n");
                goto failed;
        }

        /* Future protocol version compatibility support!  If the
         * o2iblnd-specific protocol changes, or when LNET unifies
         * protocols over all LNDs, the initial connection will
         * negotiate a protocol version.  I trap this here to avoid
         * console errors; the reject tells the peer which protocol I
         * speak. */
        if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
            reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
                goto failed;
        if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
            reqmsg->ibm_version != IBLND_MSG_VERSION &&
            reqmsg->ibm_version != IBLND_MSG_VERSION_1)
                goto failed;
        if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
                goto failed;

        rc = kiblnd_unpack_msg(reqmsg, priv_nob);
        if (rc != 0) {
                CERROR("Can't parse connection request: %d\n", rc);
                goto failed;
        }

        nid = reqmsg->ibm_srcnid;
        ni  = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));

        if (ni != NULL) {
                net = (kib_net_t *)ni->ni_data;
                rej.ibr_incarnation = net->ibn_incarnation;
        }

        if (ni == NULL ||                       /* no matching net */
            ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
            net->ibn_dev != ibdev) {            /* wrong device */
                CERROR("Can't accept %s on %s (%s:%d:%u.%u.%u.%u): "
                       "bad dst nid %s\n", libcfs_nid2str(nid),
                       ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
                       ibdev->ibd_ifname, ibdev->ibd_nnets,
                       HIPQUAD(ibdev->ibd_ifip),
                       libcfs_nid2str(reqmsg->ibm_dstnid));

                goto failed;
        }

        /* check time stamp as soon as possible */
        if (reqmsg->ibm_dststamp != 0 &&
            reqmsg->ibm_dststamp != net->ibn_incarnation) {
                CWARN("Stale connection request\n");
                rej.ibr_why = IBLND_REJECT_CONN_STALE;
                goto failed;
        }

        /* I can accept peer's version */
        version = reqmsg->ibm_version;

        if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
                CERROR("Unexpected connreq msg type: %x from %s\n",
                       reqmsg->ibm_type, libcfs_nid2str(nid));
                goto failed;
        }

        if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
            IBLND_MSG_QUEUE_SIZE(version)) {
                CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
                       libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
                       IBLND_MSG_QUEUE_SIZE(version));

                if (version == IBLND_MSG_VERSION)
                        rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;

                goto failed;
        }

        if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
            IBLND_RDMA_FRAGS(version)) {
                CERROR("Can't accept %s(version %x): "
                       "incompatible max_frags %d (%d wanted)\n",
                       libcfs_nid2str(nid), version,
                       reqmsg->ibm_u.connparams.ibcp_max_frags,
                       IBLND_RDMA_FRAGS(version));

                if (version == IBLND_MSG_VERSION)
                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;

                goto failed;
        }

        if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
                CERROR("Can't accept %s: message size %d too big (%d max)\n",
                       libcfs_nid2str(nid),
                       reqmsg->ibm_u.connparams.ibcp_max_msg_size,
                       IBLND_MSG_SIZE);
                goto failed;
        }

        /* assume 'nid' is a new peer; create  */
        rc = kiblnd_create_peer(ni, &peer, nid);
        if (rc != 0) {
                CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                goto failed;
        }

        write_lock_irqsave(g_lock, flags);

        peer2 = kiblnd_find_peer_locked(nid);
        if (peer2 != NULL) {
                if (peer2->ibp_version == 0) {
                        peer2->ibp_version     = version;
                        peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
                }

                /* not the guy I've talked with */
                if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
                    peer2->ibp_version     != version) {
                        kiblnd_close_peer_conns_locked(peer2, -ESTALE);
                        write_unlock_irqrestore(g_lock, flags);

                        CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
                              libcfs_nid2str(nid), peer2->ibp_version, version);

                        kiblnd_peer_decref(peer);
                        rej.ibr_why = IBLND_REJECT_CONN_STALE;
                        goto failed;
                }

                /* tie-break connection race in favour of the higher NID */
                if (peer2->ibp_connecting != 0 &&
                    nid < ni->ni_nid) {
                        write_unlock_irqrestore(g_lock, flags);

                        CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));

                        kiblnd_peer_decref(peer);
                        rej.ibr_why = IBLND_REJECT_CONN_RACE;
                        goto failed;
                }

                peer2->ibp_accepting++;
                kiblnd_peer_addref(peer2);

                write_unlock_irqrestore(g_lock, flags);
                kiblnd_peer_decref(peer);
                peer = peer2;
        } else {
                /* Brand new peer */
                LASSERT (peer->ibp_accepting == 0);
                LASSERT (peer->ibp_version == 0 &&
                         peer->ibp_incarnation == 0);

                peer->ibp_accepting   = 1;
                peer->ibp_version     = version;
                peer->ibp_incarnation = reqmsg->ibm_srcstamp;

                /* I have a ref on ni that prevents it being shutdown */
                LASSERT (net->ibn_shutdown == 0);

                kiblnd_peer_addref(peer);
                list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));

                write_unlock_irqrestore(g_lock, flags);
        }

        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
        if (conn == NULL) {
                kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
                kiblnd_peer_decref(peer);
                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                goto failed;
        }

        /* conn now "owns" cmid, so I return success from here on to ensure the
         * CM callback doesn't destroy cmid. */

        conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
        conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
                 <= IBLND_RX_MSGS(version));

        ackmsg = &conn->ibc_connvars->cv_msg;
        memset(ackmsg, 0, sizeof(*ackmsg));

        kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
                        sizeof(ackmsg->ibm_u.connparams));
        ackmsg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
        ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
        ackmsg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);

        kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);

        memset(&cp, 0, sizeof(cp));
        cp.private_data        = ackmsg;
        cp.private_data_len    = ackmsg->ibm_nob;
        cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
        cp.initiator_depth     = 0;
        cp.flow_control        = 1;
        cp.retry_count         = *kiblnd_tunables.kib_retry_count;
        cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;

        CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));

        rc = rdma_accept(cmid, &cp);
        if (rc != 0) {
                CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
                rej.ibr_version = version;
                rej.ibr_why     = IBLND_REJECT_FATAL;

                kiblnd_reject(cmid, &rej);
                kiblnd_connreq_done(conn, rc);
                kiblnd_conn_decref(conn);
        }

        lnet_ni_decref(ni);
        return 0;

 failed:
        if (ni != NULL)
                lnet_ni_decref(ni);

        rej.ibr_version             = version;
        rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
        rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
        kiblnd_reject(cmid, &rej);

        return -ECONNREFUSED;
}

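/* kiblnd_reconnect() retries the connection if it's still wanted (queued
 * txs, or a version change to record) and no other attempt is in flight,
 * adopting the version and incarnation learned from the reject. */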

void
kiblnd_reconnect (kib_conn_t *conn, int version,
                  __u64 incarnation, int why, kib_connparams_t *cp)
{
        kib_peer_t      *peer = conn->ibc_peer;
        char            *reason;
        int              retry = 0;
        unsigned long    flags;

        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
        LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */

        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        /* retry connection if it's still needed and no other connection
         * attempts (active or passive) are in progress
         * NB: reconnect is still needed even when ibp_tx_queue is
         * empty if ibp_version != version because reconnect may be
         * initiated by kiblnd_query() */
        if ((!list_empty(&peer->ibp_tx_queue) ||
             peer->ibp_version != version) &&
            peer->ibp_connecting == 1 &&
            peer->ibp_accepting == 0) {
                retry = 1;
                peer->ibp_connecting++;

                peer->ibp_version     = version;
                peer->ibp_incarnation = incarnation;
        }

        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        if (!retry)
                return;

        switch (why) {
        default:
                reason = "Unknown";
                break;

        case IBLND_REJECT_CONN_STALE:
                reason = "stale";
                break;

        case IBLND_REJECT_CONN_RACE:
                reason = "conn race";
                break;

        case IBLND_REJECT_CONN_UNCOMPAT:
                reason = "version negotiation";
                break;
        }

        CNETERR("%s: retrying (%s), %x, %x, "
                "queue_dep: %d, max_frag: %d, msg_size: %d\n",
                libcfs_nid2str(peer->ibp_nid),
                reason, IBLND_MSG_VERSION, version,
                cp != NULL ? cp->ibcp_queue_depth  : IBLND_MSG_QUEUE_SIZE(version),
                cp != NULL ? cp->ibcp_max_frags    : IBLND_RDMA_FRAGS(version),
                cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);

        kiblnd_connect_peer(peer);
}
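
/* Illustrative sketch (not compiled): the retry predicate used above, written
 * out as a hypothetical helper.  A rejected active connect is retried only
 * when the connection is still wanted (txs queued, or a version change to
 * renegotiate) and this attempt is the only one in flight on either side. */
#if 0
static int
kiblnd_should_retry_locked(kib_peer_t *peer, int version)
{
        return (!list_empty(&peer->ibp_tx_queue) ||
                peer->ibp_version != version) &&  /* still needed */
               peer->ibp_connecting == 1 &&       /* only my attempt */
               peer->ibp_accepting == 0;          /* no passive attempt */
}
#endif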

void
kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
{
        kib_peer_t    *peer = conn->ibc_peer;

        LASSERT (!in_interrupt());
        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

        switch (reason) {
        case IB_CM_REJ_STALE_CONN:
                kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
                                 IBLND_REJECT_CONN_STALE, NULL);
                break;

        case IB_CM_REJ_INVALID_SERVICE_ID:
                CNETERR("%s rejected: no listener at %d\n",
                        libcfs_nid2str(peer->ibp_nid),
                        *kiblnd_tunables.kib_service);
                break;

        case IB_CM_REJ_CONSUMER_DEFINED:
                if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
                        kib_rej_t        *rej         = priv;
                        kib_connparams_t *cp          = NULL;
                        int               flip        = 0;
                        __u64             incarnation = -1;

                        /* NB. default incarnation is -1 because:
                         * a) V1 will ignore dst incarnation in connreq.
                         * b) V2 will provide incarnation while rejecting me,
                         *    so -1 will be overwritten.
                         *
                         * If I connect to a V1 peer with the V2 protocol and
                         * it rejects me, then upgrades to V2, I know nothing
                         * about the upgrade and retry with V1.  In that case
                         * the upgraded V2 peer can tell I'm talking to the
                         * old version and rejects me (incarnation is -1). */

                        if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
                            rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
                                __swab32s(&rej->ibr_magic);
                                __swab16s(&rej->ibr_version);
                                flip = 1;
                        }

                        if (priv_nob >= sizeof(kib_rej_t) &&
                            rej->ibr_version > IBLND_MSG_VERSION_1) {
                                /* priv_nob is always 148 in current versions
                                 * of OFED (see the definition of
                                 * IB_CM_REJ_PRIVATE_DATA_SIZE), so we still
                                 * need to check the version. */
                                cp = &rej->ibr_cp;

                                if (flip) {
                                        __swab64s(&rej->ibr_incarnation);
                                        __swab16s(&cp->ibcp_queue_depth);
                                        __swab16s(&cp->ibcp_max_frags);
                                        __swab32s(&cp->ibcp_max_msg_size);
                                }

                                incarnation = rej->ibr_incarnation;
                        }

                        if (rej->ibr_magic != IBLND_MSG_MAGIC &&
                            rej->ibr_magic != LNET_PROTO_MAGIC) {
                                CERROR("%s rejected: consumer defined fatal error\n",
                                       libcfs_nid2str(peer->ibp_nid));
                                break;
                        }

                        if (rej->ibr_version != IBLND_MSG_VERSION &&
                            rej->ibr_version != IBLND_MSG_VERSION_1) {
                                CERROR("%s rejected: o2iblnd version %x error\n",
                                       libcfs_nid2str(peer->ibp_nid),
                                       rej->ibr_version);
                                break;
                        }

                        if (rej->ibr_why     == IBLND_REJECT_FATAL &&
                            rej->ibr_version == IBLND_MSG_VERSION_1) {
                                CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
                                       libcfs_nid2str(peer->ibp_nid),
                                       rej->ibr_version);

                                if (conn->ibc_version != IBLND_MSG_VERSION_1)
                                        rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
                        }

                        switch (rej->ibr_why) {
                        case IBLND_REJECT_CONN_RACE:
                        case IBLND_REJECT_CONN_STALE:
                        case IBLND_REJECT_CONN_UNCOMPAT:
                                kiblnd_reconnect(conn, rej->ibr_version,
                                                 incarnation, rej->ibr_why, cp);
                                break;

                        case IBLND_REJECT_MSG_QUEUE_SIZE:
                                CERROR("%s rejected: incompatible message queue depth %d, %d\n",
                                       libcfs_nid2str(peer->ibp_nid),
                                       cp->ibcp_queue_depth,
                                       IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
                                break;

                        case IBLND_REJECT_RDMA_FRAGS:
                                CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
                                       libcfs_nid2str(peer->ibp_nid),
                                       cp->ibcp_max_frags,
                                       IBLND_RDMA_FRAGS(conn->ibc_version));
                                break;

                        case IBLND_REJECT_NO_RESOURCES:
                                CERROR("%s rejected: o2iblnd no resources\n",
                                       libcfs_nid2str(peer->ibp_nid));
                                break;

                        case IBLND_REJECT_FATAL:
                                CERROR("%s rejected: o2iblnd fatal error\n",
                                       libcfs_nid2str(peer->ibp_nid));
                                break;

                        default:
                                CERROR("%s rejected: o2iblnd reason %d\n",
                                       libcfs_nid2str(peer->ibp_nid),
                                       rej->ibr_why);
                                break;
                        }
                        break;
                }
                /* fall through */
        default:
                CNETERR("%s rejected: reason %d, size %d\n",
                        libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
                break;
        }

        kiblnd_connreq_done(conn, -ECONNREFUSED);
}
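
/* Illustrative sketch (not compiled): how the magic doubles as a byte-order
 * probe above.  A peer writes its native-endian magic; if we read it
 * byte-swapped, every multi-byte field in the rejection payload needs the
 * same swap before use.  The helper name here is hypothetical. */
#if 0
static int
kib_rej_needs_flip(__u32 magic)
{
        if (magic == IBLND_MSG_MAGIC || magic == LNET_PROTO_MAGIC)
                return 0;                       /* same byte order */
        if (magic == __swab32(IBLND_MSG_MAGIC) ||
            magic == __swab32(LNET_PROTO_MAGIC))
                return 1;                       /* opposite byte order */
        return -1;                              /* not a kib_rej_t at all */
}
#endif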

void
kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
{
        kib_peer_t    *peer = conn->ibc_peer;
        lnet_ni_t     *ni   = peer->ibp_ni;
        kib_net_t     *net  = ni->ni_data;
        kib_msg_t     *msg  = priv;
        int            ver  = conn->ibc_version;
        int            rc   = kiblnd_unpack_msg(msg, priv_nob);
        unsigned long  flags;

        LASSERT (net != NULL);

        if (rc != 0) {
                CERROR("Can't unpack connack from %s: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc);
                goto failed;
        }

        if (msg->ibm_type != IBLND_MSG_CONNACK) {
                CERROR("Unexpected message %d from %s\n",
                       msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
                rc = -EPROTO;
                goto failed;
        }

        if (ver != msg->ibm_version) {
                CERROR("%s replied version %x differs from "
                       "requested version %x\n",
                       libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
                rc = -EPROTO;
                goto failed;
        }

        if (msg->ibm_u.connparams.ibcp_queue_depth !=
            IBLND_MSG_QUEUE_SIZE(ver)) {
                CERROR("%s has incompatible queue depth %d (%d wanted)\n",
                       libcfs_nid2str(peer->ibp_nid),
                       msg->ibm_u.connparams.ibcp_queue_depth,
                       IBLND_MSG_QUEUE_SIZE(ver));
                rc = -EPROTO;
                goto failed;
        }

        if (msg->ibm_u.connparams.ibcp_max_frags !=
            IBLND_RDMA_FRAGS(ver)) {
                CERROR("%s has incompatible max_frags %d (%d wanted)\n",
                       libcfs_nid2str(peer->ibp_nid),
                       msg->ibm_u.connparams.ibcp_max_frags,
                       IBLND_RDMA_FRAGS(ver));
                rc = -EPROTO;
                goto failed;
        }

        if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
                CERROR("%s max message size %d too big (%d max)\n",
                       libcfs_nid2str(peer->ibp_nid),
                       msg->ibm_u.connparams.ibcp_max_msg_size,
                       IBLND_MSG_SIZE);
                rc = -EPROTO;
                goto failed;
        }

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        if (msg->ibm_dstnid == ni->ni_nid &&
            msg->ibm_dststamp == net->ibn_incarnation)
                rc = 0;
        else
                rc = -ESTALE;
        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        if (rc != 0) {
                CERROR("Bad connection reply from %s, rc = %d, "
                       "version: %x max_frags: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc,
                       msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
                goto failed;
        }

        conn->ibc_incarnation      = msg->ibm_srcstamp;
        conn->ibc_credits          =
        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits +
                 IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(ver));

        kiblnd_connreq_done(conn, 0);
        return;

 failed:
        /* NB My QP has already established itself, so I handle anything going
         * wrong here by setting ibc_comms_error.
         * kiblnd_connreq_done(0) moves the conn state to ESTABLISHED, but then
         * immediately tears it down. */

        conn->ibc_comms_error = rc;
        kiblnd_connreq_done(conn, 0);
}
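
/* Note: the CONNACK checks above demand exact equality for queue depth and
 * max_frags (both sides derive them from the negotiated version) but only an
 * upper bound for max_msg_size.  Any mismatch is -EPROTO, and since the QP is
 * already established by then, the error is parked in ibc_comms_error and the
 * conn is torn down straight after "establishing". */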

int
kiblnd_active_connect (struct rdma_cm_id *cmid)
{
        kib_peer_t              *peer = (kib_peer_t *)cmid->context;
        kib_conn_t              *conn;
        kib_msg_t               *msg;
        struct rdma_conn_param   cp;
        int                      version;
        __u64                    incarnation;
        unsigned long            flags;
        int                      rc;

        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        incarnation = peer->ibp_incarnation;
        version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
        if (conn == NULL) {
                kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
                kiblnd_peer_decref(peer); /* lose cmid's ref */
                return -ENOMEM;
        }

        /* conn "owns" cmid now, so I return success from here on to ensure the
         * CM callback doesn't destroy cmid. conn also takes over cmid's ref
         * on 'peer'. */

        msg = &conn->ibc_connvars->cv_msg;

        memset(msg, 0, sizeof(*msg));
        kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
        msg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
        msg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
        msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;

        kiblnd_pack_msg(peer->ibp_ni, msg, version,
                        0, peer->ibp_nid, incarnation);

        memset(&cp, 0, sizeof(cp));
        cp.private_data        = msg;
        cp.private_data_len    = msg->ibm_nob;
        cp.responder_resources = 0;             /* No atomic ops or RDMA reads */
        cp.initiator_depth     = 0;
        cp.flow_control        = 1;
        cp.retry_count         = *kiblnd_tunables.kib_retry_count;
        cp.rnr_retry_count     = *kiblnd_tunables.kib_rnr_retry_count;

        LASSERT(cmid->context == (void *)conn);
        LASSERT(conn->ibc_cmid == cmid);

        rc = rdma_connect(cmid, &cp);
        if (rc != 0) {
                CERROR("Can't connect to %s: %d\n",
                       libcfs_nid2str(peer->ibp_nid), rc);
                kiblnd_connreq_done(conn, rc);
                kiblnd_conn_decref(conn);
        }

        return 0;
}
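
/* Note: both the active CONNREQ above and the passive CONNACK set
 * responder_resources and initiator_depth to 0; as the in-line comments say,
 * o2iblnd never issues RDMA reads or atomics, so neither side needs the peer
 * to reserve responder resources on its behalf. */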

int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
        kib_peer_t  *peer;
        kib_conn_t  *conn;
        int          rc;

        switch (event->event) {
        default:
                CERROR("Unexpected event: %d, status: %d\n",
                       event->event, event->status);
                LBUG();

        case RDMA_CM_EVENT_CONNECT_REQUEST:
                /* destroy cmid on failure */
                rc = kiblnd_passive_connect(cmid,
                                            (void *)KIBLND_CONN_PARAM(event),
                                            KIBLND_CONN_PARAM_LEN(event));
                CDEBUG(D_NET, "connreq: %d\n", rc);
                return rc;

        case RDMA_CM_EVENT_ADDR_ERROR:
                peer = (kib_peer_t *)cmid->context;
                CNETERR("%s: ADDR ERROR %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
                kiblnd_peer_decref(peer);
                return -EHOSTUNREACH;           /* rc != 0 destroys cmid */

        case RDMA_CM_EVENT_ADDR_RESOLVED:
                peer = (kib_peer_t *)cmid->context;

                CDEBUG(D_NET,"%s Addr resolved: %d\n",
                       libcfs_nid2str(peer->ibp_nid), event->status);

                if (event->status != 0) {
                        CNETERR("Can't resolve address for %s: %d\n",
                                libcfs_nid2str(peer->ibp_nid), event->status);
                        rc = event->status;
                } else {
                        rc = rdma_resolve_route(
                                cmid, *kiblnd_tunables.kib_timeout * 1000);
                        if (rc == 0)
                                return 0;
                        /* Can't initiate route resolution */
                        CERROR("Can't resolve route for %s: %d\n",
                               libcfs_nid2str(peer->ibp_nid), rc);
                }
                kiblnd_peer_connect_failed(peer, 1, rc);
                kiblnd_peer_decref(peer);
                return rc;                      /* rc != 0 destroys cmid */

        case RDMA_CM_EVENT_ROUTE_ERROR:
                peer = (kib_peer_t *)cmid->context;
                CNETERR("%s: ROUTE ERROR %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
                kiblnd_peer_decref(peer);
                return -EHOSTUNREACH;           /* rc != 0 destroys cmid */

        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                peer = (kib_peer_t *)cmid->context;
                CDEBUG(D_NET,"%s Route resolved: %d\n",
                       libcfs_nid2str(peer->ibp_nid), event->status);

                if (event->status == 0)
                        return kiblnd_active_connect(cmid);

                CNETERR("Can't resolve route for %s: %d\n",
                        libcfs_nid2str(peer->ibp_nid), event->status);
                kiblnd_peer_connect_failed(peer, 1, event->status);
                kiblnd_peer_decref(peer);
                return event->status;           /* rc != 0 destroys cmid */

        case RDMA_CM_EVENT_UNREACHABLE:
                conn = (kib_conn_t *)cmid->context;
                LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                        conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
                CNETERR("%s: UNREACHABLE %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
                kiblnd_connreq_done(conn, -ENETDOWN);
                kiblnd_conn_decref(conn);
                return 0;

        case RDMA_CM_EVENT_CONNECT_ERROR:
                conn = (kib_conn_t *)cmid->context;
                LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                        conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
                CNETERR("%s: CONNECT ERROR %d\n",
                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
                kiblnd_connreq_done(conn, -ENOTCONN);
                kiblnd_conn_decref(conn);
                return 0;

        case RDMA_CM_EVENT_REJECTED:
                conn = (kib_conn_t *)cmid->context;
                switch (conn->ibc_state) {
                default:
                        LBUG();

                case IBLND_CONN_PASSIVE_WAIT:
                        CERROR ("%s: REJECTED %d\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid),
                                event->status);
                        kiblnd_connreq_done(conn, -ECONNRESET);
                        break;

                case IBLND_CONN_ACTIVE_CONNECT:
                        kiblnd_rejected(conn, event->status,
                                        (void *)KIBLND_CONN_PARAM(event),
                                        KIBLND_CONN_PARAM_LEN(event));
                        break;
                }
                kiblnd_conn_decref(conn);
                return 0;

        case RDMA_CM_EVENT_ESTABLISHED:
                conn = (kib_conn_t *)cmid->context;
                switch (conn->ibc_state) {
                default:
                        LBUG();

                case IBLND_CONN_PASSIVE_WAIT:
                        CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        kiblnd_connreq_done(conn, 0);
                        break;

                case IBLND_CONN_ACTIVE_CONNECT:
                        CDEBUG(D_NET, "ESTABLISHED (active): %s\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        kiblnd_check_connreply(conn,
                                               (void *)KIBLND_CONN_PARAM(event),
                                               KIBLND_CONN_PARAM_LEN(event));
                        break;
                }
                /* net keeps its ref on conn! */
                return 0;

#ifdef HAVE_OFED_RDMA_CMEV_TIMEWAIT_EXIT
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
                return 0;
#endif
        case RDMA_CM_EVENT_DISCONNECTED:
                conn = (kib_conn_t *)cmid->context;
                if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
                        CERROR("%s DISCONNECTED\n",
                               libcfs_nid2str(conn->ibc_peer->ibp_nid));
                        kiblnd_connreq_done(conn, -ECONNRESET);
                } else {
                        kiblnd_close_conn(conn, 0);
                }
                kiblnd_conn_decref(conn);
                cmid->context = NULL;
                return 0;

        case RDMA_CM_EVENT_DEVICE_REMOVAL:
                LCONSOLE_ERROR_MSG(0x131,
                                   "Received notification of device removal\n"
                                   "Please shutdown LNET to allow this to proceed\n");
                /* Can't remove network from underneath LNET for now, so I have
                 * to ignore this */
                return 0;

#ifdef HAVE_OFED_RDMA_CMEV_ADDRCHANGE
        case RDMA_CM_EVENT_ADDR_CHANGE:
                LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
                return 0;
#endif
        }
}
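
/* Note on the return convention in kiblnd_cm_callback(): the RDMA CM destroys
 * the cmid when a callback returns non-zero.  Before a conn has taken
 * ownership of the cmid we return an error precisely to get it destroyed for
 * us; once a conn "owns" it we must return 0 and leave destruction to the
 * conn's own teardown path. */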

int
kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
{
        kib_tx_t          *tx;
        struct list_head  *ttmp;
        int                timed_out = 0;

        spin_lock(&conn->ibc_lock);

        list_for_each (ttmp, txs) {
                tx = list_entry (ttmp, kib_tx_t, tx_list);

                if (txs != &conn->ibc_active_txs) {
                        LASSERT (tx->tx_queued);
                } else {
                        LASSERT (!tx->tx_queued);
                        LASSERT (tx->tx_waiting || tx->tx_sending != 0);
                }

                if (time_after_eq (jiffies, tx->tx_deadline)) {
                        timed_out = 1;
                        CERROR("Timed out tx: %s, %lu seconds\n",
                               kiblnd_queue2str(conn, txs),
                               cfs_duration_sec(jiffies - tx->tx_deadline));
                        break;
                }
        }

        spin_unlock(&conn->ibc_lock);
        return timed_out;
}

int
kiblnd_conn_timed_out (kib_conn_t *conn)
{
        return  kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
                kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
                kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
                kiblnd_check_txs(conn, &conn->ibc_active_txs);
}
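
/* Note: a connection is deemed timed out as soon as any tx on any of its
 * queues has passed tx_deadline; the send queues hold txs still flagged
 * tx_queued, while active txs must be awaiting a send completion or a peer
 * response, as the asserts in kiblnd_check_txs() verify. */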

void
kiblnd_check_conns (int idx)
{
        struct list_head  *peers = &kiblnd_data.kib_peers[idx];
        struct list_head  *ptmp;
        kib_peer_t        *peer;
        kib_conn_t        *conn;
        struct list_head  *ctmp;
        unsigned long      flags;

 again:
        /* NB. We expect to have a look at all the peers and not find any
         * rdmas to time out, so we just use a shared lock while we
         * take a look... */
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

        list_for_each (ptmp, peers) {
                peer = list_entry (ptmp, kib_peer_t, ibp_list);

                list_for_each (ctmp, &peer->ibp_conns) {
                        conn = list_entry (ctmp, kib_conn_t, ibc_list);

                        LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);

                        /* In case we have enough credits to return via a
                         * NOOP, but there were no non-blocking tx descs
                         * free to do it last time... */
                        kiblnd_check_sends(conn);

                        if (!kiblnd_conn_timed_out(conn))
                                continue;

                        /* Handle timeout by closing the whole connection. We
                         * can only be sure RDMA activity has ceased once the
                         * QP has been modified. */

                        kiblnd_conn_addref(conn); /* 1 ref for me... */

                        read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                               flags);

                        CERROR("Timed out RDMA with %s (%lu)\n",
                               libcfs_nid2str(peer->ibp_nid),
                               cfs_duration_sec(cfs_time_current() -
                                                peer->ibp_last_alive));

                        kiblnd_close_conn(conn, -ETIMEDOUT);
                        kiblnd_conn_decref(conn); /* ...until here */

                        /* start again now I've dropped the lock */
                        goto again;
                }
        }

        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
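
/* Note: the scan above takes the global lock shared because timeouts are
 * expected to be rare.  On a hit it takes its own ref on the conn, drops the
 * lock to close it (closing modifies the QP, which is what finally quiesces
 * RDMA activity), then restarts the whole bucket scan since the lists may
 * have changed while the lock was dropped. */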

void
kiblnd_disconnect_conn (kib_conn_t *conn)
{
        LASSERT (!in_interrupt());
        LASSERT (current == kiblnd_data.kib_connd);
        LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);

        rdma_disconnect(conn->ibc_cmid);
        kiblnd_finalise_conn(conn);

        kiblnd_peer_notify(conn->ibc_peer);
}

int
kiblnd_connd (void *arg)
{
        wait_queue_t       wait;
        unsigned long      flags;
        kib_conn_t        *conn;
        int                timeout;
        int                i;
        int                dropped_lock;
        int                peer_index = 0;
        unsigned long      deadline = jiffies;

        cfs_daemonize ("kiblnd_connd");
        cfs_block_allsigs ();

        init_waitqueue_entry (&wait, current);
        kiblnd_data.kib_connd = current;

        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);

        while (!kiblnd_data.kib_shutdown) {

                dropped_lock = 0;

                if (!list_empty (&kiblnd_data.kib_connd_zombies)) {
                        conn = list_entry (kiblnd_data.kib_connd_zombies.next,
                                           kib_conn_t, ibc_list);
                        list_del(&conn->ibc_list);

                        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
                                                flags);
                        dropped_lock = 1;

                        kiblnd_destroy_conn(conn);

                        spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
                }

                if (!list_empty (&kiblnd_data.kib_connd_conns)) {
                        conn = list_entry (kiblnd_data.kib_connd_conns.next,
                                           kib_conn_t, ibc_list);
                        list_del(&conn->ibc_list);

                        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
                                                flags);
                        dropped_lock = 1;

                        kiblnd_disconnect_conn(conn);
                        kiblnd_conn_decref(conn);

                        spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
                }

                /* careful with the jiffy wrap... */
                timeout = (int)(deadline - jiffies);
                if (timeout <= 0) {
                        const int n = 4;
                        const int p = 1;
                        int       chunk = kiblnd_data.kib_peer_hash_size;

                        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
                                               flags);
                        dropped_lock = 1;

                        /* Time to check for RDMA timeouts on a few more
                         * peers: I do checks every 'p' seconds on a
                         * proportion of the peer table and I need to check
                         * every connection 'n' times within a timeout
                         * interval, to ensure I detect a timeout on any
                         * connection within (n+1)/n times the timeout
                         * interval. */

                        if (*kiblnd_tunables.kib_timeout > n * p)
                                chunk = (chunk * n * p) /
                                        *kiblnd_tunables.kib_timeout;
                        if (chunk == 0)
                                chunk = 1;

                        for (i = 0; i < chunk; i++) {
                                kiblnd_check_conns(peer_index);
                                peer_index = (peer_index + 1) %
                                             kiblnd_data.kib_peer_hash_size;
                        }

                        deadline += p * HZ;
                        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
                }

                if (dropped_lock)
                        continue;

                /* Nothing to do for 'timeout' */
                set_current_state (TASK_INTERRUPTIBLE);
                add_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
                spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);

                schedule_timeout (timeout);

                set_current_state (TASK_RUNNING);
                remove_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
                spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
        }

        spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);

        kiblnd_thread_fini();
        return (0);
}
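
/* Illustrative sketch (not compiled): the peer-table chunking arithmetic used
 * by kiblnd_connd() above, with concrete numbers.  Assuming (illustratively)
 * a peer hash size of 256 and a 50-second timeout, with n = 4 and p = 1 each
 * pass checks 256 * 4 * 1 / 50 = 20 buckets, so every bucket is visited about
 * n times per timeout interval; the floor of 1 keeps small tables covered.
 * The helper name is hypothetical. */
#if 0
static int
kiblnd_timeout_check_chunk(int hash_size, int timeout, int n, int p)
{
        int chunk = hash_size;

        if (timeout > n * p)
                chunk = (chunk * n * p) / timeout;
        if (chunk == 0)
                chunk = 1;

        return chunk;
}
#endif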

void
kiblnd_qp_event(struct ib_event *event, void *arg)
{
        kib_conn_t *conn = arg;

        switch (event->event) {
        case IB_EVENT_COMM_EST:
                CDEBUG(D_NET, "%s established\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid));
                return;

        default:
                CERROR("%s: Async QP event type %d\n",
                       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
                return;
        }
}

void
kiblnd_complete (struct ib_wc *wc)
{
        switch (kiblnd_wreqid2type(wc->wr_id)) {
        default:
                LBUG();

        case IBLND_WID_RDMA:
                /* We only get RDMA completion notification if it fails.  All
                 * subsequent work items, including the final SEND will fail
                 * too.  However we can't print out any more info about the
                 * failing RDMA because 'tx' might be back on the idle list or
                 * even reused already if we didn't manage to post all our
                 * work items. */
                CNETERR("RDMA (tx: %p) failed: %d\n",
                        kiblnd_wreqid2ptr(wc->wr_id), wc->status);
                return;

        case IBLND_WID_TX:
                kiblnd_tx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status);
                return;

        case IBLND_WID_RX:
                kiblnd_rx_complete(kiblnd_wreqid2ptr(wc->wr_id), wc->status,
                                   wc->byte_len);
                return;
        }
}
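
/* Note: kiblnd_wreqid2type() and kiblnd_wreqid2ptr() recover, respectively, a
 * work-item type tag and the tx/rx descriptor pointer that were packed into
 * the 64-bit wr_id when the work request was posted, letting this one
 * completion handler dispatch without any lookup table. */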

void
kiblnd_cq_completion (struct ib_cq *cq, void *arg)
{
        /* NB I'm not allowed to schedule this conn once its refcount has
         * reached 0.  Since fundamentally I'm racing with scheduler threads
         * consuming my CQ I could be called after all completions have
         * occurred.  But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
         * and this CQ is about to be destroyed so I NOOP. */
        kib_conn_t     *conn = (kib_conn_t *)arg;
        unsigned long   flags;

        LASSERT (cq == conn->ibc_cq);

        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);

        conn->ibc_ready = 1;

        if (!conn->ibc_scheduled &&
            (conn->ibc_nrx > 0 ||
             conn->ibc_nsends_posted > 0)) {
                kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
                conn->ibc_scheduled = 1;
                list_add_tail(&conn->ibc_sched_list,
                              &kiblnd_data.kib_sched_conns);
                wake_up(&kiblnd_data.kib_sched_waitq);
        }

        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
}
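
/* Note: ibc_ready and ibc_scheduled implement a single-owner handoff: the
 * completion handler only enqueues the conn (taking a ref) when no scheduler
 * already has it, and a scheduler that finds ibc_ready set again re-queues
 * the conn itself.  At most one thread therefore polls a given CQ at a
 * time. */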

void
kiblnd_cq_event(struct ib_event *event, void *arg)
{
        kib_conn_t *conn = arg;

        CERROR("%s: async CQ event type %d\n",
               libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
}

int
kiblnd_scheduler(void *arg)
{
        long            id = (long)arg;
        wait_queue_t    wait;
        char            name[16];
        unsigned long   flags;
        kib_conn_t     *conn;
        struct ib_wc    wc;
        int             rc;
        int             did_something;
        int             busy_loops = 0;

        snprintf(name, sizeof(name), "kiblnd_sd_%02ld", id);
        cfs_daemonize(name);
        cfs_block_allsigs();

        init_waitqueue_entry(&wait, current);

        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);

        while (!kiblnd_data.kib_shutdown) {
                if (busy_loops++ >= IBLND_RESCHED) {
                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                               flags);

                        our_cond_resched();
                        busy_loops = 0;

                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
                }

                did_something = 0;

                if (!list_empty(&kiblnd_data.kib_sched_conns)) {
                        conn = list_entry(kiblnd_data.kib_sched_conns.next,
                                          kib_conn_t, ibc_sched_list);
                        /* take over kib_sched_conns' ref on conn... */
                        LASSERT(conn->ibc_scheduled);
                        list_del(&conn->ibc_sched_list);
                        conn->ibc_ready = 0;

                        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                               flags);

                        rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                        if (rc == 0) {
                                rc = ib_req_notify_cq(conn->ibc_cq,
                                                      IB_CQ_NEXT_COMP);
                                if (rc < 0) {
                                        CWARN("%s: ib_req_notify_cq failed: %d, "
                                              "closing connection\n",
                                              libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                                        kiblnd_close_conn(conn, -EIO);
                                        kiblnd_conn_decref(conn);
                                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
                                        continue;
                                }

                                rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                        }

                        if (rc < 0) {
                                CWARN("%s: ib_poll_cq failed: %d, "
                                      "closing connection\n",
                                      libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
                                kiblnd_close_conn(conn, -EIO);
                                kiblnd_conn_decref(conn);
                                spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
                                continue;
                        }

                        spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
                                          flags);

                        if (rc != 0 || conn->ibc_ready) {
                                /* There may be another completion waiting; get
                                 * another scheduler to check while I handle
                                 * this one... */
                                kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
                                list_add_tail(&conn->ibc_sched_list,
                                              &kiblnd_data.kib_sched_conns);
                                wake_up(&kiblnd_data.kib_sched_waitq);
                        } else {
                                conn->ibc_scheduled = 0;
                        }

                        if (rc != 0) {
                                spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
                                                       flags);

                                kiblnd_complete(&wc);

                                spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
                                                  flags);
                        }

                        kiblnd_conn_decref(conn); /* ...drop my ref from above */
                        did_something = 1;
                }

                if (did_something)
                        continue;

                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
                spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);

                schedule();
                busy_loops = 0;

                remove_wait_queue(&kiblnd_data.kib_sched_waitq, &wait);
                set_current_state(TASK_RUNNING);
                spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
        }

        spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);

        kiblnd_thread_fini();
        return (0);
}
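
/* Illustrative sketch (not compiled): the poll / arm / re-poll pattern the
 * scheduler uses above.  Re-arming the CQ and then polling once more closes
 * the window in which a completion lands after the empty poll but before
 * notification is requested; without the second poll that completion could
 * be missed until the next unrelated wakeup.  The helper name is
 * hypothetical. */
#if 0
static int
kiblnd_poll_one(struct ib_cq *cq, struct ib_wc *wc)
{
        int rc = ib_poll_cq(cq, 1, wc);

        if (rc == 0) {
                rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
                if (rc < 0)
                        return rc;              /* CQ is broken */

                rc = ib_poll_cq(cq, 1, wc);     /* catch the race */
        }

        return rc;                              /* 1: got a wc, 0: armed */
}
#endif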