X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd_cb.c;h=77a5a285f2b94cb9d96593796d971ea01c050ba8;hp=c228c8399319713adf0d2791cb67f84fe91fb2d6;hb=603aa7a1df6ee6ce6fe0d501a8b2bd1bfdf43bb8;hpb=88850e0c8eb2a0ac9d454d47dcbae70fd636f13d diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c index c228c83..77a5a28 100644 --- a/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -17,17 +15,15 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2012, 2016, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -40,73 +36,88 @@ #include "o2iblnd.h" +#define MAX_CONN_RACES_BEFORE_ABORT 20 + +static void kiblnd_peer_alive(kib_peer_ni_t *peer_ni); +static void kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error); +static void kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx, + int type, int body_nob); +static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie); +static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn); +static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn); +static void kiblnd_unmap_tx(struct lnet_ni *ni, kib_tx_t *tx); +static void kiblnd_check_sends_locked(kib_conn_t *conn); + void -kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx) +kiblnd_tx_done(struct lnet_ni *ni, kib_tx_t *tx) { - lnet_msg_t *lntmsg[2]; - kib_net_t *net = ni->ni_data; - int rc; - int i; + struct lnet_msg *lntmsg[2]; + kib_net_t *net = ni->ni_data; + int rc; + int i; - LASSERT (net != NULL); - LASSERT (!cfs_in_interrupt()); - LASSERT (!tx->tx_queued); /* mustn't be queued for sending */ - LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */ - LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */ - LASSERT (tx->tx_pool != NULL); + LASSERT (net != NULL); + LASSERT (!in_interrupt()); + LASSERT (!tx->tx_queued); /* mustn't be queued for sending */ + LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */ + LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer_ni response */ + LASSERT (tx->tx_pool != NULL); - kiblnd_unmap_tx(ni, tx); + kiblnd_unmap_tx(ni, tx); - /* tx may have up to 2 lnet msgs to finalise */ - lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; - lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL; - rc = tx->tx_status; + /* tx may have up to 2 lnet msgs to finalise */ + lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; + lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL; + rc = tx->tx_status; - if (tx->tx_conn != NULL) { - LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni); + if (tx->tx_conn != NULL) { + LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni); - kiblnd_conn_decref(tx->tx_conn); - 
tx->tx_conn = NULL; - } + kiblnd_conn_decref(tx->tx_conn); + tx->tx_conn = NULL; + } - tx->tx_nwrq = 0; - tx->tx_status = 0; + tx->tx_nwrq = 0; + tx->tx_status = 0; - kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); + kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); - /* delay finalize until my descs have been freed */ - for (i = 0; i < 2; i++) { - if (lntmsg[i] == NULL) - continue; + /* delay finalize until my descs have been freed */ + for (i = 0; i < 2; i++) { + if (lntmsg[i] == NULL) + continue; - lnet_finalize(ni, lntmsg[i], rc); - } + lnet_finalize(ni, lntmsg[i], rc); + } } void -kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int status) +kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int status) { - kib_tx_t *tx; + kib_tx_t *tx; - while (!cfs_list_empty (txlist)) { - tx = cfs_list_entry (txlist->next, kib_tx_t, tx_list); + while (!list_empty(txlist)) { + tx = list_entry(txlist->next, kib_tx_t, tx_list); - cfs_list_del(&tx->tx_list); - /* complete now */ - tx->tx_waiting = 0; - tx->tx_status = status; - kiblnd_tx_done(ni, tx); - } + list_del(&tx->tx_list); + /* complete now */ + tx->tx_waiting = 0; + tx->tx_status = status; + kiblnd_tx_done(ni, tx); + } } -kib_tx_t * -kiblnd_get_idle_tx (lnet_ni_t *ni) +static kib_tx_t * +kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target) { - kib_net_t *net = (kib_net_t *)ni->ni_data; - cfs_list_t *node; - kib_tx_t *tx; + kib_net_t *net = (kib_net_t *)ni->ni_data; + struct list_head *node; + kib_tx_t *tx; + kib_tx_poolset_t *tps; - node = kiblnd_pool_alloc_node(&net->ibn_tx_ps.tps_poolset); + tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)]; + node = kiblnd_pool_alloc_node(&tps->tps_poolset); if (node == NULL) return NULL; tx = container_of(node, kib_tx_t, tx_list); @@ -119,43 +130,41 @@ kiblnd_get_idle_tx (lnet_ni_t *ni) LASSERT (tx->tx_conn == NULL); LASSERT (tx->tx_lntmsg[0] == NULL); LASSERT (tx->tx_lntmsg[1] == NULL); - LASSERT (tx->tx_u.pmr == NULL); LASSERT (tx->tx_nfrags == 0); return tx; } -void -kiblnd_drop_rx (kib_rx_t *rx) +static void +kiblnd_drop_rx(kib_rx_t *rx) { - kib_conn_t *conn = rx->rx_conn; - unsigned long flags; - - cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags); - LASSERT (conn->ibc_nrx > 0); - conn->ibc_nrx--; - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags); + kib_conn_t *conn = rx->rx_conn; + struct kib_sched_info *sched = conn->ibc_sched; + unsigned long flags; + + spin_lock_irqsave(&sched->ibs_lock, flags); + LASSERT(conn->ibc_nrx > 0); + conn->ibc_nrx--; + spin_unlock_irqrestore(&sched->ibs_lock, flags); - kiblnd_conn_decref(conn); + kiblnd_conn_decref(conn); } int kiblnd_post_rx (kib_rx_t *rx, int credit) { - kib_conn_t *conn = rx->rx_conn; - kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; - struct ib_recv_wr *bad_wrq = NULL; - struct ib_mr *mr; - int rc; - - LASSERT (net != NULL); - LASSERT (!cfs_in_interrupt()); - LASSERT (credit == IBLND_POSTRX_NO_CREDIT || - credit == IBLND_POSTRX_PEER_CREDIT || - credit == IBLND_POSTRX_RSRVD_CREDIT); - - mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE); - LASSERT (mr != NULL); + kib_conn_t *conn = rx->rx_conn; + kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; + struct ib_recv_wr *bad_wrq = NULL; + struct ib_mr *mr = conn->ibc_hdev->ibh_mrs; + int rc; + + LASSERT (net != NULL); + LASSERT (!in_interrupt()); + LASSERT (credit == IBLND_POSTRX_NO_CREDIT || + credit == IBLND_POSTRX_PEER_CREDIT || + credit == IBLND_POSTRX_RSRVD_CREDIT); + LASSERT(mr != NULL); rx->rx_sge.lkey = mr->lkey; 
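	/* single SGE: it spans the whole pre-mapped message buffer, using the
	 * lkey of the device-wide DMA MR (conn->ibc_hdev->ibh_mrs) above */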
rx->rx_sge.addr = rx->rx_msgaddr; @@ -176,75 +185,81 @@ kiblnd_post_rx (kib_rx_t *rx, int credit) rx->rx_nob = -1; /* flag posted */ - rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); - if (rc != 0) { - CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); - rx->rx_nob = 0; - } - - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ - return rc; - - if (rc != 0) { - kiblnd_close_conn(conn, rc); - kiblnd_drop_rx(rx); /* No more posts for this rx */ - return rc; - } - - if (credit == IBLND_POSTRX_NO_CREDIT) - return 0; - - cfs_spin_lock(&conn->ibc_lock); - if (credit == IBLND_POSTRX_PEER_CREDIT) - conn->ibc_outstanding_credits++; - else - conn->ibc_reserved_credits++; - cfs_spin_unlock(&conn->ibc_lock); - - kiblnd_check_sends(conn); - return 0; + /* NB: need an extra reference after ib_post_recv because we don't + * own this rx (and rx::rx_conn) anymore, LU-5678. + */ + kiblnd_conn_addref(conn); + rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); + if (unlikely(rc != 0)) { + CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); + rx->rx_nob = 0; + } + + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ + goto out; + + if (unlikely(rc != 0)) { + kiblnd_close_conn(conn, rc); + kiblnd_drop_rx(rx); /* No more posts for this rx */ + goto out; + } + + if (credit == IBLND_POSTRX_NO_CREDIT) + goto out; + + spin_lock(&conn->ibc_lock); + if (credit == IBLND_POSTRX_PEER_CREDIT) + conn->ibc_outstanding_credits++; + else + conn->ibc_reserved_credits++; + kiblnd_check_sends_locked(conn); + spin_unlock(&conn->ibc_lock); + +out: + kiblnd_conn_decref(conn); + return rc; } -kib_tx_t * +static kib_tx_t * kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) { - cfs_list_t *tmp; + struct list_head *tmp; - cfs_list_for_each(tmp, &conn->ibc_active_txs) { - kib_tx_t *tx = cfs_list_entry(tmp, kib_tx_t, tx_list); + list_for_each(tmp, &conn->ibc_active_txs) { + kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); - LASSERT (!tx->tx_queued); - LASSERT (tx->tx_sending != 0 || tx->tx_waiting); + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_sending != 0 || tx->tx_waiting); - if (tx->tx_cookie != cookie) - continue; + if (tx->tx_cookie != cookie) + continue; - if (tx->tx_waiting && - tx->tx_msg->ibm_type == txtype) - return tx; + if (tx->tx_waiting && + tx->tx_msg->ibm_type == txtype) + return tx; - CWARN("Bad completion: %swaiting, type %x (wanted %x)\n", - tx->tx_waiting ? "" : "NOT ", - tx->tx_msg->ibm_type, txtype); - } - return NULL; + CWARN("Bad completion: %swaiting, type %x (wanted %x)\n", + tx->tx_waiting ? 
"" : "NOT ", + tx->tx_msg->ibm_type, txtype); + } + return NULL; } -void +static void kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) { - kib_tx_t *tx; - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - int idle; + kib_tx_t *tx; + struct lnet_ni *ni = conn->ibc_peer->ibp_ni; + int idle; - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); - tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); - if (tx == NULL) { - cfs_spin_unlock(&conn->ibc_lock); + tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); + if (tx == NULL) { + spin_unlock(&conn->ibc_lock); - CWARN("Unmatched completion type %x cookie "LPX64" from %s\n", + CWARN("Unmatched completion type %x cookie %#llx from %s\n", txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid)); kiblnd_close_conn(conn, -EPROTO); return; @@ -262,19 +277,19 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) idle = !tx->tx_queued && (tx->tx_sending == 0); if (idle) - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); - cfs_spin_unlock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); - if (idle) - kiblnd_tx_done(ni, tx); + if (idle) + kiblnd_tx_done(ni, tx); } -void -kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie) +static void +kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) { - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_tx_t *tx = kiblnd_get_idle_tx(ni); + struct lnet_ni *ni = conn->ibc_peer->ibp_ni; + kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); if (tx == NULL) { CERROR("Can't get tx for completion %x for %s\n", @@ -289,12 +304,12 @@ kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie) kiblnd_queue_tx(tx, conn); } -void +static void kiblnd_handle_rx (kib_rx_t *rx) { kib_msg_t *msg = rx->rx_msg; kib_conn_t *conn = rx->rx_conn; - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + struct lnet_ni *ni = conn->ibc_peer->ibp_ni; int credits = msg->ibm_credits; kib_tx_t *tx; int rc = 0; @@ -309,27 +324,32 @@ kiblnd_handle_rx (kib_rx_t *rx) if (credits != 0) { /* Have I received credits that will let me send? 
*/ - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); - if (conn->ibc_credits + credits > - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) { - rc2 = conn->ibc_credits; - cfs_spin_unlock(&conn->ibc_lock); + if (conn->ibc_credits + credits > + conn->ibc_queue_depth) { + rc2 = conn->ibc_credits; + spin_unlock(&conn->ibc_lock); - CERROR("Bad credits from %s: %d + %d > %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc2, credits, - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); + CERROR("Bad credits from %s: %d + %d > %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + rc2, credits, + conn->ibc_queue_depth); - kiblnd_close_conn(conn, -EPROTO); - kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT); - return; - } + kiblnd_close_conn(conn, -EPROTO); + kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT); + return; + } conn->ibc_credits += credits; - cfs_spin_unlock(&conn->ibc_lock); - kiblnd_check_sends(conn); + /* This ensures the credit taken by NOOP can be returned */ + if (msg->ibm_type == IBLND_MSG_NOOP && + !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */ + conn->ibc_outstanding_credits++; + + kiblnd_check_sends_locked(conn); + spin_unlock(&conn->ibc_lock); } switch (msg->ibm_type) { @@ -341,9 +361,14 @@ kiblnd_handle_rx (kib_rx_t *rx) break; case IBLND_MSG_NOOP: - if (IBLND_OOB_CAPABLE(conn->ibc_version)) + if (IBLND_OOB_CAPABLE(conn->ibc_version)) { post_credit = IBLND_POSTRX_NO_CREDIT; - else + break; + } + + if (credits != 0) /* credit already posted */ + post_credit = IBLND_POSTRX_NO_CREDIT; + else /* a keepalive NOOP */ post_credit = IBLND_POSTRX_PEER_CREDIT; break; @@ -375,12 +400,12 @@ kiblnd_handle_rx (kib_rx_t *rx) case IBLND_MSG_PUT_ACK: post_credit = IBLND_POSTRX_RSRVD_CREDIT; - cfs_spin_lock(&conn->ibc_lock); - tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, - msg->ibm_u.putack.ibpam_src_cookie); - if (tx != NULL) - cfs_list_del(&tx->tx_list); - cfs_spin_unlock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); + tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, + msg->ibm_u.putack.ibpam_src_cookie); + if (tx != NULL) + list_del(&tx->tx_list); + spin_unlock(&conn->ibc_lock); if (tx == NULL) { CERROR("Unmatched PUT_ACK from %s\n", @@ -391,7 +416,7 @@ kiblnd_handle_rx (kib_rx_t *rx) LASSERT (tx->tx_waiting); /* CAVEAT EMPTOR: I could be racing with tx_complete, but... - * (a) I can overwrite tx_msg since my peer has received it! + * (a) I can overwrite tx_msg since my peer_ni has received it! * (b) tx_waiting set tells tx_complete() it's not done. 
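	 * (c) tx_msg is reused below to build the PUT_DONE rdma.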
*/ tx->tx_nwrq = 0; /* overwrite PUT_REQ */ @@ -404,11 +429,11 @@ kiblnd_handle_rx (kib_rx_t *rx) CERROR("Can't setup rdma for PUT to %s: %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2); - cfs_spin_lock(&conn->ibc_lock); - tx->tx_waiting = 0; /* clear waiting and queue atomically */ - kiblnd_queue_tx_locked(tx, conn); - cfs_spin_unlock(&conn->ibc_lock); - break; + spin_lock(&conn->ibc_lock); + tx->tx_waiting = 0; /* clear waiting and queue atomically */ + kiblnd_queue_tx_locked(tx, conn); + spin_unlock(&conn->ibc_lock); + break; case IBLND_MSG_PUT_DONE: post_credit = IBLND_POSTRX_PEER_CREDIT; @@ -440,12 +465,12 @@ kiblnd_handle_rx (kib_rx_t *rx) kiblnd_post_rx(rx, post_credit); } -void +static void kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) { kib_msg_t *msg = rx->rx_msg; kib_conn_t *conn = rx->rx_conn; - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + struct lnet_ni *ni = conn->ibc_peer->ibp_ni; kib_net_t *net = ni->ni_data; int rc; int err = -EIO; @@ -458,8 +483,8 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) goto ignore; if (status != IB_WC_SUCCESS) { - CDEBUG(D_NETERROR, "Rx from %s failed: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), status); + CNETERR("Rx from %s failed: %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), status); goto failed; } @@ -489,17 +514,17 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) /* racing with connection establishment/teardown! */ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - unsigned long flags; - - cfs_write_lock_irqsave(g_lock, flags); - /* must check holding global lock to eliminate race */ - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs); - cfs_write_unlock_irqrestore(g_lock, flags); - return; - } - cfs_write_unlock_irqrestore(g_lock, flags); + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + unsigned long flags; + + write_lock_irqsave(g_lock, flags); + /* must check holding global lock to eliminate race */ + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { + list_add_tail(&rx->rx_list, &conn->ibc_early_rxs); + write_unlock_irqrestore(g_lock, flags); + return; + } + write_unlock_irqrestore(g_lock, flags); } kiblnd_handle_rx(rx); return; @@ -511,13 +536,12 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) kiblnd_drop_rx(rx); /* Don't re-post rx. */ } -struct page * +static struct page * kiblnd_kvaddr_to_page (unsigned long vaddr) { struct page *page; - if (vaddr >= VMALLOC_START && - vaddr < VMALLOC_END) { + if (is_vmalloc_addr((void *)vaddr)) { page = vmalloc_to_page ((void *)vaddr); LASSERT (page != NULL); return page; @@ -536,133 +560,97 @@ kiblnd_kvaddr_to_page (unsigned long vaddr) } static int -kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) +kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob) { - kib_dev_t *ibdev = net->ibn_dev; - __u64 *pages = tx->tx_pages; - int npages; - int size; - int rc; - int i; - - for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { - for (size = 0; size < rd->rd_frags[i].rf_nob; - size += ibdev->ibd_page_size) { - pages[npages ++] = (rd->rd_frags[i].rf_addr & - ibdev->ibd_page_mask) + size; - } - } - - rc = kiblnd_fmr_pool_map(&net->ibn_fmr_ps, pages, npages, 0, &tx->tx_u.fmr); - if (rc != 0) { - CERROR ("Can't map %d pages: %d\n", npages, rc); - return rc; - } - - /* If rd is not tx_rd, it's going to get sent to a peer, who will need - * the rkey */ - rd->rd_key = (rd != tx->tx_rd) ? 
tx->tx_u.fmr.fmr_pfmr->fmr->rkey : - tx->tx_u.fmr.fmr_pfmr->fmr->lkey; - rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask; - rd->rd_frags[0].rf_nob = nob; - rd->rd_nfrags = 1; - - return 0; -} - -static int -kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) -{ - __u64 iova; - int rc; - - iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask; - - rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, rd, &iova, &tx->tx_u.pmr); - if (rc != 0) { - CERROR("Failed to create MR by phybuf: %d\n", rc); - return rc; - } - - /* If rd is not tx_rd, it's going to get sent to a peer, who will need - * the rkey */ - rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey : - tx->tx_u.pmr->pmr_mr->lkey; - rd->rd_nfrags = 1; - rd->rd_frags[0].rf_addr = iova; - rd->rd_frags[0].rf_nob = nob; - - return 0; + kib_hca_dev_t *hdev; + kib_fmr_poolset_t *fps; + int cpt; + int rc; + + LASSERT(tx->tx_pool != NULL); + LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); + + hdev = tx->tx_pool->tpo_hdev; + cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; + + fps = net->ibn_fmr_ps[cpt]; + rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr); + if (rc != 0) { + CERROR("Can't map %u pages: %d\n", nob, rc); + return rc; + } + + /* If rd is not tx_rd, it's going to get sent to a peer_ni, who will need + * the rkey */ + rd->rd_key = tx->fmr.fmr_key; + rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; + rd->rd_frags[0].rf_nob = nob; + rd->rd_nfrags = 1; + + return 0; } -void -kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) +static void +kiblnd_unmap_tx(struct lnet_ni *ni, kib_tx_t *tx) { - kib_net_t *net = ni->ni_data; + kib_net_t *net = ni->ni_data; - LASSERT (net != NULL); + LASSERT(net != NULL); - if (net->ibn_with_fmr && tx->tx_u.fmr.fmr_pfmr != NULL) { - kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status); - tx->tx_u.fmr.fmr_pfmr = NULL; - } else if (net->ibn_with_pmr && tx->tx_u.pmr != NULL) { - kiblnd_pmr_pool_unmap(tx->tx_u.pmr); - tx->tx_u.pmr = NULL; - } + if (net->ibn_fmr_ps != NULL) + kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); if (tx->tx_nfrags != 0) { - kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device, + kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); tx->tx_nfrags = 0; } } -int -kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, - kib_rdma_desc_t *rd, int nfrags) +static int +kiblnd_map_tx(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags) { - kib_net_t *net = ni->ni_data; - struct ib_mr *mr = NULL; - __u32 nob; - int i; + kib_net_t *net = ni->ni_data; + kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev; + struct ib_mr *mr = NULL; + __u32 nob; + int i; - /* If rd is not tx_rd, it's going to get sent to a peer and I'm the + /* If rd is not tx_rd, it's going to get sent to a peer_ni and I'm the * RDMA sink */ tx->tx_dmadir = (rd != tx->tx_rd) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; - tx->tx_nfrags = nfrags; + tx->tx_nfrags = nfrags; - rd->rd_nfrags = - kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device, - tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); + rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags, + tx->tx_nfrags, tx->tx_dmadir); for (i = 0, nob = 0; i < rd->rd_nfrags; i++) { rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len( - net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]); + hdev->ibh_ibdev, &tx->tx_frags[i]); rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address( - net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]); + hdev->ibh_ibdev, &tx->tx_frags[i]); nob += rd->rd_frags[i].rf_nob; } - /* looking for pre-mapping MR */ - mr = kiblnd_find_rd_dma_mr(net, rd); - if (mr != NULL) { - /* found pre-mapping MR */ - rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey; - return 0; - } + mr = kiblnd_find_rd_dma_mr(ni, rd, + (tx->tx_conn != NULL) ? + tx->tx_conn->ibc_max_frags : -1); + if (mr != NULL) { + /* found pre-mapping MR */ + rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey; + return 0; + } - if (net->ibn_with_fmr) - return kiblnd_fmr_map_tx(net, tx, rd, nob); - else if (net->ibn_with_pmr) - return kiblnd_pmr_map_tx(net, tx, rd, nob); + if (net->ibn_fmr_ps != NULL) + return kiblnd_fmr_map_tx(net, tx, rd, nob); - return -EINVAL; + return -EINVAL; } -int -kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, - unsigned int niov, struct iovec *iov, int offset, int nob) +static int +kiblnd_setup_rd_iov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, + unsigned int niov, struct kvec *iov, int offset, int nob) { kib_net_t *net = ni->ni_data; struct page *page; @@ -698,7 +686,11 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, fragnob = min(fragnob, (int)PAGE_SIZE - page_offset); sg_set_page(sg, page, fragnob, page_offset); - sg++; + sg = sg_next(sg); + if (!sg) { + CERROR("lacking enough sg entries to map tx\n"); + return -EFAULT; + } if (offset + fragnob < iov->iov_len) { offset += fragnob; @@ -713,31 +705,9 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); } -static inline int -get_kiov_length (int nkiov, lnet_kiov_t *kiov, int offset, int nob) -{ - int fragnob; - int count = 0; - - do { - LASSERT (nkiov > 0); - - fragnob = min((int)(kiov->kiov_len - offset), nob); - - count++; - - offset = 0; - kiov++; - nkiov--; - nob -= fragnob; - } while (nob > 0); - - return count; -} - -int -kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, - int nkiov, lnet_kiov_t *kiov, int offset, int nob) +static int +kiblnd_setup_rd_kiov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, + int nkiov, lnet_kiov_t *kiov, int offset, int nob) { kib_net_t *net = ni->ni_data; struct scatterlist *sg; @@ -757,16 +727,18 @@ kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, } sg = tx->tx_frags; - tx->tx_nfrags = get_kiov_length(nkiov, kiov, offset, nob); - sg_init_table(sg, tx->tx_nfrags); do { LASSERT (nkiov > 0); fragnob = min((int)(kiov->kiov_len - offset), nob); - sg_set_page(sg, kiov->kiov_page, fragnob, - kiov->kiov_offset + offset); - sg++; + sg_set_page(sg, kiov->kiov_page, fragnob, + kiov->kiov_offset + offset); + sg = sg_next(sg); + if (!sg) { + CERROR("lacking enough sg entries to map tx\n"); + return -EFAULT; + } offset = 0; kiov++; @@ -777,70 +749,72 @@ kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); } 
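/*
 * A minimal standalone sketch, not part of the patch: the defensive
 * scatterlist walk that kiblnd_setup_rd_iov()/kiblnd_setup_rd_kiov()
 * above now share.  sg_next() follows chained tables (where the old
 * sg++ could walk off the end) and returns NULL past the last entry,
 * so the mapper can fail with -EFAULT instead of overrunning tx_frags.
 * All names below are illustrative, not part of the o2iblnd API.
 */
#include <linux/scatterlist.h>

static int kiblnd_sg_fill_sketch(struct scatterlist *sg, struct page **pages,
				 unsigned int *lens, int nfrags)
{
	int i;

	for (i = 0; i < nfrags; i++) {
		/* ran out of scatterlist entries before consuming
		 * every fragment */
		if (sg == NULL)
			return -EFAULT;
		sg_set_page(sg, pages[i], lens[i], 0);
		sg = sg_next(sg);
	}
	return 0;
}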
-int +static int kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) +__must_hold(&conn->ibc_lock) { - kib_msg_t *msg = tx->tx_msg; - kib_peer_t *peer = conn->ibc_peer; - int ver = conn->ibc_version; - int rc; - int done; - struct ib_send_wr *bad_wrq; - - LASSERT (tx->tx_queued); - /* We rely on this for QP sizing */ - LASSERT (tx->tx_nwrq > 0); - LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver)); - - LASSERT (credit == 0 || credit == 1); - LASSERT (conn->ibc_outstanding_credits >= 0); - LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver)); - LASSERT (conn->ibc_credits >= 0); - LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver)); - - if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) { + kib_msg_t *msg = tx->tx_msg; + kib_peer_ni_t *peer_ni = conn->ibc_peer; + struct lnet_ni *ni = peer_ni->ibp_ni; + int ver = conn->ibc_version; + int rc; + int done; + + LASSERT(tx->tx_queued); + /* We rely on this for QP sizing */ + LASSERT(tx->tx_nwrq > 0); + LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags); + + LASSERT(credit == 0 || credit == 1); + LASSERT(conn->ibc_outstanding_credits >= 0); + LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth); + LASSERT(conn->ibc_credits >= 0); + LASSERT(conn->ibc_credits <= conn->ibc_queue_depth); + + if (conn->ibc_nsends_posted == + kiblnd_concurrent_sends(ver, ni)) { /* tx completions outstanding... */ CDEBUG(D_NET, "%s: posted enough\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); return -EAGAIN; } if (credit != 0 && conn->ibc_credits == 0) { /* no credits */ CDEBUG(D_NET, "%s: no credits\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); return -EAGAIN; } if (credit != 0 && !IBLND_OOB_CAPABLE(ver) && - conn->ibc_credits == 1 && /* last credit reserved for */ - conn->ibc_outstanding_credits == 0) { /* giving back credits */ + conn->ibc_credits == 1 && /* last credit reserved */ + msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */ CDEBUG(D_NET, "%s: not using last credit\n", - libcfs_nid2str(peer->ibp_nid)); + libcfs_nid2str(peer_ni->ibp_nid)); return -EAGAIN; } /* NB don't drop ibc_lock before bumping tx_sending */ - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); tx->tx_queued = 0; if (msg->ibm_type == IBLND_MSG_NOOP && - (!kiblnd_send_noop(conn) || /* redundant NOOP */ + (!kiblnd_need_noop(conn) || /* redundant NOOP */ (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */ conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) { - /* OK to drop when posted enough NOOPs, since - * kiblnd_check_sends will queue NOOP again when - * posted NOOPs complete */ - cfs_spin_unlock(&conn->ibc_lock); - kiblnd_tx_done(peer->ibp_ni, tx); - cfs_spin_lock(&conn->ibc_lock); + /* OK to drop when posted enough NOOPs, since + * kiblnd_check_sends_locked will queue NOOP again when + * posted NOOPs complete */ + spin_unlock(&conn->ibc_lock); + kiblnd_tx_done(peer_ni->ibp_ni, tx); + spin_lock(&conn->ibc_lock); CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n", - libcfs_nid2str(peer->ibp_nid), + libcfs_nid2str(peer_ni->ibp_nid), conn->ibc_noops_posted); return 0; } - kiblnd_pack_msg(peer->ibp_ni, msg, ver, conn->ibc_outstanding_credits, - peer->ibp_nid, conn->ibc_incarnation); + kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits, + peer_ni->ibp_nid, conn->ibc_incarnation); conn->ibc_credits -= credit; conn->ibc_outstanding_credits = 0; @@ -855,14 +829,39 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) * tx_sending is non-zero if we've not 
done the tx_complete() * from the first send; hence the ++ rather than = below. */ tx->tx_sending++; - cfs_list_add(&tx->tx_list, &conn->ibc_active_txs); + list_add(&tx->tx_list, &conn->ibc_active_txs); /* I'm still holding ibc_lock! */ - if (conn->ibc_state != IBLND_CONN_ESTABLISHED) + if (conn->ibc_state != IBLND_CONN_ESTABLISHED) { rc = -ECONNABORTED; - else - rc = ib_post_send(conn->ibc_cmid->qp, - tx->tx_wrq, &bad_wrq); + } else if (tx->tx_pool->tpo_pool.po_failed || + conn->ibc_hdev != tx->tx_pool->tpo_hdev) { + /* close_conn will launch failover */ + rc = -ENETDOWN; + } else { + struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd; + struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr; + struct ib_send_wr *wr = &tx->tx_wrq[0].wr; + + if (frd != NULL) { + if (!frd->frd_valid) { + wr = &frd->frd_inv_wr.wr; + wr->next = &frd->frd_fastreg_wr.wr; + } else { + wr = &frd->frd_fastreg_wr.wr; + } + frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr; + } + + LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), + "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n", + bad->wr_id, bad->opcode, bad->send_flags, + libcfs_nid2str(conn->ibc_peer->ibp_nid)); + + bad = NULL; + rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad); + } + conn->ibc_last_send = jiffies; if (rc == 0) @@ -882,32 +881,32 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) done = (tx->tx_sending == 0); if (done) - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); - cfs_spin_unlock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); if (conn->ibc_state == IBLND_CONN_ESTABLISHED) CERROR("Error %d posting transmit to %s\n", - rc, libcfs_nid2str(peer->ibp_nid)); + rc, libcfs_nid2str(peer_ni->ibp_nid)); else CDEBUG(D_NET, "Error %d posting transmit to %s\n", - rc, libcfs_nid2str(peer->ibp_nid)); + rc, libcfs_nid2str(peer_ni->ibp_nid)); kiblnd_close_conn(conn, rc); if (done) - kiblnd_tx_done(peer->ibp_ni, tx); + kiblnd_tx_done(peer_ni->ibp_ni, tx); - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); - return -EIO; + return -EIO; } -void -kiblnd_check_sends (kib_conn_t *conn) +static void +kiblnd_check_sends_locked(kib_conn_t *conn) { int ver = conn->ibc_version; - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + struct lnet_ni *ni = conn->ibc_peer->ibp_ni; kib_tx_t *tx; /* Don't send anything until after the connection is established */ @@ -917,46 +916,48 @@ kiblnd_check_sends (kib_conn_t *conn) return; } - cfs_spin_lock(&conn->ibc_lock); - - LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); + LASSERT(conn->ibc_nsends_posted <= + kiblnd_concurrent_sends(ver, ni)); LASSERT (!IBLND_OOB_CAPABLE(ver) || conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver)); LASSERT (conn->ibc_reserved_credits >= 0); while (conn->ibc_reserved_credits > 0 && - !cfs_list_empty(&conn->ibc_tx_queue_rsrvd)) { - tx = cfs_list_entry(conn->ibc_tx_queue_rsrvd.next, + !list_empty(&conn->ibc_tx_queue_rsrvd)) { + tx = list_entry(conn->ibc_tx_queue_rsrvd.next, kib_tx_t, tx_list); - cfs_list_del(&tx->tx_list); - cfs_list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); + list_del(&tx->tx_list); + list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); conn->ibc_reserved_credits--; } - if (kiblnd_send_noop(conn)) { - cfs_spin_unlock(&conn->ibc_lock); + if (kiblnd_need_noop(conn)) { + spin_unlock(&conn->ibc_lock); - tx = kiblnd_get_idle_tx(ni); - if (tx != NULL) - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); + tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + if (tx != NULL) + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); - 
cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); if (tx != NULL) kiblnd_queue_tx_locked(tx, conn); } - kiblnd_conn_addref(conn); /* 1 ref for me.... (see b21911) */ - for (;;) { int credit; - if (!cfs_list_empty(&conn->ibc_tx_queue_nocred)) { + if (!list_empty(&conn->ibc_tx_queue_nocred)) { credit = 0; - tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next, + tx = list_entry(conn->ibc_tx_queue_nocred.next, kib_tx_t, tx_list); - } else if (!cfs_list_empty(&conn->ibc_tx_queue)) { + } else if (!list_empty(&conn->ibc_tx_noops)) { + LASSERT (!IBLND_OOB_CAPABLE(ver)); + credit = 1; + tx = list_entry(conn->ibc_tx_noops.next, + kib_tx_t, tx_list); + } else if (!list_empty(&conn->ibc_tx_queue)) { credit = 1; - tx = cfs_list_entry(conn->ibc_tx_queue.next, + tx = list_entry(conn->ibc_tx_queue.next, kib_tx_t, tx_list); } else break; @@ -964,13 +965,9 @@ kiblnd_check_sends (kib_conn_t *conn) if (kiblnd_post_tx_locked(conn, tx, credit) != 0) break; } - - cfs_spin_unlock(&conn->ibc_lock); - - kiblnd_conn_decref(conn); /* ...until here */ } -void +static void kiblnd_tx_complete (kib_tx_t *tx, int status) { int failed = (status != IB_WC_SUCCESS); @@ -981,18 +978,18 @@ kiblnd_tx_complete (kib_tx_t *tx, int status) if (failed) { if (conn->ibc_state == IBLND_CONN_ESTABLISHED) - CDEBUG(D_NETERROR, "Tx -> %s cookie "LPX64 - " sending %d waiting %d: failed %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - tx->tx_cookie, tx->tx_sending, tx->tx_waiting, - status); + CNETERR("Tx -> %s cookie %#llx" + " sending %d waiting %d: failed %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + tx->tx_cookie, tx->tx_sending, tx->tx_waiting, + status); kiblnd_close_conn(conn, -EIO); } else { kiblnd_peer_alive(conn->ibc_peer); } - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); /* I could be racing with rdma completion. Whoever makes 'tx' idle * gets to free it, which also drops its ref on 'conn'. */ @@ -1003,82 +1000,75 @@ kiblnd_tx_complete (kib_tx_t *tx, int status) conn->ibc_noops_posted--; if (failed) { - tx->tx_waiting = 0; /* don't wait for peer */ + tx->tx_waiting = 0; /* don't wait for peer_ni */ tx->tx_status = -EIO; } idle = (tx->tx_sending == 0) && /* This is the final callback */ - !tx->tx_waiting && /* Not waiting for peer */ + !tx->tx_waiting && /* Not waiting for peer_ni */ !tx->tx_queued; /* Not re-queued (PUT_DONE) */ if (idle) - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); - kiblnd_conn_addref(conn); /* 1 ref for me.... 
*/ - - cfs_spin_unlock(&conn->ibc_lock); + kiblnd_check_sends_locked(conn); + spin_unlock(&conn->ibc_lock); if (idle) kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx); - - kiblnd_check_sends(conn); - - kiblnd_conn_decref(conn); /* ...until here */ } -void -kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) +static void +kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx, int type, int body_nob) { - kib_net_t *net = ni->ni_data; - struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; - struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; - int nob = offsetof (kib_msg_t, ibm_u) + body_nob; - struct ib_mr *mr; + kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; + struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; + struct ib_rdma_wr *wrq; + int nob = offsetof(kib_msg_t, ibm_u) + body_nob; + struct ib_mr *mr = hdev->ibh_mrs; - LASSERT (net != NULL); - LASSERT (tx->tx_nwrq >= 0); - LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); - LASSERT (nob <= IBLND_MSG_SIZE); + LASSERT(tx->tx_nwrq >= 0); + LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); + LASSERT(nob <= IBLND_MSG_SIZE); + LASSERT(mr != NULL); kiblnd_init_msg(tx->tx_msg, type, body_nob); - mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob); - LASSERT (mr != NULL); - sge->lkey = mr->lkey; sge->addr = tx->tx_msgaddr; sge->length = nob; - memset(wrq, 0, sizeof(*wrq)); + wrq = &tx->tx_wrq[tx->tx_nwrq]; + memset(wrq, 0, sizeof(*wrq)); - wrq->next = NULL; - wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); - wrq->sg_list = sge; - wrq->num_sge = 1; - wrq->opcode = IB_WR_SEND; - wrq->send_flags = IB_SEND_SIGNALED; + wrq->wr.next = NULL; + wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); + wrq->wr.sg_list = sge; + wrq->wr.num_sge = 1; + wrq->wr.opcode = IB_WR_SEND; + wrq->wr.send_flags = IB_SEND_SIGNALED; - tx->tx_nwrq++; + tx->tx_nwrq++; } -int -kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, - int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) +static int +kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) { - kib_msg_t *ibmsg = tx->tx_msg; - kib_rdma_desc_t *srcrd = tx->tx_rd; - struct ib_sge *sge = &tx->tx_sge[0]; - struct ib_send_wr *wrq = &tx->tx_wrq[0]; - int rc = resid; - int srcidx; - int dstidx; - int wrknob; - - LASSERT (!cfs_in_interrupt()); - LASSERT (tx->tx_nwrq == 0); - LASSERT (type == IBLND_MSG_GET_DONE || - type == IBLND_MSG_PUT_DONE); - - srcidx = dstidx = 0; + kib_msg_t *ibmsg = tx->tx_msg; + kib_rdma_desc_t *srcrd = tx->tx_rd; + struct ib_sge *sge = &tx->tx_sge[0]; + struct ib_rdma_wr *wrq; + int rc = resid; + int srcidx; + int dstidx; + int wrknob; + + LASSERT (!in_interrupt()); + LASSERT (tx->tx_nwrq == 0); + LASSERT (type == IBLND_MSG_GET_DONE || + type == IBLND_MSG_PUT_DONE); + + srcidx = dstidx = 0; while (resid > 0) { if (srcidx >= srcrd->rd_nfrags) { @@ -1093,16 +1083,16 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, break; } - if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) { - CERROR("RDMA too fragmented for %s (%d): " - "%d/%d src %d/%d dst frags\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - IBLND_RDMA_FRAGS(conn->ibc_version), - srcidx, srcrd->rd_nfrags, - dstidx, dstrd->rd_nfrags); - rc = -EMSGSIZE; - break; - } + if (tx->tx_nwrq >= conn->ibc_max_frags) { + CERROR("RDMA has too many fragments for peer_ni %s (%d), " + "src idx/frags: %d/%d dst idx/frags: %d/%d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + conn->ibc_max_frags, + srcidx, srcrd->rd_nfrags, + dstidx, dstrd->rd_nfrags); + rc = -EMSGSIZE; + break; + 
} wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx), kiblnd_rd_frag_size(dstrd, dstidx)), resid); @@ -1114,15 +1104,20 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, wrq = &tx->tx_wrq[tx->tx_nwrq]; - wrq->next = wrq + 1; - wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); - wrq->sg_list = sge; - wrq->num_sge = 1; - wrq->opcode = IB_WR_RDMA_WRITE; - wrq->send_flags = 0; - - wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); - wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx); + wrq->wr.next = &(wrq + 1)->wr; + wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); + wrq->wr.sg_list = sge; + wrq->wr.num_sge = 1; + wrq->wr.opcode = IB_WR_RDMA_WRITE; + wrq->wr.send_flags = 0; + +#ifdef HAVE_IB_RDMA_WR + wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); + wrq->rkey = kiblnd_rd_frag_key(dstrd, dstidx); +#else + wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); + wrq->wr.wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx); +#endif srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob); dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob); @@ -1145,17 +1140,19 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, return rc; } -void -kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) +static void +kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) { - cfs_list_t *q; + struct list_head *q; - LASSERT (tx->tx_nwrq > 0); /* work items set up */ - LASSERT (!tx->tx_queued); /* not queued for sending already */ - LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); + LASSERT(tx->tx_nwrq > 0); /* work items set up */ + LASSERT(!tx->tx_queued); /* not queued for sending already */ + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); - tx->tx_queued = 1; - tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * CFS_HZ); + tx->tx_queued = 1; + tx->tx_deadline = jiffies + + msecs_to_jiffies(*kiblnd_tunables.kib_timeout * + MSEC_PER_SEC); if (tx->tx_conn == NULL) { kiblnd_conn_addref(conn); @@ -1187,7 +1184,7 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) if (IBLND_OOB_CAPABLE(conn->ibc_version)) q = &conn->ibc_tx_queue_nocred; else - q = &conn->ibc_tx_queue; + q = &conn->ibc_tx_noops; break; case IBLND_MSG_IMMEDIATE: @@ -1195,36 +1192,75 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) break; } - cfs_list_add_tail(&tx->tx_list, q); + list_add_tail(&tx->tx_list, q); } -void +static void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn) { - cfs_spin_lock(&conn->ibc_lock); - kiblnd_queue_tx_locked(tx, conn); - cfs_spin_unlock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); + kiblnd_queue_tx_locked(tx, conn); + kiblnd_check_sends_locked(conn); + spin_unlock(&conn->ibc_lock); +} + +static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, + struct sockaddr_in *srcaddr, + struct sockaddr_in *dstaddr, + int timeout_ms) +{ + unsigned short port; + int rc; + + /* allow the port to be reused */ + rc = rdma_set_reuseaddr(cmid, 1); + if (rc != 0) { + CERROR("Unable to set reuse on cmid: %d\n", rc); + return rc; + } - kiblnd_check_sends(conn); + /* look for a free privileged port */ + for (port = PROT_SOCK-1; port > 0; port--) { + srcaddr->sin_port = htons(port); + rc = rdma_resolve_addr(cmid, + (struct sockaddr *)srcaddr, + (struct sockaddr *)dstaddr, + timeout_ms); + if (rc == 0) { + CDEBUG(D_NET, "bound to port %hu\n", port); + return 0; + } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) { + CDEBUG(D_NET, "bind to port %hu failed: %d\n", + port, rc); + } else { + return rc; + } + } + + CERROR("Failed to bind to a free 
privileged port\n"); + return rc; } -void -kiblnd_connect_peer (kib_peer_t *peer) +static void +kiblnd_connect_peer (kib_peer_ni_t *peer_ni) { struct rdma_cm_id *cmid; kib_dev_t *dev; - kib_net_t *net = peer->ibp_ni->ni_data; + kib_net_t *net = peer_ni->ibp_ni->ni_data; struct sockaddr_in srcaddr; struct sockaddr_in dstaddr; int rc; LASSERT (net != NULL); - LASSERT (peer->ibp_connecting > 0); + LASSERT (peer_ni->ibp_connecting > 0); + LASSERT(!peer_ni->ibp_reconnecting); + + cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP, + IB_QPT_RC); - cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP); if (IS_ERR(cmid)) { CERROR("Can't create CMID for %s: %ld\n", - libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid)); + libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid)); rc = PTR_ERR(cmid); goto failed; } @@ -1237,39 +1273,100 @@ kiblnd_connect_peer (kib_peer_t *peer) memset(&dstaddr, 0, sizeof(dstaddr)); dstaddr.sin_family = AF_INET; dstaddr.sin_port = htons(*kiblnd_tunables.kib_service); - dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer->ibp_nid)); - - kiblnd_peer_addref(peer); /* cmid's ref */ - - rc = rdma_resolve_addr(cmid, - (struct sockaddr *)&srcaddr, - (struct sockaddr *)&dstaddr, - *kiblnd_tunables.kib_timeout * 1000); - if (rc == 0) { - LASSERT (cmid->device != NULL); - CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n", - libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, - HIPQUAD(dev->ibd_ifip), cmid->device->name); - return; + dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid)); + + kiblnd_peer_addref(peer_ni); /* cmid's ref */ + + if (*kiblnd_tunables.kib_use_priv_port) { + rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr, + *kiblnd_tunables.kib_timeout * 1000); + } else { + rc = rdma_resolve_addr(cmid, + (struct sockaddr *)&srcaddr, + (struct sockaddr *)&dstaddr, + *kiblnd_tunables.kib_timeout * 1000); } + if (rc != 0) { + /* Can't initiate address resolution: */ + CERROR("Can't resolve addr for %s: %d\n", + libcfs_nid2str(peer_ni->ibp_nid), rc); + goto failed2; + } + + LASSERT (cmid->device != NULL); + CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", + libcfs_nid2str(peer_ni->ibp_nid), dev->ibd_ifname, + &dev->ibd_ifip, cmid->device->name); - /* Can't initiate address resolution: */ - CERROR("Can't resolve addr for %s: %d\n", - libcfs_nid2str(peer->ibp_nid), rc); + return; - kiblnd_peer_decref(peer); /* cmid's ref */ - rdma_destroy_id(cmid); + failed2: + kiblnd_peer_connect_failed(peer_ni, 1, rc); + kiblnd_peer_decref(peer_ni); /* cmid's ref */ + rdma_destroy_id(cmid); + return; failed: - kiblnd_peer_connect_failed(peer, 1, rc); + kiblnd_peer_connect_failed(peer_ni, 1, rc); +} + +bool +kiblnd_reconnect_peer(kib_peer_ni_t *peer_ni) +{ + rwlock_t *glock = &kiblnd_data.kib_global_lock; + char *reason = NULL; + struct list_head txs; + unsigned long flags; + + INIT_LIST_HEAD(&txs); + + write_lock_irqsave(glock, flags); + if (peer_ni->ibp_reconnecting == 0) { + if (peer_ni->ibp_accepting) + reason = "accepting"; + else if (peer_ni->ibp_connecting) + reason = "connecting"; + else if (!list_empty(&peer_ni->ibp_conns)) + reason = "connected"; + else /* connected then closed */ + reason = "closed"; + + goto no_reconnect; + } + + LASSERT(!peer_ni->ibp_accepting && !peer_ni->ibp_connecting && + list_empty(&peer_ni->ibp_conns)); + peer_ni->ibp_reconnecting = 0; + + if (!kiblnd_peer_active(peer_ni)) { + list_splice_init(&peer_ni->ibp_tx_queue, &txs); + reason = "unlinked"; + goto no_reconnect; + } + + peer_ni->ibp_connecting++; + 
peer_ni->ibp_reconnected++; + + write_unlock_irqrestore(glock, flags); + + kiblnd_connect_peer(peer_ni); + return true; + + no_reconnect: + write_unlock_irqrestore(glock, flags); + + CWARN("Abort reconnection of %s: %s\n", + libcfs_nid2str(peer_ni->ibp_nid), reason); + kiblnd_txlist_done(peer_ni->ibp_ni, &txs, -ECONNABORTED); + return false; } void -kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) +kiblnd_launch_tx(struct lnet_ni *ni, kib_tx_t *tx, lnet_nid_t nid) { - kib_peer_t *peer; - kib_peer_t *peer2; + kib_peer_ni_t *peer_ni; + kib_peer_ni_t *peer2; kib_conn_t *conn; - cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; unsigned long flags; int rc; @@ -1279,17 +1376,17 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ LASSERT (tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ - /* First time, just use a read lock since I expect to find my peer + /* First time, just use a read lock since I expect to find my peer_ni * connected */ - cfs_read_lock_irqsave(g_lock, flags); + read_lock_irqsave(g_lock, flags); - peer = kiblnd_find_peer_locked(nid); - if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) { - /* Found a peer with an established connection */ - conn = kiblnd_get_conn_locked(peer); + peer_ni = kiblnd_find_peer_locked(ni, nid); + if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) { + /* Found a peer_ni with an established connection */ + conn = kiblnd_get_conn_locked(peer_ni); kiblnd_conn_addref(conn); /* 1 ref for me... */ - cfs_read_unlock_irqrestore(g_lock, flags); + read_unlock_irqrestore(g_lock, flags); if (tx != NULL) kiblnd_queue_tx(tx, conn); @@ -1297,25 +1394,24 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) return; } - cfs_read_unlock(g_lock); - /* Re-try with a write lock */ - cfs_write_lock(g_lock); + read_unlock(g_lock); + /* Re-try with a write lock */ + write_lock(g_lock); - peer = kiblnd_find_peer_locked(nid); - if (peer != NULL) { - if (cfs_list_empty(&peer->ibp_conns)) { - /* found a peer, but it's still connecting... */ - LASSERT (peer->ibp_connecting != 0 || - peer->ibp_accepting != 0); + peer_ni = kiblnd_find_peer_locked(ni, nid); + if (peer_ni != NULL) { + if (list_empty(&peer_ni->ibp_conns)) { + /* found a peer_ni, but it's still connecting... */ + LASSERT(kiblnd_peer_connecting(peer_ni)); if (tx != NULL) - cfs_list_add_tail(&tx->tx_list, - &peer->ibp_tx_queue); - cfs_write_unlock_irqrestore(g_lock, flags); - } else { - conn = kiblnd_get_conn_locked(peer); - kiblnd_conn_addref(conn); /* 1 ref for me... */ + list_add_tail(&tx->tx_list, + &peer_ni->ibp_tx_queue); + write_unlock_irqrestore(g_lock, flags); + } else { + conn = kiblnd_get_conn_locked(peer_ni); + kiblnd_conn_addref(conn); /* 1 ref for me... 
*/ - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); if (tx != NULL) kiblnd_queue_tx(tx, conn); @@ -1324,101 +1420,101 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) return; } - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); - /* Allocate a peer ready to add to the peer table and retry */ - rc = kiblnd_create_peer(ni, &peer, nid); - if (rc != 0) { - CERROR("Can't create peer %s\n", libcfs_nid2str(nid)); - if (tx != NULL) { - tx->tx_status = -EHOSTUNREACH; - tx->tx_waiting = 0; - kiblnd_tx_done(ni, tx); - } - return; - } + /* Allocate a peer_ni ready to add to the peer_ni table and retry */ + rc = kiblnd_create_peer(ni, &peer_ni, nid); + if (rc != 0) { + CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid)); + if (tx != NULL) { + tx->tx_status = -EHOSTUNREACH; + tx->tx_waiting = 0; + kiblnd_tx_done(ni, tx); + } + return; + } - cfs_write_lock_irqsave(g_lock, flags); + write_lock_irqsave(g_lock, flags); - peer2 = kiblnd_find_peer_locked(nid); + peer2 = kiblnd_find_peer_locked(ni, nid); if (peer2 != NULL) { - if (cfs_list_empty(&peer2->ibp_conns)) { - /* found a peer, but it's still connecting... */ - LASSERT (peer2->ibp_connecting != 0 || - peer2->ibp_accepting != 0); + if (list_empty(&peer2->ibp_conns)) { + /* found a peer_ni, but it's still connecting... */ + LASSERT(kiblnd_peer_connecting(peer2)); if (tx != NULL) - cfs_list_add_tail(&tx->tx_list, + list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue); - cfs_write_unlock_irqrestore(g_lock, flags); - } else { - conn = kiblnd_get_conn_locked(peer2); - kiblnd_conn_addref(conn); /* 1 ref for me... */ + write_unlock_irqrestore(g_lock, flags); + } else { + conn = kiblnd_get_conn_locked(peer2); + kiblnd_conn_addref(conn); /* 1 ref for me... 
*/ - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); if (tx != NULL) kiblnd_queue_tx(tx, conn); kiblnd_conn_decref(conn); /* ...to here */ } - kiblnd_peer_decref(peer); + kiblnd_peer_decref(peer_ni); return; } - /* Brand new peer */ - LASSERT (peer->ibp_connecting == 0); - peer->ibp_connecting = 1; + /* Brand new peer_ni */ + LASSERT (peer_ni->ibp_connecting == 0); + peer_ni->ibp_connecting = 1; /* always called with a ref on ni, which prevents ni being shutdown */ LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0); if (tx != NULL) - cfs_list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); + list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue); - kiblnd_peer_addref(peer); - cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); + kiblnd_peer_addref(peer_ni); + list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid)); - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); - kiblnd_connect_peer(peer); - kiblnd_peer_decref(peer); + kiblnd_connect_peer(peer_ni); + kiblnd_peer_decref(peer_ni); } int -kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) +kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) { - lnet_hdr_t *hdr = &lntmsg->msg_hdr; - int type = lntmsg->msg_type; - lnet_process_id_t target = lntmsg->msg_target; - int target_is_router = lntmsg->msg_target_is_router; - int routing = lntmsg->msg_routing; - unsigned int payload_niov = lntmsg->msg_niov; - struct iovec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; - kib_msg_t *ibmsg; - kib_tx_t *tx; - int nob; - int rc; + struct lnet_hdr *hdr = &lntmsg->msg_hdr; + int type = lntmsg->msg_type; + struct lnet_process_id target = lntmsg->msg_target; + int target_is_router = lntmsg->msg_target_is_router; + int routing = lntmsg->msg_routing; + unsigned int payload_niov = lntmsg->msg_niov; + struct kvec *payload_iov = lntmsg->msg_iov; + lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + kib_msg_t *ibmsg; + kib_rdma_desc_t *rd; + kib_tx_t *tx; + int nob; + int rc; /* NB 'private' is different depending on what we're sending.... 
  */
        CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
               payload_nob, payload_niov, libcfs_id2str(target));

-        LASSERT (payload_nob == 0 || payload_niov > 0);
-        LASSERT (payload_niov <= LNET_MAX_IOV);
+        LASSERT (payload_nob == 0 || payload_niov > 0);
+        LASSERT (payload_niov <= LNET_MAX_IOV);

-        /* Thread context */
-        LASSERT (!cfs_in_interrupt());
-        /* payload is either all vaddrs or all pages */
-        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+        /* Thread context */
+        LASSERT (!in_interrupt());
+        /* payload is either all vaddrs or all pages */
+        LASSERT (!(payload_kiov != NULL && payload_iov != NULL));

-        switch (type) {
-        default:
-                LBUG();
-                return (-EIO);
+        switch (type) {
+        default:
+                LBUG();
+                return (-EIO);

         case LNET_MSG_ACK:
                 LASSERT (payload_nob == 0);
@@ -1433,37 +1529,35 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                 if (nob <= IBLND_MSG_SIZE)
                         break;                  /* send IMMEDIATE */

-                tx = kiblnd_get_idle_tx(ni);
-                if (tx == NULL) {
-                        CERROR("Can't allocate txd for GET to %s: \n",
-                               libcfs_nid2str(target.nid));
-                        return -ENOMEM;
-                }
-
-                ibmsg = tx->tx_msg;
-
-                if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
-                        rc = kiblnd_setup_rd_iov(ni, tx,
-                                                 &ibmsg->ibm_u.get.ibgm_rd,
-                                                 lntmsg->msg_md->md_niov,
-                                                 lntmsg->msg_md->md_iov.iov,
-                                                 0, lntmsg->msg_md->md_length);
-                else
-                        rc = kiblnd_setup_rd_kiov(ni, tx,
-                                                  &ibmsg->ibm_u.get.ibgm_rd,
-                                                  lntmsg->msg_md->md_niov,
-                                                  lntmsg->msg_md->md_iov.kiov,
-                                                  0, lntmsg->msg_md->md_length);
-                if (rc != 0) {
-                        CERROR("Can't setup GET sink for %s: %d\n",
-                               libcfs_nid2str(target.nid), rc);
-                        kiblnd_tx_done(ni, tx);
-                        return -EIO;
-                }
-
-                nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
-                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
-                ibmsg->ibm_u.get.ibgm_hdr = *hdr;
+                tx = kiblnd_get_idle_tx(ni, target.nid);
+                if (tx == NULL) {
+                        CERROR("Can't allocate txd for GET to %s\n",
+                               libcfs_nid2str(target.nid));
+                        return -ENOMEM;
+                }
+
+                ibmsg = tx->tx_msg;
+                rd = &ibmsg->ibm_u.get.ibgm_rd;
+                if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
+                        rc = kiblnd_setup_rd_iov(ni, tx, rd,
+                                                 lntmsg->msg_md->md_niov,
+                                                 lntmsg->msg_md->md_iov.iov,
+                                                 0, lntmsg->msg_md->md_length);
+                else
+                        rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+                                                  lntmsg->msg_md->md_niov,
+                                                  lntmsg->msg_md->md_iov.kiov,
+                                                  0, lntmsg->msg_md->md_length);
+                if (rc != 0) {
+                        CERROR("Can't setup GET sink for %s: %d\n",
+                               libcfs_nid2str(target.nid), rc);
+                        kiblnd_tx_done(ni, tx);
+                        return -EIO;
+                }
+
+                nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
+                ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
+                ibmsg->ibm_u.get.ibgm_hdr = *hdr;

                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
@@ -1487,7 +1581,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
                 if (nob <= IBLND_MSG_SIZE)
                         break;                  /* send IMMEDIATE */

-                tx = kiblnd_get_idle_tx(ni);
+                tx = kiblnd_get_idle_tx(ni, target.nid);
                 if (tx == NULL) {
                         CERROR("Can't allocate %s txd for %s\n",
                                type == LNET_MSG_PUT ? "PUT" : "REPLY",
@@ -1526,7 +1620,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
                  <= IBLND_MSG_SIZE);

-        tx = kiblnd_get_idle_tx(ni);
+        tx = kiblnd_get_idle_tx(ni, target.nid);
         if (tx == NULL) {
                 CERROR ("Can't send %d to %s: tx descs exhausted\n",
                         type, libcfs_nid2str(target.nid));
@@ -1555,19 +1649,19 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
         return 0;
 }

-void
-kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
+static void
+kiblnd_reply(struct lnet_ni *ni, kib_rx_t *rx, struct lnet_msg *lntmsg)
 {
-        lnet_process_id_t target = lntmsg->msg_target;
+        struct lnet_process_id target = lntmsg->msg_target;
         unsigned int      niov = lntmsg->msg_niov;
-        struct iovec     *iov = lntmsg->msg_iov;
+        struct kvec      *iov = lntmsg->msg_iov;
         lnet_kiov_t      *kiov = lntmsg->msg_kiov;
         unsigned int      offset = lntmsg->msg_offset;
         unsigned int      nob = lntmsg->msg_len;
         kib_tx_t         *tx;
         int               rc;

-        tx = kiblnd_get_idle_tx(ni);
+        tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
         if (tx == NULL) {
                 CERROR("Can't get tx for REPLY to %s\n",
                        libcfs_nid2str(target.nid));
@@ -1618,27 +1712,26 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
 }

 int
-kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
-             unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
-             unsigned int offset, unsigned int mlen, unsigned int rlen)
+kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
+            int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+            unsigned int offset, unsigned int mlen, unsigned int rlen)
 {
         kib_rx_t    *rx = private;
         kib_msg_t   *rxmsg = rx->rx_msg;
         kib_conn_t  *conn = rx->rx_conn;
         kib_tx_t    *tx;
-        kib_msg_t   *txmsg;
-        int          nob;
-        int          post_credit = IBLND_POSTRX_PEER_CREDIT;
-        int          rc = 0;
+        int          nob;
+        int          post_credit = IBLND_POSTRX_PEER_CREDIT;
+        int          rc = 0;

-        LASSERT (mlen <= rlen);
-        LASSERT (!cfs_in_interrupt());
-        /* Either all pages or all vaddrs */
-        LASSERT (!(kiov != NULL && iov != NULL));
+        LASSERT (mlen <= rlen);
+        LASSERT (!in_interrupt());
+        /* Either all pages or all vaddrs */
+        LASSERT (!(kiov != NULL && iov != NULL));

-        switch (rxmsg->ibm_type) {
-        default:
-                LBUG();
+        switch (rxmsg->ibm_type) {
+        default:
+                LBUG();

         case IBLND_MSG_IMMEDIATE:
                 nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
@@ -1663,7 +1756,10 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                 lnet_finalize (ni, lntmsg, 0);
                 break;

-        case IBLND_MSG_PUT_REQ:
+        case IBLND_MSG_PUT_REQ: {
+                kib_msg_t       *txmsg;
+                kib_rdma_desc_t *rd;
+
                 if (mlen == 0) {
                         lnet_finalize(ni, lntmsg, 0);
                         kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
@@ -1671,7 +1767,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                         break;
                 }

-                tx = kiblnd_get_idle_tx(ni);
+                tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
                 if (tx == NULL) {
                         CERROR("Can't allocate tx for %s\n",
                                libcfs_nid2str(conn->ibc_peer->ibp_nid));
@@ -1680,28 +1776,27 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                         break;
                 }

-                txmsg = tx->tx_msg;
-                if (kiov == NULL)
-                        rc = kiblnd_setup_rd_iov(ni, tx,
-                                                 &txmsg->ibm_u.putack.ibpam_rd,
-                                                 niov, iov, offset, mlen);
-                else
-                        rc = kiblnd_setup_rd_kiov(ni, tx,
-                                                  &txmsg->ibm_u.putack.ibpam_rd,
-                                                  niov, kiov, offset, mlen);
-                if (rc != 0) {
-                        CERROR("Can't setup PUT sink for %s: %d\n",
-                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
-                        kiblnd_tx_done(ni, tx);
-                        /* tell peer it's over */
-                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
-                                               rxmsg->ibm_u.putreq.ibprm_cookie);
-                        break;
-                }
-
-                nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
-                txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
-                txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
+                txmsg = tx->tx_msg;
+                rd = &txmsg->ibm_u.putack.ibpam_rd;
+                if (kiov == NULL)
+                        rc = kiblnd_setup_rd_iov(ni, tx, rd,
+                                                 niov, iov, offset, mlen);
+                else
+                        rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+                                                  niov, kiov, offset, mlen);
+                if (rc != 0) {
+                        CERROR("Can't setup PUT sink for %s: %d\n",
+                               libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+                        kiblnd_tx_done(ni, tx);
+                        /* tell peer_ni it's over */
+                        kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
+                                               rxmsg->ibm_u.putreq.ibprm_cookie);
+                        break;
+                }
+
+                nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
+                txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+                txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;

                 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
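The GET and PUT hunks above now size the wire message from rd->rd_nfrags, the number of fragments the DMA mapping actually produced, instead of tx->tx_nfrags; the message ends right after the last mapped fragment descriptor. A minimal userspace sketch of that offsetof() sizing idiom follows; the struct and field names are illustrative stand-ins, not the o2iblnd definitions:

#include <stddef.h>
#include <stdio.h>

#define MAX_FRAGS 256                   /* illustrative cap on mappable frags */

struct frag {
        unsigned int       nob;         /* bytes covered by this fragment */
        unsigned long long addr;        /* remote address of the fragment */
};

struct rdma_desc {
        unsigned int key;
        unsigned int nfrags;            /* fragments actually mapped */
        struct frag  frags[MAX_FRAGS];  /* only the first nfrags go on the wire */
};

struct get_msg {
        unsigned long long cookie;
        struct rdma_desc   rd;          /* descriptor is the last member */
};

int main(void)
{
        unsigned int nfrags = 3;        /* from the mapping, not the page count */
        /* the kernel writes this as offsetof(..., rd_frags[nfrags]); the
         * arithmetic below is the strictly portable equivalent */
        size_t nob = offsetof(struct get_msg, rd.frags) +
                     nfrags * sizeof(struct frag);

        printf("wire size for %u mapped frags: %zu bytes\n", nfrags, nob);
        return 0;
}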
@@ -1712,6 +1807,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
                 /* reposted buffer reserved for PUT_DONE */
                 post_credit = IBLND_POSTRX_NO_CREDIT;
                 break;
+        }

         case IBLND_MSG_GET_REQ:
                 if (lntmsg != NULL) {
@@ -1731,55 +1827,52 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
 }

 int
-kiblnd_thread_start (int (*fn)(void *arg), void *arg)
+kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
 {
-        long    pid = cfs_kernel_thread (fn, arg, 0);
+        struct task_struct *task = kthread_run(fn, arg, name);

-        if (pid < 0)
-                return ((int)pid);
+        if (IS_ERR(task))
+                return PTR_ERR(task);

-        cfs_atomic_inc (&kiblnd_data.kib_nthreads);
-        return (0);
+        atomic_inc(&kiblnd_data.kib_nthreads);
+        return 0;
 }

-void
+static void
 kiblnd_thread_fini (void)
 {
-        cfs_atomic_dec (&kiblnd_data.kib_nthreads);
+        atomic_dec (&kiblnd_data.kib_nthreads);
 }

-void
-kiblnd_peer_alive (kib_peer_t *peer)
+static void
+kiblnd_peer_alive (kib_peer_ni_t *peer_ni)
 {
-        /* This is racy, but everyone's only writing cfs_time_current() */
-        peer->ibp_last_alive = cfs_time_current();
-        cfs_mb();
+        /* This is racy, but everyone's only writing cfs_time_current() */
+        peer_ni->ibp_last_alive = cfs_time_current();
+        smp_mb();
 }

-void
-kiblnd_peer_notify (kib_peer_t *peer)
+static void
+kiblnd_peer_notify (kib_peer_ni_t *peer_ni)
 {
         int           error = 0;
         cfs_time_t    last_alive = 0;
         unsigned long flags;

-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

-        if (cfs_list_empty(&peer->ibp_conns) &&
-            peer->ibp_accepting == 0 &&
-            peer->ibp_connecting == 0 &&
-            peer->ibp_error != 0) {
-                error = peer->ibp_error;
-                peer->ibp_error = 0;
+        if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
+                error = peer_ni->ibp_error;
+                peer_ni->ibp_error = 0;

-                last_alive = peer->ibp_last_alive;
+                last_alive = peer_ni->ibp_last_alive;
         }

-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

         if (error != 0)
-                lnet_notify(peer->ibp_ni,
-                            peer->ibp_nid, 0, last_alive);
+                lnet_notify(peer_ni->ibp_ni,
+                            peer_ni->ibp_nid, 0, last_alive);
 }

 void
@@ -1791,8 +1884,9 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
          * connection to be finished off by the connd. Otherwise the connd is
          * already dealing with it (either to set it up or tear it down).
          * Caller holds kib_global_lock exclusively in irq context */
+        kib_peer_ni_t *peer_ni = conn->ibc_peer;
+        kib_dev_t     *dev;
         unsigned long  flags;
-        kib_peer_t    *peer = conn->ibc_peer;

         LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);

@@ -1803,290 +1897,307 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error)
                 return; /* already being handled */

         if (error == 0 &&
-            cfs_list_empty(&conn->ibc_tx_queue) &&
-            cfs_list_empty(&conn->ibc_tx_queue_rsrvd) &&
-            cfs_list_empty(&conn->ibc_tx_queue_nocred) &&
-            cfs_list_empty(&conn->ibc_active_txs)) {
+            list_empty(&conn->ibc_tx_noops) &&
+            list_empty(&conn->ibc_tx_queue) &&
+            list_empty(&conn->ibc_tx_queue_rsrvd) &&
+            list_empty(&conn->ibc_tx_queue_nocred) &&
+            list_empty(&conn->ibc_active_txs)) {
                 CDEBUG(D_NET, "closing conn to %s\n",
-                       libcfs_nid2str(peer->ibp_nid));
+                       libcfs_nid2str(peer_ni->ibp_nid));
         } else {
-                CDEBUG(D_NETERROR, "Closing conn to %s: error %d%s%s%s%s\n",
-                       libcfs_nid2str(peer->ibp_nid), error,
-                       cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
-                       cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
-                       cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
-                       cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
-        }
-
-        cfs_list_del(&conn->ibc_list);
+                CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
+                        libcfs_nid2str(peer_ni->ibp_nid), error,
+                        list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+                        list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+                        list_empty(&conn->ibc_tx_queue_rsrvd) ?
+                                "" : "(sending_rsrvd)",
+                        list_empty(&conn->ibc_tx_queue_nocred) ?
+                                "" : "(sending_nocred)",
+                        list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+        }
+
+        dev = ((kib_net_t *)peer_ni->ibp_ni->ni_data)->ibn_dev;
+        list_del(&conn->ibc_list);
         /* connd (see below) takes over ibc_list's ref */

-        if (cfs_list_empty (&peer->ibp_conns) &&    /* no more conns */
-            kiblnd_peer_active(peer)) {         /* still in peer table */
-                kiblnd_unlink_peer_locked(peer);
+        if (list_empty(&peer_ni->ibp_conns) &&  /* no more conns */
+            kiblnd_peer_active(peer_ni)) {      /* still in peer_ni table */
+                kiblnd_unlink_peer_locked(peer_ni);

                 /* set/clear error on last conn */
-                peer->ibp_error = conn->ibc_comms_error;
+                peer_ni->ibp_error = conn->ibc_comms_error;
         }

         kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);

-        cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+        if (error != 0 &&
+            kiblnd_dev_can_failover(dev)) {
+                list_add_tail(&dev->ibd_fail_list,
+                              &kiblnd_data.kib_failed_devs);
+                wake_up(&kiblnd_data.kib_failover_waitq);
+        }
+
+        spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);

-        cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
-        cfs_waitq_signal (&kiblnd_data.kib_connd_waitq);
+        list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+        wake_up(&kiblnd_data.kib_connd_waitq);

-        cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+        spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
 }

 void
-kiblnd_close_conn (kib_conn_t *conn, int error)
+kiblnd_close_conn(kib_conn_t *conn, int error)
 {
-        unsigned long flags;
+        unsigned long flags;

-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

-        kiblnd_close_conn_locked(conn, error);
+        kiblnd_close_conn_locked(conn, error);

-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }

-void
+static void
 kiblnd_handle_early_rxs(kib_conn_t *conn)
 {
-        unsigned long flags;
-        kib_rx_t     *rx;
+        unsigned long  flags;
+        kib_rx_t      *rx;

-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+        LASSERT(!in_interrupt());
+        LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);

-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        while (!cfs_list_empty(&conn->ibc_early_rxs)) {
-                rx = cfs_list_entry(conn->ibc_early_rxs.next,
-                                    kib_rx_t, rx_list);
-                cfs_list_del(&rx->rx_list);
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                            flags);
+        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        while (!list_empty(&conn->ibc_early_rxs)) {
+                rx = list_entry(conn->ibc_early_rxs.next,
+                                kib_rx_t, rx_list);
+                list_del(&rx->rx_list);
+                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

-                kiblnd_handle_rx(rx);
+                kiblnd_handle_rx(rx);

-                cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        }
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+                write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        }
+        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
 }

-void
-kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs)
+static void
+kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
 {
-        CFS_LIST_HEAD (zombies);
-        cfs_list_t    *tmp;
-        cfs_list_t    *nxt;
-        kib_tx_t      *tx;
+        struct list_head  zombies = LIST_HEAD_INIT(zombies);
+        struct list_head *tmp;
+        struct list_head *nxt;
+        kib_tx_t         *tx;

-        cfs_spin_lock(&conn->ibc_lock);
+        spin_lock(&conn->ibc_lock);

-        cfs_list_for_each_safe (tmp, nxt, txs) {
-                tx = cfs_list_entry (tmp, kib_tx_t, tx_list);
+        list_for_each_safe(tmp, nxt, txs) {
+                tx = list_entry(tmp, kib_tx_t, tx_list);

-                if (txs == &conn->ibc_active_txs) {
-                        LASSERT (!tx->tx_queued);
-                        LASSERT (tx->tx_waiting ||
-                                 tx->tx_sending != 0);
-                } else {
-                        LASSERT (tx->tx_queued);
-                }
+                if (txs == &conn->ibc_active_txs) {
+                        LASSERT(!tx->tx_queued);
+                        LASSERT(tx->tx_waiting ||
+                                tx->tx_sending != 0);
+                } else {
+                        LASSERT(tx->tx_queued);
+                }

-                tx->tx_status = -ECONNABORTED;
-                tx->tx_waiting = 0;
+                tx->tx_status = -ECONNABORTED;
+                tx->tx_waiting = 0;

-                if (tx->tx_sending == 0) {
-                        tx->tx_queued = 0;
-                        cfs_list_del (&tx->tx_list);
-                        cfs_list_add (&tx->tx_list, &zombies);
-                }
-        }
+                if (tx->tx_sending == 0) {
+                        tx->tx_queued = 0;
+                        list_del(&tx->tx_list);
+                        list_add(&tx->tx_list, &zombies);
+                }
+        }

-        cfs_spin_unlock(&conn->ibc_lock);
+        spin_unlock(&conn->ibc_lock);

-        kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
-                           &zombies, -ECONNABORTED);
+        kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
 }
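kiblnd_abort_txs() above uses a common kernel idiom: while holding the spinlock it moves every tx that is safe to finish onto a private "zombies" list, then completes the zombies with no lock held, since finalisation may block or take other locks. A self-contained userspace sketch of the pattern, with toy types and a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

struct tx {
        struct tx *next;
        int        status;
        int        sending;     /* non-zero: completion still in flight */
};

static pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER;

static void abort_txs(struct tx **queue, int error)
{
        struct tx *zombies = NULL;
        struct tx **prev, *t;

        pthread_mutex_lock(&conn_lock);
        prev = queue;
        while ((t = *prev) != NULL) {
                t->status = error;
                if (t->sending == 0) {          /* safe to complete now */
                        *prev = t->next;        /* unlink from the queue */
                        t->next = zombies;      /* park on the private list */
                        zombies = t;
                } else {
                        prev = &t->next;        /* leave for the send CQ */
                }
        }
        pthread_mutex_unlock(&conn_lock);

        while ((t = zombies) != NULL) {         /* complete outside the lock */
                zombies = t->next;
                printf("tx %p finalised with status %d\n", (void *)t, t->status);
        }
}

int main(void)
{
        struct tx a = { 0 }, b = { 0 };
        struct tx *q = &a;

        a.next = &b;
        abort_txs(&q, -103 /* ECONNABORTED */);
        return 0;
}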
-void
+static void
 kiblnd_finalise_conn (kib_conn_t *conn)
 {
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->ibc_state > IBLND_CONN_INIT);
+        LASSERT (!in_interrupt());
+        LASSERT (conn->ibc_state > IBLND_CONN_INIT);

-        kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
+        kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);

-        /* abort_receives moves QP state to IB_QPS_ERR. This is only required
-         * for connections that didn't get as far as being connected, because
-         * rdma_disconnect() does this for free. */
-        kiblnd_abort_receives(conn);
+        /* abort_receives moves QP state to IB_QPS_ERR. This is only required
+         * for connections that didn't get as far as being connected, because
+         * rdma_disconnect() does this for free. */
+        kiblnd_abort_receives(conn);

-        /* Complete all tx descs not waiting for sends to complete.
-         * NB we should be safe from RDMA now that the QP has changed state */
+        /* Complete all tx descs not waiting for sends to complete.
+         * NB we should be safe from RDMA now that the QP has changed state */

-        kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
-        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
-        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
-        kiblnd_abort_txs(conn, &conn->ibc_active_txs);
+        kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
+        kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
+        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
+        kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
+        kiblnd_abort_txs(conn, &conn->ibc_active_txs);

-        kiblnd_handle_early_rxs(conn);
+        kiblnd_handle_early_rxs(conn);
 }

-void
-kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
+static void
+kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error)
 {
-        CFS_LIST_HEAD    (zombies);
-        unsigned long     flags;
+        struct list_head zombies = LIST_HEAD_INIT(zombies);
+        unsigned long    flags;

-        LASSERT (error != 0);
-        LASSERT (!cfs_in_interrupt());
+        LASSERT (error != 0);
+        LASSERT (!in_interrupt());

-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

-        if (active) {
-                LASSERT (peer->ibp_connecting > 0);
-                peer->ibp_connecting--;
-        } else {
-                LASSERT (peer->ibp_accepting > 0);
-                peer->ibp_accepting--;
-        }
+        if (active) {
+                LASSERT(peer_ni->ibp_connecting > 0);
+                peer_ni->ibp_connecting--;
+        } else {
+                LASSERT (peer_ni->ibp_accepting > 0);
+                peer_ni->ibp_accepting--;
+        }

-        if (peer->ibp_connecting != 0 ||
-            peer->ibp_accepting != 0) {
-                /* another connection attempt under way... */
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                            flags);
-                return;
-        }
+        if (kiblnd_peer_connecting(peer_ni)) {
+                /* another connection attempt under way... */
+                write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+                                        flags);
+                return;
+        }

-        if (cfs_list_empty(&peer->ibp_conns)) {
-                /* Take peer's blocked transmits to complete with error */
-                cfs_list_add(&zombies, &peer->ibp_tx_queue);
-                cfs_list_del_init(&peer->ibp_tx_queue);
+        peer_ni->ibp_reconnected = 0;
+        if (list_empty(&peer_ni->ibp_conns)) {
+                /* Take peer_ni's blocked transmits to complete with error */
+                list_add(&zombies, &peer_ni->ibp_tx_queue);
+                list_del_init(&peer_ni->ibp_tx_queue);

-                if (kiblnd_peer_active(peer))
-                        kiblnd_unlink_peer_locked(peer);
+                if (kiblnd_peer_active(peer_ni))
+                        kiblnd_unlink_peer_locked(peer_ni);

-                peer->ibp_error = error;
-        } else {
-                /* Can't have blocked transmits if there are connections */
-                LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
-        }
+                peer_ni->ibp_error = error;
+        } else {
+                /* Can't have blocked transmits if there are connections */
+                LASSERT(list_empty(&peer_ni->ibp_tx_queue));
+        }

-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

-        kiblnd_peer_notify(peer);
+        kiblnd_peer_notify(peer_ni);

-        if (cfs_list_empty (&zombies))
-                return;
+        if (list_empty(&zombies))
+                return;

-        CDEBUG (D_NETERROR, "Deleting messages for %s: connection failed\n",
-                libcfs_nid2str(peer->ibp_nid));
+        CNETERR("Deleting messages for %s: connection failed\n",
+                libcfs_nid2str(peer_ni->ibp_nid));

-        kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
+        kiblnd_txlist_done(peer_ni->ibp_ni, &zombies, -EHOSTUNREACH);
 }

-void
+static void
 kiblnd_connreq_done(kib_conn_t *conn, int status)
 {
-        kib_peer_t        *peer = conn->ibc_peer;
-        kib_tx_t          *tx;
-        cfs_list_t         txs;
-        unsigned long      flags;
-        int                active;
+        kib_peer_ni_t    *peer_ni = conn->ibc_peer;
+        kib_tx_t         *tx;
+        struct list_head  txs;
+        unsigned long     flags;
+        int               active;

         active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

-        CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
-               libcfs_nid2str(peer->ibp_nid), active,
-               conn->ibc_version, status);
+        CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
+               libcfs_nid2str(peer_ni->ibp_nid), active,
+               conn->ibc_version, status);

-        LASSERT (!cfs_in_interrupt());
-        LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
-                  peer->ibp_connecting > 0) ||
-                 (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
-                  peer->ibp_accepting > 0));
+        LASSERT (!in_interrupt());
+        LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
+                  peer_ni->ibp_connecting > 0) ||
+                 (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
+                  peer_ni->ibp_accepting > 0));

         LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
         conn->ibc_connvars = NULL;

         if (status != 0) {
                 /* failed to establish connection */
-                kiblnd_peer_connect_failed(peer, active, status);
+                kiblnd_peer_connect_failed(peer_ni, active, status);
                 kiblnd_finalise_conn(conn);
                 return;
         }

         /* connection established */
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

         conn->ibc_last_send = jiffies;
         kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
-        kiblnd_peer_alive(peer);
-
-        /* Add conn to peer's list and nuke any dangling conns from a different
-         * peer instance... */
-        kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
-        cfs_list_add(&conn->ibc_list, &peer->ibp_conns);
-        if (active)
-                peer->ibp_connecting--;
-        else
-                peer->ibp_accepting--;
-
-        if (peer->ibp_version == 0) {
-                peer->ibp_version     = conn->ibc_version;
-                peer->ibp_incarnation = conn->ibc_incarnation;
-        }
-
-        if (peer->ibp_version     != conn->ibc_version ||
-            peer->ibp_incarnation != conn->ibc_incarnation) {
-                kiblnd_close_stale_conns_locked(peer, conn->ibc_version,
+        kiblnd_peer_alive(peer_ni);
+
+        /* Add conn to peer_ni's list and nuke any dangling conns from a different
+         * peer_ni instance... */
+        kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
+        list_add(&conn->ibc_list, &peer_ni->ibp_conns);
+        peer_ni->ibp_reconnected = 0;
+        if (active)
+                peer_ni->ibp_connecting--;
+        else
+                peer_ni->ibp_accepting--;
+
+        if (peer_ni->ibp_version == 0) {
+                peer_ni->ibp_version     = conn->ibc_version;
+                peer_ni->ibp_incarnation = conn->ibc_incarnation;
+        }
+
+        if (peer_ni->ibp_version     != conn->ibc_version ||
+            peer_ni->ibp_incarnation != conn->ibc_incarnation) {
+                kiblnd_close_stale_conns_locked(peer_ni, conn->ibc_version,
                                                 conn->ibc_incarnation);
-                peer->ibp_version     = conn->ibc_version;
-                peer->ibp_incarnation = conn->ibc_incarnation;
+                peer_ni->ibp_version     = conn->ibc_version;
+                peer_ni->ibp_incarnation = conn->ibc_incarnation;
         }

-        /* grab pending txs while I have the lock */
-        cfs_list_add(&txs, &peer->ibp_tx_queue);
-        cfs_list_del_init(&peer->ibp_tx_queue);
+        /* grab pending txs while I have the lock */
+        list_add(&txs, &peer_ni->ibp_tx_queue);
+        list_del_init(&peer_ni->ibp_tx_queue);

-        if (!kiblnd_peer_active(peer) ||        /* peer has been deleted */
+        if (!kiblnd_peer_active(peer_ni) ||     /* peer_ni has been deleted */
             conn->ibc_comms_error != 0) {       /* error has happened already */
-                lnet_ni_t *ni = peer->ibp_ni;
+                struct lnet_ni *ni = peer_ni->ibp_ni;

                 /* start to shut down connection */
                 kiblnd_close_conn_locked(conn, -ECONNABORTED);
-                cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                            flags);
-
-                kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
-
-                return;
-        }
-
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
-        /* Schedule blocked txs */
-        cfs_spin_lock (&conn->ibc_lock);
-        while (!cfs_list_empty (&txs)) {
-                tx = cfs_list_entry (txs.next, kib_tx_t, tx_list);
-                cfs_list_del(&tx->tx_list);
-
-                kiblnd_queue_tx_locked(tx, conn);
-        }
-        cfs_spin_unlock (&conn->ibc_lock);
-
-        kiblnd_check_sends(conn);
-
-        /* schedule blocked rxs */
-        kiblnd_handle_early_rxs(conn);
+                write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+                kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
+
+                return;
+        }
+
+        /* +1 ref for myself, this connection is visible to other threads
+         * now, refcount of peer:ibp_conns can be released by connection
+         * close from either a different thread, or the calling of
+         * kiblnd_check_sends_locked() below. See bz21911 for details.
+         */
+        kiblnd_conn_addref(conn);
+        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

+        /* Schedule blocked txs */
+        spin_lock(&conn->ibc_lock);
+        while (!list_empty(&txs)) {
+                tx = list_entry(txs.next, kib_tx_t, tx_list);
+                list_del(&tx->tx_list);
+
+                kiblnd_queue_tx_locked(tx, conn);
+        }
+        kiblnd_check_sends_locked(conn);
+        spin_unlock(&conn->ibc_lock);
+
+        /* schedule blocked rxs */
+        kiblnd_handle_early_rxs(conn);
+        kiblnd_conn_decref(conn);
 }
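The "+1 ref for myself" hunk above guards against the connection being closed by another thread the instant it becomes visible on peer_ni->ibp_conns: the function keeps a private reference across the region that still touches the conn, and drops it last. A minimal sketch of that refcounting discipline, using illustrative names and C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        atomic_int refcount;
};

static void conn_decref(struct conn *c)
{
        /* fetch_sub returns the previous value: 1 means we held the last ref */
        if (atomic_fetch_sub(&c->refcount, 1) == 1) {
                printf("last ref dropped, freeing conn\n");
                free(c);
        }
}

int main(void)
{
        struct conn *c = malloc(sizeof(*c));

        atomic_init(&c->refcount, 1);   /* ref owned by the shared conn list */

        /* The conn is now published; another thread could close it and drop
         * the list's ref at any time.  Hold a private ref across the code
         * that still dereferences it. */
        atomic_fetch_add(&c->refcount, 1);

        /* ... post queued sends, handle early receives, etc. ... */

        conn_decref(c); /* drop my private ref */
        conn_decref(c); /* single-threaded stand-in for the list owner's drop */
        return 0;
}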
-void
+static void
 kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
 {
         int          rc;
@@ -2097,129 +2208,153 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
                 CWARN("Error %d sending reject\n", rc);
 }

-int
-kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
+static int
+kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
 {
-        cfs_rwlock_t          *g_lock = &kiblnd_data.kib_global_lock;
+        rwlock_t              *g_lock = &kiblnd_data.kib_global_lock;
         kib_msg_t             *reqmsg = priv;
         kib_msg_t             *ackmsg;
         kib_dev_t             *ibdev;
-        kib_peer_t            *peer;
-        kib_peer_t            *peer2;
+        kib_peer_ni_t         *peer_ni;
+        kib_peer_ni_t         *peer2;
         kib_conn_t            *conn;
-        lnet_ni_t             *ni  = NULL;
+        struct lnet_ni        *ni  = NULL;
         kib_net_t             *net = NULL;
         lnet_nid_t             nid;
         struct rdma_conn_param cp;
         kib_rej_t              rej;
-        int                    version = IBLND_MSG_VERSION;
-        unsigned long          flags;
-        int                    rc;
+        int                    version = IBLND_MSG_VERSION;
+        unsigned long          flags;
+        int                    rc;
+        struct sockaddr_in    *peer_addr;
+        LASSERT (!in_interrupt());

-        LASSERT (!cfs_in_interrupt());
-
-        /* cmid inherits 'context' from the corresponding listener id */
-        ibdev = (kib_dev_t *)cmid->context;
-        LASSERT (ibdev != NULL);
+        /* cmid inherits 'context' from the corresponding listener id */
+        ibdev = (kib_dev_t *)cmid->context;
+        LASSERT (ibdev != NULL);

         memset(&rej, 0, sizeof(rej));
         rej.ibr_magic                = IBLND_MSG_MAGIC;
         rej.ibr_why                  = IBLND_REJECT_FATAL;
         rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;

-        if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
-                CERROR("Short connection request\n");
-                goto failed;
-        }
-
-        /* Future protocol version compatibility support!  If the
-         * o2iblnd-specific protocol changes, or when LNET unifies
-         * protocols over all LNDs, the initial connection will
-         * negotiate a protocol version.  I trap this here to avoid
-         * console errors; the reject tells the peer which protocol I
-         * speak. */
-        if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
-            reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
-                goto failed;
-        if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
-            reqmsg->ibm_version != IBLND_MSG_VERSION &&
-            reqmsg->ibm_version != IBLND_MSG_VERSION_1)
-                goto failed;
-        if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
-            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
-            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
-                goto failed;
-
-        rc = kiblnd_unpack_msg(reqmsg, priv_nob);
-        if (rc != 0) {
-                CERROR("Can't parse connection request: %d\n", rc);
-                goto failed;
-        }
-
-        nid = reqmsg->ibm_srcnid;
-        ni  = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
-
-        if (ni != NULL) {
-                net = (kib_net_t *)ni->ni_data;
-                rej.ibr_incarnation = net->ibn_incarnation;
-        }
-
-        if (ni == NULL ||                         /* no matching net */
-            ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
-            net->ibn_dev != ibdev) {              /* wrong device */
-                CERROR("Can't accept %s on %s (%s:%d:%u.%u.%u.%u): "
-                       "bad dst nid %s\n", libcfs_nid2str(nid),
-                       ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
-                       ibdev->ibd_ifname, ibdev->ibd_nnets,
-                       HIPQUAD(ibdev->ibd_ifip),
-                       libcfs_nid2str(reqmsg->ibm_dstnid));
-
-                goto failed;
-        }
+        peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
+        if (*kiblnd_tunables.kib_require_priv_port &&
+            ntohs(peer_addr->sin_port) >= PROT_SOCK) {
+                __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+                CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
+                       &ip, ntohs(peer_addr->sin_port));
+                goto failed;
+        }
+
+        if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
+                CERROR("Short connection request\n");
+                goto failed;
+        }
+
+        /* Future protocol version compatibility support!  If the
+         * o2iblnd-specific protocol changes, or when LNET unifies
+         * protocols over all LNDs, the initial connection will
+         * negotiate a protocol version.  I trap this here to avoid
+         * console errors; the reject tells the peer_ni which protocol I
+         * speak. */
+        if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
+            reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
+                goto failed;
+        if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
+            reqmsg->ibm_version != IBLND_MSG_VERSION &&
+            reqmsg->ibm_version != IBLND_MSG_VERSION_1)
+                goto failed;
+        if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
+            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
+            reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
+                goto failed;
+
+        rc = kiblnd_unpack_msg(reqmsg, priv_nob);
+        if (rc != 0) {
+                CERROR("Can't parse connection request: %d\n", rc);
+                goto failed;
+        }
+
+        nid = reqmsg->ibm_srcnid;
+        ni  = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
+
+        if (ni != NULL) {
+                net = (kib_net_t *)ni->ni_data;
+                rej.ibr_incarnation = net->ibn_incarnation;
+        }
+
+        if (ni == NULL ||                         /* no matching net */
+            ni->ni_nid != reqmsg->ibm_dstnid ||   /* right NET, wrong NID! */
+            net->ibn_dev != ibdev) {              /* wrong device */
+                CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): "
+                       "bad dst nid %s\n", libcfs_nid2str(nid),
+                       ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
+                       ibdev->ibd_ifname, ibdev->ibd_nnets,
+                       &ibdev->ibd_ifip,
+                       libcfs_nid2str(reqmsg->ibm_dstnid));
+
+                goto failed;
+        }

         /* check time stamp as soon as possible */
-        if (reqmsg->ibm_dststamp != 0 &&
-            reqmsg->ibm_dststamp != net->ibn_incarnation) {
-                CWARN("Stale connection request\n");
-                rej.ibr_why = IBLND_REJECT_CONN_STALE;
-                goto failed;
-        }
-
-        /* I can accept peer's version */
-        version = reqmsg->ibm_version;
-
-        if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
-                CERROR("Unexpected connreq msg type: %x from %s\n",
-                       reqmsg->ibm_type, libcfs_nid2str(nid));
-                goto failed;
-        }
-
-        if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
-            IBLND_MSG_QUEUE_SIZE(version)) {
-                CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
-                       libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
-                       IBLND_MSG_QUEUE_SIZE(version));
-
-                if (version == IBLND_MSG_VERSION)
-                        rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
-
-                goto failed;
-        }
-
-        if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
-            IBLND_RDMA_FRAGS(version)) {
-                CERROR("Can't accept %s(version %x): "
-                       "incompatible max_frags %d (%d wanted)\n",
-                       libcfs_nid2str(nid), version,
-                       reqmsg->ibm_u.connparams.ibcp_max_frags,
-                       IBLND_RDMA_FRAGS(version));
-
-                if (version == IBLND_MSG_VERSION)
-                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
-
-                goto failed;
-
-        }
+        if (reqmsg->ibm_dststamp != 0 &&
+            reqmsg->ibm_dststamp != net->ibn_incarnation) {
+                CWARN("Stale connection request\n");
+                rej.ibr_why = IBLND_REJECT_CONN_STALE;
+                goto failed;
+        }
+
+        /* I can accept peer_ni's version */
+        version = reqmsg->ibm_version;
+
+        if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
+                CERROR("Unexpected connreq msg type: %x from %s\n",
+                       reqmsg->ibm_type, libcfs_nid2str(nid));
+                goto failed;
+        }
+
+        if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
+            kiblnd_msg_queue_size(version, ni)) {
+                CERROR("Can't accept conn from %s, queue depth too large: "
+                       " %d (<=%d wanted)\n",
+                       libcfs_nid2str(nid),
+                       reqmsg->ibm_u.connparams.ibcp_queue_depth,
+                       kiblnd_msg_queue_size(version, ni));
+
+                if (version == IBLND_MSG_VERSION)
+                        rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
+
+                goto failed;
+        }
+
+        if (reqmsg->ibm_u.connparams.ibcp_max_frags >
+            kiblnd_rdma_frags(version, ni)) {
+                CWARN("Can't accept conn from %s (version %x): "
+                      "max_frags %d too large (%d wanted)\n",
+                      libcfs_nid2str(nid), version,
+                      reqmsg->ibm_u.connparams.ibcp_max_frags,
+                      kiblnd_rdma_frags(version, ni));
+
+                if (version >= IBLND_MSG_VERSION)
+                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+
+                goto failed;
+        } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
+                   kiblnd_rdma_frags(version, ni) &&
+                   net->ibn_fmr_ps == NULL) {
+                CWARN("Can't accept conn from %s (version %x): "
+                      "max_frags %d incompatible without FMR pool "
+                      "(%d wanted)\n",
+                      libcfs_nid2str(nid), version,
+                      reqmsg->ibm_u.connparams.ibcp_max_frags,
+                      kiblnd_rdma_frags(version, ni));
+
+                if (version == IBLND_MSG_VERSION)
+                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+
+                goto failed;
+        }

         if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
                 CERROR("Can't accept %s: message size %d too big (%d max)\n",
@@ -2229,17 +2364,21 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 goto failed;
         }

-        /* assume 'nid' is a new peer; create  */
-        rc = kiblnd_create_peer(ni, &peer, nid);
-        if (rc != 0) {
-                CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
-                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
-                goto failed;
-        }
+        /* assume 'nid' is a new peer_ni; create  */
+        rc = kiblnd_create_peer(ni, &peer_ni, nid);
+        if (rc != 0) {
+                CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid));
+                rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
+                goto failed;
+        }

-        cfs_write_lock_irqsave(g_lock, flags);
+        /* We have validated the peer's parameters so use those */
+        peer_ni->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
+        peer_ni->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;

-        peer2 = kiblnd_find_peer_locked(nid);
+        write_lock_irqsave(g_lock, flags);
+
+        peer2 = kiblnd_find_peer_locked(ni, nid);
         if (peer2 != NULL) {
                 if (peer2->ibp_version == 0) {
                         peer2->ibp_version     = version;
@@ -2249,79 +2388,108 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
                 /* not the guy I've talked with */
                 if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
                     peer2->ibp_version     != version) {
-                        kiblnd_close_peer_conns_locked(peer2, -ESTALE);
-                        cfs_write_unlock_irqrestore(g_lock, flags);
-
-                        CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
-                              libcfs_nid2str(nid), peer2->ibp_version, version);
-
-                        kiblnd_peer_decref(peer);
-                        rej.ibr_why = IBLND_REJECT_CONN_STALE;
-                        goto failed;
-                }
+                        kiblnd_close_peer_conns_locked(peer2, -ESTALE);

-                /* tie-break connection race in favour of the higher NID */
-                if (peer2->ibp_connecting != 0 &&
-                    nid < ni->ni_nid) {
-                        cfs_write_unlock_irqrestore(g_lock, flags);
+                        if (kiblnd_peer_active(peer2)) {
+                                peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+                                peer2->ibp_version = version;
+                        }
+                        write_unlock_irqrestore(g_lock, flags);

-                        CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
+                        CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
+                              libcfs_nid2str(nid), peer2->ibp_version, version,
+                              peer2->ibp_incarnation, reqmsg->ibm_srcstamp);

-                        kiblnd_peer_decref(peer);
-                        rej.ibr_why = IBLND_REJECT_CONN_RACE;
+                        kiblnd_peer_decref(peer_ni);
+                        rej.ibr_why = IBLND_REJECT_CONN_STALE;
                         goto failed;
                 }

-                peer2->ibp_accepting++;
-                kiblnd_peer_addref(peer2);
-
-                cfs_write_unlock_irqrestore(g_lock, flags);
-                kiblnd_peer_decref(peer);
-                peer = peer2;
+                /* Tie-break connection race in favour of the higher NID.
+                 * If we keep running into a race condition multiple times,
+                 * we have to assume that the connection attempt with the
+                 * higher NID is stuck in a connecting state and will never
+                 * recover.  As such, we pass through this if-block and let
+                 * the lower NID connection win so we can move forward.
+                 */
+                if (peer2->ibp_connecting != 0 &&
+                    nid < ni->ni_nid && peer2->ibp_races <
+                    MAX_CONN_RACES_BEFORE_ABORT) {
+                        peer2->ibp_races++;
+                        write_unlock_irqrestore(g_lock, flags);

+                        CDEBUG(D_NET, "Conn race %s\n",
+                               libcfs_nid2str(peer2->ibp_nid));
+
+                        kiblnd_peer_decref(peer_ni);
+                        rej.ibr_why = IBLND_REJECT_CONN_RACE;
+                        goto failed;
+                }
+                if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
+                        CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
+                                libcfs_nid2str(peer2->ibp_nid),
+                                MAX_CONN_RACES_BEFORE_ABORT);
+                /*
+                 * passive connection is allowed even when this peer_ni is
+                 * waiting for reconnection.
+                 */
+                peer2->ibp_reconnecting = 0;
+                peer2->ibp_races = 0;
+                peer2->ibp_accepting++;
+                kiblnd_peer_addref(peer2);
+
+                /* Race with kiblnd_launch_tx (active connect) to create
+                 * peer_ni so copy validated parameters since we now know
+                 * what the peer_ni's limits are */
+                peer2->ibp_max_frags = peer_ni->ibp_max_frags;
+                peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
+
+                write_unlock_irqrestore(g_lock, flags);
+                kiblnd_peer_decref(peer_ni);
+                peer_ni = peer2;
         } else {
-                /* Brand new peer */
-                LASSERT (peer->ibp_accepting == 0);
-                LASSERT (peer->ibp_version == 0 &&
-                         peer->ibp_incarnation == 0);
+                /* Brand new peer_ni */
+                LASSERT (peer_ni->ibp_accepting == 0);
+                LASSERT (peer_ni->ibp_version == 0 &&
+                         peer_ni->ibp_incarnation == 0);

-                peer->ibp_accepting   = 1;
-                peer->ibp_version     = version;
-                peer->ibp_incarnation = reqmsg->ibm_srcstamp;
+                peer_ni->ibp_accepting   = 1;
+                peer_ni->ibp_version     = version;
+                peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;

                 /* I have a ref on ni that prevents it being shutdown */
                 LASSERT (net->ibn_shutdown == 0);

-                kiblnd_peer_addref(peer);
-                cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+                kiblnd_peer_addref(peer_ni);
+                list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));

-                cfs_write_unlock_irqrestore(g_lock, flags);
+                write_unlock_irqrestore(g_lock, flags);
         }
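The tie-break above favours the higher NID, but gives up after MAX_CONN_RACES_BEFORE_ABORT unresolved races and lets the lower NID win so the peers can make progress. A small sketch of that decision, under the assumption that both sides keep dialing each other at once; the names are illustrative:

#include <stdio.h>

#define MAX_RACES 20    /* mirrors MAX_CONN_RACES_BEFORE_ABORT */

/* passive side: should I accept this incoming connect while my own
 * active connect to the same (lower-NID) peer is still in flight? */
static int accept_passive(unsigned long long my_nid,
                          unsigned long long peer_nid,
                          int peer_is_connecting, int *races)
{
        if (peer_is_connecting && peer_nid < my_nid &&
            *races < MAX_RACES) {
                (*races)++;
                return 0;       /* reject: my higher-NID connect should win */
        }
        *races = 0;             /* resolved one way or the other */
        return 1;               /* accept; possibly the lower NID wins now */
}

int main(void)
{
        int races = 0;
        int i, ok = 0;

        for (i = 0; i < 25 && !ok; i++)
                ok = accept_passive(0x12345, 0x12344, 1, &races);
        printf("accepted after %d attempts\n", i);
        return 0;
}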
-        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
+        conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, version);
         if (conn == NULL) {
-                kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
-                kiblnd_peer_decref(peer);
+                kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
+                kiblnd_peer_decref(peer_ni);
                 rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
                 goto failed;
         }

         /* conn now "owns" cmid, so I return success from here on to ensure the
          * CM callback doesn't destroy cmid. */
-
-        conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
-        conn->ibc_credits          = IBLND_MSG_QUEUE_SIZE(version);
-        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
-        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
-                 <= IBLND_RX_MSGS(version));
+        conn->ibc_incarnation      = reqmsg->ibm_srcstamp;
+        conn->ibc_credits          = conn->ibc_queue_depth;
+        conn->ibc_reserved_credits = conn->ibc_queue_depth;
+        LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+                IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));

         ackmsg = &conn->ibc_connvars->cv_msg;
         memset(ackmsg, 0, sizeof(*ackmsg));

         kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
                         sizeof(ackmsg->ibm_u.connparams));
-        ackmsg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
-        ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
-        ackmsg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
+        ackmsg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
+        ackmsg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
+        ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;

         kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
@@ -2351,57 +2519,97 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
         return 0;

 failed:
-        if (ni != NULL)
-                lnet_ni_decref(ni);
+        if (ni != NULL) {
+                rej.ibr_cp.ibcp_queue_depth =
+                        kiblnd_msg_queue_size(version, ni);
+                rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+                lnet_ni_decref(ni);
+        }

-        rej.ibr_version = version;
-        rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
-        rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
-        kiblnd_reject(cmid, &rej);
+        rej.ibr_version = version;
+        kiblnd_reject(cmid, &rej);

-        return -ECONNREFUSED;
+        return -ECONNREFUSED;
 }
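kiblnd_passive_connect() above no longer insists on exact parameter equality: it accepts any queue depth or fragment count at or below its own limits and records the validated values, echoing them back in the ACK. A compact sketch of that accept-if-not-greater negotiation, with illustrative types:

#include <stdio.h>

struct connparams {
        int queue_depth;
        int max_frags;
};

/* passive side: reject only if the requester asks for MORE than we
 * allow; otherwise adopt the requester's (validated) values */
static int negotiate(const struct connparams *local,
                     const struct connparams *req,
                     struct connparams *agreed)
{
        if (req->queue_depth > local->queue_depth ||
            req->max_frags > local->max_frags)
                return -1;      /* reject: requester exceeds our limits */

        *agreed = *req;         /* both sides will run with these values */
        return 0;
}

int main(void)
{
        struct connparams local = { .queue_depth = 8, .max_frags = 256 };
        struct connparams req   = { .queue_depth = 8, .max_frags = 32 };
        struct connparams agreed;

        if (negotiate(&local, &req, &agreed) == 0)
                printf("agreed: depth %d, frags %d\n",
                       agreed.queue_depth, agreed.max_frags);
        return 0;
}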
-void
-kiblnd_reconnect (kib_conn_t *conn, int version,
-                  __u64 incarnation, int why, kib_connparams_t *cp)
+static void
+kiblnd_check_reconnect(kib_conn_t *conn, int version,
+                       __u64 incarnation, int why, kib_connparams_t *cp)
 {
-        kib_peer_t    *peer = conn->ibc_peer;
-        char          *reason;
-        int            retry = 0;
-        unsigned long  flags;
-
-        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
-        LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */
-
-        cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
+        rwlock_t      *glock = &kiblnd_data.kib_global_lock;
+        kib_peer_ni_t *peer_ni = conn->ibc_peer;
+        char          *reason;
+        int            msg_size = IBLND_MSG_SIZE;
+        int            frag_num = -1;
+        int            queue_dep = -1;
+        bool           reconnect;
+        unsigned long  flags;
+
+        LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+        LASSERT(peer_ni->ibp_connecting > 0);   /* 'conn' at least */
+        LASSERT(!peer_ni->ibp_reconnecting);
+
+        if (cp) {
+                msg_size  = cp->ibcp_max_msg_size;
+                frag_num  = cp->ibcp_max_frags;
+                queue_dep = cp->ibcp_queue_depth;
+        }
+
+        write_lock_irqsave(glock, flags);
         /* retry connection if it's still needed and no other connection
          * attempts (active or passive) are in progress
          * NB: reconnect is still needed even when ibp_tx_queue is
          * empty if ibp_version != version because reconnect may be
          * initiated by kiblnd_query() */
-        if ((!cfs_list_empty(&peer->ibp_tx_queue) ||
-             peer->ibp_version != version) &&
-            peer->ibp_connecting == 1 &&
-            peer->ibp_accepting == 0) {
-                retry = 1;
-                peer->ibp_connecting++;
-
-                peer->ibp_version     = version;
-                peer->ibp_incarnation = incarnation;
-        }
-
-        cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
-        if (!retry)
-                return;
+        reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
+                     peer_ni->ibp_version != version) &&
+                    peer_ni->ibp_connecting == 1 &&
+                    peer_ni->ibp_accepting == 0;
+        if (!reconnect) {
+                reason = "no need";
+                goto out;
+        }

         switch (why) {
         default:
                 reason = "Unknown";
                 break;

+        case IBLND_REJECT_RDMA_FRAGS: {
+                struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+                if (!cp) {
+                        reason = "can't negotiate max frags";
+                        goto out;
+                }
+                tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+                if (!tunables->lnd_map_on_demand) {
+                        reason = "map_on_demand must be enabled";
+                        goto out;
+                }
+                if (conn->ibc_max_frags <= frag_num) {
+                        reason = "unsupported max frags";
+                        goto out;
+                }
+
+                peer_ni->ibp_max_frags = frag_num;
+                reason = "rdma fragments";
+                break;
+        }
+        case IBLND_REJECT_MSG_QUEUE_SIZE:
+                if (!cp) {
+                        reason = "can't negotiate queue depth";
+                        goto out;
+                }
+                if (conn->ibc_queue_depth <= queue_dep) {
+                        reason = "unsupported queue depth";
+                        goto out;
+                }
+
+                peer_ni->ibp_queue_depth = queue_dep;
+                reason = "queue depth";
+                break;
+
         case IBLND_REJECT_CONN_STALE:
                 reason = "stale";
                 break;
@@ -2413,37 +2621,52 @@ kiblnd_reconnect (kib_conn_t *conn, int version,
         case IBLND_REJECT_CONN_UNCOMPAT:
                 reason = "version negotiation";
                 break;
-        }

-        CDEBUG(D_NETERROR, "%s: retrying (%s), %x, %x, "
-               "queue_dep: %d, max_frag: %d, msg_size: %d\n",
-               libcfs_nid2str(peer->ibp_nid),
-               reason, IBLND_MSG_VERSION, version,
-               cp != NULL ? cp->ibcp_queue_depth  : IBLND_MSG_QUEUE_SIZE(version),
-               cp != NULL ? cp->ibcp_max_frags    : IBLND_RDMA_FRAGS(version),
-               cp != NULL ? cp->ibcp_max_msg_size : IBLND_MSG_SIZE);
-
-        kiblnd_connect_peer(peer);
+        case IBLND_REJECT_INVALID_SRV_ID:
+                reason = "invalid service id";
+                break;
+        }
+
+        conn->ibc_reconnect = 1;
+        peer_ni->ibp_reconnecting = 1;
+        peer_ni->ibp_version = version;
+        if (incarnation != 0)
+                peer_ni->ibp_incarnation = incarnation;
+ out:
+        write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+        CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
+                libcfs_nid2str(peer_ni->ibp_nid),
+                reconnect ? "reconnect" : "don't reconnect",
+                reason, IBLND_MSG_VERSION, version, msg_size,
+                conn->ibc_queue_depth, queue_dep,
+                conn->ibc_max_frags, frag_num);
+        /*
+         * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer_ni
+         * while destroying the zombie
+         */
 }
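kiblnd_check_reconnect() above only retries a parameter reject when it still has room to concede: if the connection's current value is already at or below what the peer offered, reconnecting cannot help, so it gives up with an "unsupported" reason. A minimal sketch of that rule; the helper name is hypothetical, not from the sources:

#include <stdio.h>

/* decide whether a queue-depth reject is recoverable by retrying
 * with the peer's advertised limit */
static int can_renegotiate(int my_depth, int peer_depth, int *retry_depth)
{
        if (my_depth <= peer_depth)
                return 0;               /* nothing left to concede */
        *retry_depth = peer_depth;      /* shrink to what the peer allows */
        return 1;
}

int main(void)
{
        int depth;

        if (can_renegotiate(16, 8, &depth))
                printf("reconnect with queue depth %d\n", depth);
        if (!can_renegotiate(8, 8, &depth))
                printf("8 <= 8: unsupported queue depth, give up\n");
        return 0;
}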
-void
+static void
 kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
 {
-        kib_peer_t    *peer = conn->ibc_peer;
+        kib_peer_ni_t *peer_ni = conn->ibc_peer;

-        LASSERT (!cfs_in_interrupt());
-        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+        LASSERT (!in_interrupt());
+        LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);

-        switch (reason) {
-        case IB_CM_REJ_STALE_CONN:
-                kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
-                                 IBLND_REJECT_CONN_STALE, NULL);
-                break;
+        switch (reason) {
+        case IB_CM_REJ_STALE_CONN:
+                kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
+                                       IBLND_REJECT_CONN_STALE, NULL);
+                break;

         case IB_CM_REJ_INVALID_SERVICE_ID:
-                CDEBUG(D_NETERROR, "%s rejected: no listener at %d\n",
-                       libcfs_nid2str(peer->ibp_nid),
-                       *kiblnd_tunables.kib_service);
+                kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
+                                       IBLND_REJECT_INVALID_SRV_ID, NULL);
+                CNETERR("%s rejected: no listener at %d\n",
+                        libcfs_nid2str(peer_ni->ibp_nid),
+                        *kiblnd_tunables.kib_service);
                 break;

         case IB_CM_REJ_CONSUMER_DEFINED:
@@ -2458,7 +2681,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                          * b) V2 will provide incarnation while rejecting me,
                          *    -1 will be overwritten.
                          *
-                         * if I try to connect to a V1 peer with V2 protocol,
+                         * if I try to connect to a V1 peer_ni with V2 protocol,
                          * it rejected me then upgrade to V2, I have no idea
                          * about the upgrading and try to reconnect with V1,
                          * in this case upgraded V2 can find out I'm trying to
@@ -2492,22 +2715,22 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                         if (rej->ibr_magic != IBLND_MSG_MAGIC &&
                             rej->ibr_magic != LNET_PROTO_MAGIC) {
                                 CERROR("%s rejected: consumer defined fatal error\n",
-                                       libcfs_nid2str(peer->ibp_nid));
+                                       libcfs_nid2str(peer_ni->ibp_nid));
                                 break;
                         }

                         if (rej->ibr_version != IBLND_MSG_VERSION &&
                             rej->ibr_version != IBLND_MSG_VERSION_1) {
                                 CERROR("%s rejected: o2iblnd version %x error\n",
-                                       libcfs_nid2str(peer->ibp_nid),
+                                       libcfs_nid2str(peer_ni->ibp_nid),
                                        rej->ibr_version);
                                 break;
                         }

                         if (rej->ibr_why     == IBLND_REJECT_FATAL &&
                             rej->ibr_version == IBLND_MSG_VERSION_1) {
-                                CDEBUG(D_NET, "rejected by old version peer %s: %x\n",
-                                       libcfs_nid2str(peer->ibp_nid), rej->ibr_version);
+                                CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
+                                       libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version);

                                 if (conn->ibc_version != IBLND_MSG_VERSION_1)
                                         rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
@@ -2517,35 +2740,25 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                         case IBLND_REJECT_CONN_RACE:
                         case IBLND_REJECT_CONN_STALE:
                         case IBLND_REJECT_CONN_UNCOMPAT:
-                                kiblnd_reconnect(conn, rej->ibr_version,
-                                                 incarnation, rej->ibr_why, cp);
-                                break;
-
-                        case IBLND_REJECT_MSG_QUEUE_SIZE:
-                                CERROR("%s rejected: incompatible message queue depth %d, %d\n",
-                                       libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth,
-                                       IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
-                                break;
-
-                        case IBLND_REJECT_RDMA_FRAGS:
-                                CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
-                                       libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags,
-                                       IBLND_RDMA_FRAGS(conn->ibc_version));
+                        case IBLND_REJECT_MSG_QUEUE_SIZE:
+                        case IBLND_REJECT_RDMA_FRAGS:
+                                kiblnd_check_reconnect(conn, rej->ibr_version,
+                                                incarnation, rej->ibr_why, cp);
                                 break;

                         case IBLND_REJECT_NO_RESOURCES:
                                 CERROR("%s rejected: o2iblnd no resources\n",
-                                       libcfs_nid2str(peer->ibp_nid));
+                                       libcfs_nid2str(peer_ni->ibp_nid));
                                 break;

                         case IBLND_REJECT_FATAL:
                                 CERROR("%s rejected: o2iblnd fatal error\n",
-                                       libcfs_nid2str(peer->ibp_nid));
+                                       libcfs_nid2str(peer_ni->ibp_nid));
                                 break;

                         default:
                                 CERROR("%s rejected: o2iblnd reason %d\n",
-                                       libcfs_nid2str(peer->ibp_nid),
+                                       libcfs_nid2str(peer_ni->ibp_nid),
                                        rej->ibr_why);
                                 break;
                         }
@@ -2553,19 +2766,19 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
                 }
                 /* fall through */
         default:
-                CDEBUG(D_NETERROR, "%s rejected: reason %d, size %d\n",
-                       libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
+                CNETERR("%s rejected: reason %d, size %d\n",
+                        libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
                 break;
         }

         kiblnd_connreq_done(conn, -ECONNREFUSED);
 }

-void
+static void
 kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
 {
-        kib_peer_t    *peer = conn->ibc_peer;
-        lnet_ni_t     *ni   = peer->ibp_ni;
+        kib_peer_ni_t  *peer_ni = conn->ibc_peer;
+        struct lnet_ni *ni   = peer_ni->ibp_ni;
         kib_net_t     *net  = ni->ni_data;
         kib_msg_t     *msg  = priv;
         int            ver  = conn->ibc_version;
@@ -2576,13 +2789,13 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)

         if (rc != 0) {
                 CERROR("Can't unpack connack from %s: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), rc);
+                       libcfs_nid2str(peer_ni->ibp_nid), rc);
                 goto failed;
         }

         if (msg->ibm_type != IBLND_MSG_CONNACK) {
                 CERROR("Unexpected message %d from %s\n",
-                       msg->ibm_type, libcfs_nid2str(peer->ibp_nid));
+                       msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid));
                 rc = -EPROTO;
                 goto failed;
         }
@@ -2590,61 +2803,63 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
         if (ver != msg->ibm_version) {
                 CERROR("%s replied version %x is different with "
                        "requested version %x\n",
-                       libcfs_nid2str(peer->ibp_nid), msg->ibm_version, ver);
+                       libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver);
                 rc = -EPROTO;
                 goto failed;
         }

-        if (msg->ibm_u.connparams.ibcp_queue_depth !=
-            IBLND_MSG_QUEUE_SIZE(ver)) {
-                CERROR("%s has incompatible queue depth %d(%d wanted)\n",
-                       libcfs_nid2str(peer->ibp_nid),
-                       msg->ibm_u.connparams.ibcp_queue_depth,
-                       IBLND_MSG_QUEUE_SIZE(ver));
-                rc = -EPROTO;
-                goto failed;
-        }
-
-        if (msg->ibm_u.connparams.ibcp_max_frags !=
-            IBLND_RDMA_FRAGS(ver)) {
-                CERROR("%s has incompatible max_frags %d (%d wanted)\n",
-                       libcfs_nid2str(peer->ibp_nid),
-                       msg->ibm_u.connparams.ibcp_max_frags,
-                       IBLND_RDMA_FRAGS(ver));
-                rc = -EPROTO;
-                goto failed;
-        }
+        if (msg->ibm_u.connparams.ibcp_queue_depth >
+            conn->ibc_queue_depth) {
+                CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
+                       libcfs_nid2str(peer_ni->ibp_nid),
+                       msg->ibm_u.connparams.ibcp_queue_depth,
+                       conn->ibc_queue_depth);
+                rc = -EPROTO;
+                goto failed;
+        }
+
+        if (msg->ibm_u.connparams.ibcp_max_frags >
+            conn->ibc_max_frags) {
+                CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
+                       libcfs_nid2str(peer_ni->ibp_nid),
+                       msg->ibm_u.connparams.ibcp_max_frags,
+                       conn->ibc_max_frags);
+                rc = -EPROTO;
+                goto failed;
+        }

         if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
                 CERROR("%s max message size %d too big (%d max)\n",
-                       libcfs_nid2str(peer->ibp_nid),
+                       libcfs_nid2str(peer_ni->ibp_nid),
                        msg->ibm_u.connparams.ibcp_max_msg_size,
                        IBLND_MSG_SIZE);
                 rc = -EPROTO;
                 goto failed;
         }

-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-        if (msg->ibm_dstnid == ni->ni_nid &&
-            msg->ibm_dststamp == net->ibn_incarnation)
-                rc = 0;
-        else
-                rc = -ESTALE;
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        if (msg->ibm_dstnid == ni->ni_nid &&
+            msg->ibm_dststamp == net->ibn_incarnation)
+                rc = 0;
+        else
+                rc = -ESTALE;
+        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

         if (rc != 0) {
                 CERROR("Bad connection reply from %s, rc = %d, "
                        "version: %x max_frags: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), rc,
+                       libcfs_nid2str(peer_ni->ibp_nid), rc,
                        msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags);
                 goto failed;
         }

-        conn->ibc_incarnation      = msg->ibm_srcstamp;
-        conn->ibc_credits          =
-        conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
-        LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
-                 <= IBLND_RX_MSGS(ver));
+        conn->ibc_incarnation      = msg->ibm_srcstamp;
+        conn->ibc_credits          = msg->ibm_u.connparams.ibcp_queue_depth;
+        conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
+        conn->ibc_queue_depth      = msg->ibm_u.connparams.ibcp_queue_depth;
+        conn->ibc_max_frags        = msg->ibm_u.connparams.ibcp_max_frags;
+        LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+                IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));

         kiblnd_connreq_done(conn, 0);
         return;
@@ -2660,10 +2875,10 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
         kiblnd_connreq_done(conn, 0);
 }
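On a successful CONNACK the connection adopts the replied queue depth as both its send-credit and reserved-credit count, and the LASSERT confirms that credits plus out-of-band messages fit within the posted receive buffers. A toy sketch of that accounting; the constants are illustrative, not the o2iblnd values:

#include <assert.h>
#include <stdio.h>

#define OOB_MSGS 2      /* illustrative out-of-band NOOP allowance */

struct conn {
        int queue_depth;        /* negotiated via CONNACK */
        int credits;            /* sends I may post right now */
        int reserved_credits;   /* held back for replies */
        int rx_msgs;            /* receive buffers actually posted */
};

int main(void)
{
        struct conn c;
        int acked_depth = 6;    /* peer replied with a smaller depth */

        c.queue_depth      = acked_depth;
        c.credits          = acked_depth;
        c.reserved_credits = acked_depth;
        c.rx_msgs          = 2 * acked_depth + OOB_MSGS;

        /* every credit must map onto a posted receive buffer */
        assert(c.credits + c.reserved_credits + OOB_MSGS <= c.rx_msgs);
        printf("depth %d ok: %d rx buffers cover %d credits + %d oob\n",
               c.queue_depth, c.rx_msgs,
               c.credits + c.reserved_credits, OOB_MSGS);
        return 0;
}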
-int
+static int
 kiblnd_active_connect (struct rdma_cm_id *cmid)
 {
-        kib_peer_t              *peer = (kib_peer_t *)cmid->context;
+        kib_peer_ni_t           *peer_ni = (kib_peer_ni_t *)cmid->context;
         kib_conn_t              *conn;
         kib_msg_t               *msg;
         struct rdma_conn_param   cp;
@@ -2672,34 +2887,36 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
         unsigned long            flags;
         int                      rc;

-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);

-        incarnation = peer->ibp_incarnation;
-        version     = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
+        incarnation = peer_ni->ibp_incarnation;
+        version     = (peer_ni->ibp_version == 0) ? IBLND_MSG_VERSION :
+                                                    peer_ni->ibp_version;

-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

-        conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
+        conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_ACTIVE_CONNECT,
+                                  version);
         if (conn == NULL) {
-                kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
-                kiblnd_peer_decref(peer); /* lose cmid's ref */
+                kiblnd_peer_connect_failed(peer_ni, 1, -ENOMEM);
+                kiblnd_peer_decref(peer_ni); /* lose cmid's ref */
                 return -ENOMEM;
         }

         /* conn "owns" cmid now, so I return success from here on to ensure the
          * CM callback doesn't destroy cmid. conn also takes over cmid's ref
-         * on peer */
+         * on peer_ni */

         msg = &conn->ibc_connvars->cv_msg;

-        memset(msg, 0, sizeof(*msg));
-        kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
-        msg->ibm_u.connparams.ibcp_queue_depth  = IBLND_MSG_QUEUE_SIZE(version);
-        msg->ibm_u.connparams.ibcp_max_frags    = IBLND_RDMA_FRAGS(version);
-        msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
+        memset(msg, 0, sizeof(*msg));
+        kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
+        msg->ibm_u.connparams.ibcp_queue_depth  = conn->ibc_queue_depth;
+        msg->ibm_u.connparams.ibcp_max_frags    = conn->ibc_max_frags;
+        msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;

-        kiblnd_pack_msg(peer->ibp_ni, msg, version,
-                        0, peer->ibp_nid, incarnation);
+        kiblnd_pack_msg(peer_ni->ibp_ni, msg, version,
+                        0, peer_ni->ibp_nid, incarnation);

         memset(&cp, 0, sizeof(cp));
         cp.private_data        = msg;
@@ -2716,7 +2933,7 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
         rc = rdma_connect(cmid, &cp);
         if (rc != 0) {
                 CERROR("Can't connect to %s: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), rc);
+                       libcfs_nid2str(peer_ni->ibp_nid), rc);
                 kiblnd_connreq_done(conn, rc);
                 kiblnd_conn_decref(conn);
         }
@@ -2727,7 +2944,7 @@ kiblnd_active_connect (struct rdma_cm_id *cmid)
 int
 kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
 {
-        kib_peer_t  *peer;
+        kib_peer_ni_t *peer_ni;
         kib_conn_t  *conn;
         int          rc;

@@ -2746,22 +2963,22 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 return rc;

         case RDMA_CM_EVENT_ADDR_ERROR:
-                peer = (kib_peer_t *)cmid->context;
-                CDEBUG(D_NETERROR, "%s: ADDR ERROR %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
-                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
-                kiblnd_peer_decref(peer);
+                peer_ni = (kib_peer_ni_t *)cmid->context;
+                CNETERR("%s: ADDR ERROR %d\n",
+                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
+                kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
+                kiblnd_peer_decref(peer_ni);
                 return -EHOSTUNREACH;      /* rc != 0 destroys cmid */

         case RDMA_CM_EVENT_ADDR_RESOLVED:
-                peer = (kib_peer_t *)cmid->context;
+                peer_ni = (kib_peer_ni_t *)cmid->context;

                 CDEBUG(D_NET,"%s Addr resolved: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
+                       libcfs_nid2str(peer_ni->ibp_nid), event->status);

                 if (event->status != 0) {
-                        CDEBUG(D_NETERROR, "Can't resolve address for %s: %d\n",
-                               libcfs_nid2str(peer->ibp_nid), event->status);
+                        CNETERR("Can't resolve address for %s: %d\n",
+                                libcfs_nid2str(peer_ni->ibp_nid), event->status);
                         rc = event->status;
                 } else {
                         rc = rdma_resolve_route(
@@ -2770,39 +2987,39 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                                 return 0;
                         /* Can't initiate route resolution */
                         CERROR("Can't resolve route for %s: %d\n",
-                               libcfs_nid2str(peer->ibp_nid), rc);
+                               libcfs_nid2str(peer_ni->ibp_nid), rc);
                 }
-                kiblnd_peer_connect_failed(peer, 1, rc);
-                kiblnd_peer_decref(peer);
+                kiblnd_peer_connect_failed(peer_ni, 1, rc);
+                kiblnd_peer_decref(peer_ni);
                 return rc;                      /* rc != 0 destroys cmid */

         case RDMA_CM_EVENT_ROUTE_ERROR:
-                peer = (kib_peer_t *)cmid->context;
-                CDEBUG(D_NETERROR, "%s: ROUTE ERROR %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
-                kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
-                kiblnd_peer_decref(peer);
+                peer_ni = (kib_peer_ni_t *)cmid->context;
+                CNETERR("%s: ROUTE ERROR %d\n",
+                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
+                kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
+                kiblnd_peer_decref(peer_ni);
                 return -EHOSTUNREACH;           /* rc != 0 destroys cmid */

         case RDMA_CM_EVENT_ROUTE_RESOLVED:
-                peer = (kib_peer_t *)cmid->context;
+                peer_ni = (kib_peer_ni_t *)cmid->context;

                 CDEBUG(D_NET,"%s Route resolved: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
+                       libcfs_nid2str(peer_ni->ibp_nid), event->status);

                 if (event->status == 0)
                         return kiblnd_active_connect(cmid);

-                CDEBUG(D_NETERROR, "Can't resolve route for %s: %d\n",
-                       libcfs_nid2str(peer->ibp_nid), event->status);
-                kiblnd_peer_connect_failed(peer, 1, event->status);
-                kiblnd_peer_decref(peer);
+                CNETERR("Can't resolve route for %s: %d\n",
+                        libcfs_nid2str(peer_ni->ibp_nid), event->status);
+                kiblnd_peer_connect_failed(peer_ni, 1, event->status);
+                kiblnd_peer_decref(peer_ni);
                 return event->status;           /* rc != 0 destroys cmid */

         case RDMA_CM_EVENT_UNREACHABLE:
                 conn = (kib_conn_t *)cmid->context;
                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
-                CDEBUG(D_NETERROR, "%s: UNREACHABLE %d\n",
+                CNETERR("%s: UNREACHABLE %d\n",
                         libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
                 kiblnd_connreq_done(conn, -ENETDOWN);
                 kiblnd_conn_decref(conn);
@@ -2812,8 +3029,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 conn = (kib_conn_t *)cmid->context;
                 LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
                         conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
-                CDEBUG(D_NETERROR, "%s: CONNECT ERROR %d\n",
-                       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+                CNETERR("%s: CONNECT ERROR %d\n",
+                        libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
                 kiblnd_connreq_done(conn, -ENOTCONN);
                 kiblnd_conn_decref(conn);
                 return 0;
@@ -2863,11 +3080,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                 /* net keeps its ref on conn! */
                 return 0;

-#ifdef HAVE_OFED_RDMA_CMEV_TIMEWAIT_EXIT
         case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                 CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
                 return 0;
-#endif
+
         case RDMA_CM_EVENT_DISCONNECTED:
                 conn = (kib_conn_t *)cmid->context;
                 if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
@@ -2889,181 +3105,252 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
                  * to ignore this */
                 return 0;

-#ifdef HAVE_OFED_RDMA_CMEV_ADDRCHANGE
         case RDMA_CM_EVENT_ADDR_CHANGE:
                 LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
                 return 0;
-#endif
         }
 }

-int
-kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs)
+static int
+kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
 {
-        kib_tx_t          *tx;
-        cfs_list_t        *ttmp;
-        int                timed_out = 0;
-
-        cfs_spin_lock(&conn->ibc_lock);
-
-        cfs_list_for_each (ttmp, txs) {
-                tx = cfs_list_entry (ttmp, kib_tx_t, tx_list);
-
-                if (txs != &conn->ibc_active_txs) {
-                        LASSERT (tx->tx_queued);
-                } else {
-                        LASSERT (!tx->tx_queued);
-                        LASSERT (tx->tx_waiting || tx->tx_sending != 0);
-                }
-
-                if (cfs_time_aftereq (jiffies, tx->tx_deadline)) {
-                        timed_out = 1;
-                        CERROR("Timed out tx: %s, %lu seconds\n",
-                               kiblnd_queue2str(conn, txs),
-                               cfs_duration_sec(jiffies - tx->tx_deadline));
-                        break;
-                }
-        }
-
-        cfs_spin_unlock(&conn->ibc_lock);
-        return timed_out;
+        kib_tx_t         *tx;
+        struct list_head *ttmp;
+
+        list_for_each(ttmp, txs) {
+                tx = list_entry(ttmp, kib_tx_t, tx_list);
+
+                if (txs != &conn->ibc_active_txs) {
+                        LASSERT(tx->tx_queued);
+                } else {
+                        LASSERT(!tx->tx_queued);
+                        LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+                }
+
+                if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
+                        CERROR("Timed out tx: %s, %lu seconds\n",
+                               kiblnd_queue2str(conn, txs),
+                               cfs_duration_sec(jiffies - tx->tx_deadline));
+                        return 1;
+                }
+        }
+
+        return 0;
 }

-int
-kiblnd_conn_timed_out (kib_conn_t *conn)
+static int
+kiblnd_conn_timed_out_locked(kib_conn_t *conn)
 {
-        return  kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
-                kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
-                kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
-                kiblnd_check_txs(conn, &conn->ibc_active_txs);
+        return  kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
+                kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
 }

-void
+static void
 kiblnd_check_conns (int idx)
 {
-        cfs_list_t        *peers = &kiblnd_data.kib_peers[idx];
-        cfs_list_t        *ptmp;
-        kib_peer_t        *peer;
-        kib_conn_t        *conn;
-        cfs_list_t        *ctmp;
-        unsigned long      flags;
-
- again:
-        /* NB. We expect to have a look at all the peers and not find any
-         * rdmas to time out, so we just use a shared lock while we
-         * take a look... */
-        cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
-        cfs_list_for_each (ptmp, peers) {
-                peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list);
-
-                cfs_list_for_each (ctmp, &peer->ibp_conns) {
-                        conn = cfs_list_entry (ctmp, kib_conn_t, ibc_list);
-
-                        LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
-
-                        /* In case we have enough credits to return via a
-                         * NOOP, but there were no non-blocking tx descs
-                         * free to do it last time... */
-                        kiblnd_check_sends(conn);
-
-                        if (!kiblnd_conn_timed_out(conn))
-                                continue;
-
-                        /* Handle timeout by closing the whole connection.  We
-                         * can only be sure RDMA activity has ceased once the
-                         * QP has been modified. */
-
-                        kiblnd_conn_addref(conn); /* 1 ref for me... */
-
-                        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
-                                                   flags);
-
-                        CERROR("Timed out RDMA with %s (%lu)\n",
-                               libcfs_nid2str(peer->ibp_nid),
-                               cfs_duration_sec(cfs_time_current() -
-                                                peer->ibp_last_alive));
-
-                        kiblnd_close_conn(conn, -ETIMEDOUT);
-                        kiblnd_conn_decref(conn); /* ...until here */
-
-                        /* start again now I've dropped the lock */
-                        goto again;
-                }
-        }
-
-        cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+        struct list_head  closes = LIST_HEAD_INIT(closes);
+        struct list_head  checksends = LIST_HEAD_INIT(checksends);
+        struct list_head *peers = &kiblnd_data.kib_peers[idx];
+        struct list_head *ptmp;
+        kib_peer_ni_t    *peer_ni;
+        kib_conn_t       *conn;
+        struct list_head *ctmp;
+        unsigned long     flags;
+
+        /* NB. We expect to have a look at all the peers and not find any
+         * RDMAs to time out, so we just use a shared lock while we
+         * take a look... */
+        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+        list_for_each(ptmp, peers) {
+                peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+
+                list_for_each(ctmp, &peer_ni->ibp_conns) {
+                        int timedout;
+                        int sendnoop;
+
+                        conn = list_entry(ctmp, kib_conn_t, ibc_list);
+
+                        LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
+
+                        spin_lock(&conn->ibc_lock);
+
+                        sendnoop = kiblnd_need_noop(conn);
+                        timedout = kiblnd_conn_timed_out_locked(conn);
+                        if (!sendnoop && !timedout) {
+                                spin_unlock(&conn->ibc_lock);
+                                continue;
+                        }
+
+                        if (timedout) {
+                                CERROR("Timed out RDMA with %s (%lu): "
+                                       "c: %u, oc: %u, rc: %u\n",
+                                       libcfs_nid2str(peer_ni->ibp_nid),
+                                       cfs_duration_sec(cfs_time_current() -
+                                                        peer_ni->ibp_last_alive),
+                                       conn->ibc_credits,
+                                       conn->ibc_outstanding_credits,
+                                       conn->ibc_reserved_credits);
+                                list_add(&conn->ibc_connd_list, &closes);
+                        } else {
+                                list_add(&conn->ibc_connd_list, &checksends);
+                        }
+                        /* +ref for 'closes' or 'checksends' */
+                        kiblnd_conn_addref(conn);
+
+                        spin_unlock(&conn->ibc_lock);
+                }
+        }
+
+        read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+        /* Handle timeout by closing the whole
+         * connection. We can only be sure RDMA activity
+         * has ceased once the QP has been modified. */
+        while (!list_empty(&closes)) {
+                conn = list_entry(closes.next,
+                                  kib_conn_t, ibc_connd_list);
+                list_del(&conn->ibc_connd_list);
+                kiblnd_close_conn(conn, -ETIMEDOUT);
+                kiblnd_conn_decref(conn);
+        }
+
+        /* In case we have enough credits to return via a
+         * NOOP, but there were no non-blocking tx descs
+         * free to do it last time... */
+        while (!list_empty(&checksends)) {
+                conn = list_entry(checksends.next,
+                                  kib_conn_t, ibc_connd_list);
+                list_del(&conn->ibc_connd_list);

+                spin_lock(&conn->ibc_lock);
+                kiblnd_check_sends_locked(conn);
+                spin_unlock(&conn->ibc_lock);
+
+                kiblnd_conn_decref(conn);
+        }
 }

-void
+static void
 kiblnd_disconnect_conn (kib_conn_t *conn)
 {
-        LASSERT (!cfs_in_interrupt());
-        LASSERT (current == kiblnd_data.kib_connd);
-        LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
+        LASSERT (!in_interrupt());
+        LASSERT (current == kiblnd_data.kib_connd);
+        LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);

-        rdma_disconnect(conn->ibc_cmid);
-        kiblnd_finalise_conn(conn);
+        rdma_disconnect(conn->ibc_cmid);
+        kiblnd_finalise_conn(conn);

-        kiblnd_peer_notify(conn->ibc_peer);
+        kiblnd_peer_notify(conn->ibc_peer);
 }
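kiblnd_check_conns() above was restructured from a drop-lock-and-goto-again loop into a two-pass scheme: connections are sorted onto private "closes" and "checksends" lists (with a reference each) under the shared lock, and the heavy work happens afterwards with no lock held. A userspace sketch of the pattern, with toy types and the locking elided:

#include <stdio.h>

struct conn {
        int timed_out;
        int need_noop;
        struct conn *work_next; /* private work-list linkage */
};

int main(void)
{
        struct conn conns[3] = {
                { .timed_out = 1 }, { .need_noop = 1 }, { 0 },
        };
        struct conn *closes = NULL, *checksends = NULL, *c;
        int i;

        /* pass 1: (under a shared lock in the real code) sort connections
         * onto private lists; the real code also takes a ref per entry */
        for (i = 0; i < 3; i++) {
                c = &conns[i];
                if (c->timed_out) {
                        c->work_next = closes;
                        closes = c;
                } else if (c->need_noop) {
                        c->work_next = checksends;
                        checksends = c;
                }
        }

        /* pass 2: lock dropped; now it is safe to do blocking work */
        for (c = closes; c != NULL; c = c->work_next)
                printf("closing conn %d with -ETIMEDOUT\n", (int)(c - conns));
        for (c = checksends; c != NULL; c = c->work_next)
                printf("kicking sends on conn %d\n", (int)(c - conns));
        return 0;
}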
+ */
+#define KIB_RECONN_HIGH_RACE	10
+/*
+ * Allow connd to take a break and handle other things after consecutive
+ * reconnection attempts.
+ */
+#define KIB_RECONN_BREAK	100
+
 int
 kiblnd_connd (void *arg)
 {
-	cfs_waitlink_t	   wait;
-	unsigned long	   flags;
-	kib_conn_t	  *conn;
-	int		   timeout;
-	int		   i;
-	int		   dropped_lock;
-	int		   peer_index = 0;
-	unsigned long	   deadline = jiffies;
+	spinlock_t	  *lock = &kiblnd_data.kib_connd_lock;
+	wait_queue_t	   wait;
+	unsigned long	   flags;
+	kib_conn_t	  *conn;
+	int		   timeout;
+	int		   i;
+	int		   dropped_lock;
+	int		   peer_index = 0;
+	unsigned long	   deadline = jiffies;
 
-	cfs_daemonize ("kiblnd_connd");
-	cfs_block_allsigs ();
+	cfs_block_allsigs();
 
-	cfs_waitlink_init (&wait);
-	kiblnd_data.kib_connd = current;
+	init_waitqueue_entry(&wait, current);
+	kiblnd_data.kib_connd = current;
 
-	cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+	spin_lock_irqsave(lock, flags);
 
-	while (!kiblnd_data.kib_shutdown) {
+	while (!kiblnd_data.kib_shutdown) {
+		int reconn = 0;
 
 		dropped_lock = 0;
 
-		if (!cfs_list_empty (&kiblnd_data.kib_connd_zombies)) {
-			conn = cfs_list_entry(kiblnd_data. \
-					      kib_connd_zombies.next,
-					      kib_conn_t, ibc_list);
-			cfs_list_del(&conn->ibc_list);
+		if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+			kib_peer_ni_t *peer_ni = NULL;
 
-			cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
-						    flags);
-			dropped_lock = 1;
+			conn = list_entry(kiblnd_data.kib_connd_zombies.next,
+					  kib_conn_t, ibc_list);
+			list_del(&conn->ibc_list);
+			if (conn->ibc_reconnect) {
+				peer_ni = conn->ibc_peer;
+				kiblnd_peer_addref(peer_ni);
+			}
 
-			kiblnd_destroy_conn(conn);
+			spin_unlock_irqrestore(lock, flags);
+			dropped_lock = 1;
 
-			cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
-					       flags);
-		}
+			kiblnd_destroy_conn(conn, !peer_ni);
 
-		if (!cfs_list_empty (&kiblnd_data.kib_connd_conns)) {
-			conn = cfs_list_entry (kiblnd_data.kib_connd_conns.next,
-					       kib_conn_t, ibc_list);
-			cfs_list_del(&conn->ibc_list);
+			spin_lock_irqsave(lock, flags);
+			if (!peer_ni)
+				continue;
 
-			cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock,
-						    flags);
-			dropped_lock = 1;
+			conn->ibc_peer = peer_ni;
+			if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+				list_add_tail(&conn->ibc_list,
+					      &kiblnd_data.kib_reconn_list);
+			else
+				list_add_tail(&conn->ibc_list,
+					      &kiblnd_data.kib_reconn_wait);
+		}
+
+		if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+			conn = list_entry(kiblnd_data.kib_connd_conns.next,
+					  kib_conn_t, ibc_list);
+			list_del(&conn->ibc_list);
 
-			kiblnd_disconnect_conn(conn);
-			kiblnd_conn_decref(conn);
+			spin_unlock_irqrestore(lock, flags);
+			dropped_lock = 1;
 
-			cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock,
-					       flags);
+			kiblnd_disconnect_conn(conn);
+			kiblnd_conn_decref(conn);
+
+			spin_lock_irqsave(lock, flags);
 		}
 
+		while (reconn < KIB_RECONN_BREAK) {
+			if (kiblnd_data.kib_reconn_sec !=
+			    ktime_get_real_seconds()) {
+				kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
+				list_splice_init(&kiblnd_data.kib_reconn_wait,
+						 &kiblnd_data.kib_reconn_list);
+			}
+
+			if (list_empty(&kiblnd_data.kib_reconn_list))
+				break;
+
+			conn = list_entry(kiblnd_data.kib_reconn_list.next,
+					  kib_conn_t, ibc_list);
+			list_del(&conn->ibc_list);
+
+			spin_unlock_irqrestore(lock, flags);
+			dropped_lock = 1;
+
+			reconn += kiblnd_reconnect_peer(conn->ibc_peer);
+			kiblnd_peer_decref(conn->ibc_peer);
+			LIBCFS_FREE(conn, sizeof(*conn));
+
+			spin_lock_irqsave(lock, flags);
+		}
+
 		/* careful with the jiffy wrap... 
*/
 		timeout = (int)(deadline - jiffies);
 		if (timeout <= 0) {
@@ -3071,12 +3358,12 @@ kiblnd_connd (void *arg)
 			const int p = 1;
 			int chunk = kiblnd_data.kib_peer_hash_size;
 
-			cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+			spin_unlock_irqrestore(lock, flags);
 			dropped_lock = 1;
 
 			/* Time to check for RDMA timeouts on a few more
 			 * peers: I do checks every 'p' seconds on a
-			 * proportion of the peer table and I need to check
+			 * proportion of the peer_ni table and I need to check
 			 * every connection 'n' times within a timeout
 			 * interval, to ensure I detect a timeout on any
 			 * connection within (n+1)/n times the timeout
@@ -3088,36 +3375,35 @@ kiblnd_connd (void *arg)
 			if (chunk == 0)
 				chunk = 1;
 
-			for (i = 0; i < chunk; i++) {
-				kiblnd_check_conns(peer_index);
-				peer_index = (peer_index + 1) %
-					     kiblnd_data.kib_peer_hash_size;
-			}
+			for (i = 0; i < chunk; i++) {
+				kiblnd_check_conns(peer_index);
+				peer_index = (peer_index + 1) %
+					     kiblnd_data.kib_peer_hash_size;
+			}
 
-			deadline += p * CFS_HZ;
-			cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock,
-					      flags);
-		}
+			deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+			spin_lock_irqsave(lock, flags);
+		}
 
-		if (dropped_lock)
-			continue;
+		if (dropped_lock)
+			continue;
 
-		/* Nothing to do for 'timeout' */
-		cfs_set_current_state (CFS_TASK_INTERRUPTIBLE);
-		cfs_waitq_add (&kiblnd_data.kib_connd_waitq, &wait);
-		cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+		/* Nothing to do for 'timeout' */
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
+		spin_unlock_irqrestore(lock, flags);
 
-		cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout);
+		schedule_timeout(timeout);
 
-		cfs_set_current_state (CFS_TASK_RUNNING);
-		cfs_waitq_del (&kiblnd_data.kib_connd_waitq, &wait);
-		cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
-	}
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
+		spin_lock_irqsave(lock, flags);
+	}
 
-	cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+	spin_unlock_irqrestore(lock, flags);
 
-	kiblnd_thread_fini();
-	return (0);
+	kiblnd_thread_fini();
+	return 0;
 }
 
 void
@@ -3129,8 +3415,12 @@ kiblnd_qp_event(struct ib_event *event, void *arg)
 	case IB_EVENT_COMM_EST:
 		CDEBUG(D_NET, "%s established\n",
 		       libcfs_nid2str(conn->ibc_peer->ibp_nid));
+		/* We received a packet but the connection isn't established;
+		 * the handshake packet was probably lost, so feel free to
+		 * force the connection into the established state */
+		rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
 		return;
-
+
 	default:
 		CERROR("%s: Async QP event type %d\n",
 		       libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
@@ -3138,12 +3428,18 @@ kiblnd_qp_event(struct ib_event *event, void *arg)
 	}
 }
 
-void
+static void
 kiblnd_complete (struct ib_wc *wc)
 {
-	switch (kiblnd_wreqid2type(wc->wr_id)) {
-	default:
-		LBUG();
+	switch (kiblnd_wreqid2type(wc->wr_id)) {
+	default:
+		LBUG();
+
+	case IBLND_WID_MR:
+		if (wc->status != IB_WC_SUCCESS &&
+		    wc->status != IB_WC_WR_FLUSH_ERR)
+			CNETERR("FastReg failed: %d\n", wc->status);
+		return;
 
 	case IBLND_WID_RDMA:
 		/* We only get RDMA completion notification if it fails. 
All
@@ -3152,8 +3448,8 @@ kiblnd_complete (struct ib_wc *wc)
 	 * failing RDMA because 'tx' might be back on the idle list or
 	 * even reused already if we didn't manage to post all our work
 	 * items */
-		CDEBUG(D_NETERROR, "RDMA (tx: %p) failed: %d\n",
-		       kiblnd_wreqid2ptr(wc->wr_id), wc->status);
+		CNETERR("RDMA (tx: %p) failed: %d\n",
+			kiblnd_wreqid2ptr(wc->wr_id), wc->status);
 		return;
 
 	case IBLND_WID_TX:
@@ -3168,33 +3464,35 @@ kiblnd_complete (struct ib_wc *wc)
 }
 
 void
-kiblnd_cq_completion (struct ib_cq *cq, void *arg)
+kiblnd_cq_completion(struct ib_cq *cq, void *arg)
 {
-	/* NB I'm not allowed to schedule this conn once its refcount has
-	 * reached 0. Since fundamentally I'm racing with scheduler threads
-	 * consuming my CQ I could be called after all completions have
-	 * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
-	 * and this CQ is about to be destroyed so I NOOP. */
-	kib_conn_t *conn = (kib_conn_t *)arg;
-	unsigned long flags;
+	/* NB I'm not allowed to schedule this conn once its refcount has
+	 * reached 0. Since fundamentally I'm racing with scheduler threads
+	 * consuming my CQ I could be called after all completions have
+	 * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
+	 * and this CQ is about to be destroyed so I NOOP. */
+	kib_conn_t		*conn = (kib_conn_t *)arg;
+	struct kib_sched_info	*sched = conn->ibc_sched;
+	unsigned long		flags;
 
-	LASSERT (cq == conn->ibc_cq);
+	LASSERT(cq == conn->ibc_cq);
 
-	cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+	spin_lock_irqsave(&sched->ibs_lock, flags);
 
-	conn->ibc_ready = 1;
+	conn->ibc_ready = 1;
 
-	if (!conn->ibc_scheduled &&
-	    (conn->ibc_nrx > 0 ||
-	     conn->ibc_nsends_posted > 0)) {
-		kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
-		conn->ibc_scheduled = 1;
-		cfs_list_add_tail(&conn->ibc_sched_list,
-				  &kiblnd_data.kib_sched_conns);
-		cfs_waitq_signal(&kiblnd_data.kib_sched_waitq);
-	}
+	if (!conn->ibc_scheduled &&
+	    (conn->ibc_nrx > 0 ||
+	     conn->ibc_nsends_posted > 0)) {
+		kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
+		conn->ibc_scheduled = 1;
+		list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
 
-	cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+		if (waitqueue_active(&sched->ibs_waitq))
+			wake_up(&sched->ibs_waitq);
+	}
+
+	spin_unlock_irqrestore(&sched->ibs_lock, flags);
 }
 
 void
@@ -3209,48 +3507,55 @@ kiblnd_cq_event(struct ib_event *event, void *arg)
 int
 kiblnd_scheduler(void *arg)
 {
-	long		id = (long)arg;
-	cfs_waitlink_t	wait;
-	char		name[16];
-	unsigned long	flags;
-	kib_conn_t     *conn;
-	struct ib_wc	wc;
-	int		rc;
-	int		did_something;
-	int		busy_loops = 0;
+	long			id = (long)arg;
+	struct kib_sched_info	*sched;
+	kib_conn_t		*conn;
+	wait_queue_t		wait;
+	unsigned long		flags;
+	struct ib_wc		wc;
+	int			did_something;
+	int			busy_loops = 0;
+	int			rc;
 
-	snprintf(name, sizeof(name), "kiblnd_sd_%02ld", id);
-	cfs_daemonize(name);
-	cfs_block_allsigs();
+	cfs_block_allsigs();
 
-	cfs_waitlink_init(&wait);
+	init_waitqueue_entry(&wait, current);
 
-	cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+	sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
 
-	while (!kiblnd_data.kib_shutdown) {
-		if (busy_loops++ >= IBLND_RESCHED) {
-			cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
						   flags);
+	rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
+	if (rc != 0) {
+		CWARN("Unable to bind on CPU partition %d, please verify "
+		      "whether all CPUs are healthy and reload modules if "
+		      "necessary, otherwise your system might be at risk of "
+		      "low 
performance\n", sched->ibs_cpt); + } - cfs_cond_resched(); - busy_loops = 0; + spin_lock_irqsave(&sched->ibs_lock, flags); - cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, - flags); - } + while (!kiblnd_data.kib_shutdown) { + if (busy_loops++ >= IBLND_RESCHED) { + spin_unlock_irqrestore(&sched->ibs_lock, flags); + + cond_resched(); + busy_loops = 0; + + spin_lock_irqsave(&sched->ibs_lock, flags); + } - did_something = 0; + did_something = 0; - if (!cfs_list_empty(&kiblnd_data.kib_sched_conns)) { - conn = cfs_list_entry(kiblnd_data.kib_sched_conns.next, - kib_conn_t, ibc_sched_list); - /* take over kib_sched_conns' ref on conn... */ - LASSERT(conn->ibc_scheduled); - cfs_list_del(&conn->ibc_sched_list); - conn->ibc_ready = 0; + if (!list_empty(&sched->ibs_conns)) { + conn = list_entry(sched->ibs_conns.next, + kib_conn_t, ibc_sched_list); + /* take over kib_sched_conns' ref on conn... */ + LASSERT(conn->ibc_scheduled); + list_del(&conn->ibc_sched_list); + conn->ibc_ready = 0; - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, - flags); + spin_unlock_irqrestore(&sched->ibs_lock, flags); + + wc.wr_id = IBLND_WID_INVAL; rc = ib_poll_cq(conn->ibc_cq, 1, &wc); if (rc == 0) { @@ -3262,75 +3567,172 @@ kiblnd_scheduler(void *arg) libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); kiblnd_close_conn(conn, -EIO); kiblnd_conn_decref(conn); - cfs_spin_lock_irqsave(&kiblnd_data. \ - kib_sched_lock, - flags); - continue; - } - - rc = ib_poll_cq(conn->ibc_cq, 1, &wc); + spin_lock_irqsave(&sched->ibs_lock, + flags); + continue; + } + + rc = ib_poll_cq(conn->ibc_cq, 1, &wc); + } + + if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) { + LCONSOLE_ERROR( + "ib_poll_cq (rc: %d) returned invalid " + "wr_id, opcode %d, status: %d, " + "vendor_err: %d, conn: %s status: %d\n" + "please upgrade firmware and OFED or " + "contact vendor.\n", rc, + wc.opcode, wc.status, wc.vendor_err, + libcfs_nid2str(conn->ibc_peer->ibp_nid), + conn->ibc_state); + rc = -EINVAL; + } + + if (rc < 0) { + CWARN("%s: ib_poll_cq failed: %d, " + "closing connection\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + rc); + kiblnd_close_conn(conn, -EIO); + kiblnd_conn_decref(conn); + spin_lock_irqsave(&sched->ibs_lock, flags); + continue; + } + + spin_lock_irqsave(&sched->ibs_lock, flags); + + if (rc != 0 || conn->ibc_ready) { + /* There may be another completion waiting; get + * another scheduler to check while I handle + * this one... */ + /* +1 ref for sched_conns */ + kiblnd_conn_addref(conn); + list_add_tail(&conn->ibc_sched_list, + &sched->ibs_conns); + if (waitqueue_active(&sched->ibs_waitq)) + wake_up(&sched->ibs_waitq); + } else { + conn->ibc_scheduled = 0; + } + + if (rc != 0) { + spin_unlock_irqrestore(&sched->ibs_lock, flags); + kiblnd_complete(&wc); + + spin_lock_irqsave(&sched->ibs_lock, flags); } - if (rc < 0) { - CWARN("%s: ib_poll_cq failed: %d, " - "closing connection\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc); - kiblnd_close_conn(conn, -EIO); - kiblnd_conn_decref(conn); - cfs_spin_lock_irqsave(&kiblnd_data. 
\
-					      kib_sched_lock, flags);
+			kiblnd_conn_decref(conn); /* ...drop my ref from above */
+			did_something = 1;
+		}
+
+		if (did_something)
+			continue;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
+		spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+		schedule();
+		busy_loops = 0;
+
+		remove_wait_queue(&sched->ibs_waitq, &wait);
+		set_current_state(TASK_RUNNING);
+		spin_lock_irqsave(&sched->ibs_lock, flags);
+	}
+
+	spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+	kiblnd_thread_fini();
+	return 0;
+}
+
+int
+kiblnd_failover_thread(void *arg)
+{
+	rwlock_t	*glock = &kiblnd_data.kib_global_lock;
+	kib_dev_t	*dev;
+	wait_queue_t	 wait;
+	unsigned long	 flags;
+	int		 rc;
+
+	LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
+
+	cfs_block_allsigs();
+
+	init_waitqueue_entry(&wait, current);
+	write_lock_irqsave(glock, flags);
+
+	while (!kiblnd_data.kib_shutdown) {
+		int do_failover = 0;
+		int long_sleep;
+
+		list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+				    ibd_fail_list) {
+			if (cfs_time_before(cfs_time_current(),
+					    dev->ibd_next_failover))
 				continue;
-			}
+			do_failover = 1;
+			break;
+		}
 
-			cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
-					      flags);
-
-			if (rc != 0 || conn->ibc_ready) {
-				/* There may be another completion waiting; get
-				 * another scheduler to check while I handle
-				 * this one... */
-				kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
-				cfs_list_add_tail(&conn->ibc_sched_list,
-						  &kiblnd_data.kib_sched_conns);
-				cfs_waitq_signal(&kiblnd_data.kib_sched_waitq);
-			} else {
-				conn->ibc_scheduled = 0;
-			}
+		if (do_failover) {
+			list_del_init(&dev->ibd_fail_list);
+			dev->ibd_failover = 1;
+			write_unlock_irqrestore(glock, flags);
 
-			if (rc != 0) {
-				cfs_spin_unlock_irqrestore(&kiblnd_data. \
-							   kib_sched_lock,
-							   flags);
+			rc = kiblnd_dev_failover(dev);
 
-				kiblnd_complete(&wc);
+			write_lock_irqsave(glock, flags);
 
-				cfs_spin_lock_irqsave(&kiblnd_data. \
-						      kib_sched_lock,
-						      flags);
+			LASSERT (dev->ibd_failover);
+			dev->ibd_failover = 0;
+			if (rc >= 0) { /* Device is OK or failover succeeded */
+				dev->ibd_next_failover = cfs_time_shift(3);
+				continue;
 			}
 
-			kiblnd_conn_decref(conn); /* ...drop my ref from above */
-			did_something = 1;
-		}
+			/* failover failed, retry later */
+			dev->ibd_next_failover =
+				cfs_time_shift(min(dev->ibd_failed_failover, 10));
+			if (kiblnd_dev_can_failover(dev)) {
+				list_add_tail(&dev->ibd_fail_list,
+					      &kiblnd_data.kib_failed_devs);
+			}
 
-		if (did_something)
 			continue;
+		}
 
-		cfs_set_current_state(CFS_TASK_INTERRUPTIBLE);
-		cfs_waitq_add_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
-		cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+		/* long sleep if no more pending failover */
+		long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
 
-		cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE);
-		busy_loops = 0;
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+		write_unlock_irqrestore(glock, flags);
 
-		cfs_waitq_del(&kiblnd_data.kib_sched_waitq, &wait);
-		cfs_set_current_state(CFS_TASK_RUNNING);
-		cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+		rc = schedule_timeout(long_sleep ? 
cfs_time_seconds(10) :
+				      cfs_time_seconds(1));
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+		write_lock_irqsave(glock, flags);
+
+		if (!long_sleep || rc != 0)
+			continue;
+
+		/* during the long sleep, routinely check all active devices;
+		 * we need this because if there is no active connection on a
+		 * dev and no SEND from the local node, we may listen on the
+		 * wrong HCA forever after a bonding failover */
+		list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+			if (kiblnd_dev_can_failover(dev)) {
+				list_add_tail(&dev->ibd_fail_list,
+					      &kiblnd_data.kib_failed_devs);
+			}
+		}
 	}
 
-	cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+	write_unlock_irqrestore(glock, flags);
 
 	kiblnd_thread_fini();
-	return (0);
+	return 0;
 }
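
The reconnect throttling this patch adds to kiblnd_connd() comes down to two constants: a peer_ni that has lost more than KIB_RECONN_HIGH_RACE connection races is parked on kib_reconn_wait, which is spliced back into kib_reconn_list at most once per wall-clock second (tracked in kib_reconn_sec), and connd services at most KIB_RECONN_BREAK reconnects before returning to its other queues. A minimal standalone sketch of that policy follows; the struct, the route_zombie() helper, and the simulated clock are illustrative stand-ins, not LND code.

#include <stdio.h>
#include <time.h>

#define KIB_RECONN_HIGH_RACE	10
#define KIB_RECONN_BREAK	100

struct peer { int reconnected; };	/* stand-in for ibp_reconnected */

/* Route a zombie connection's peer: immediate retry or throttled. */
static const char *route_zombie(const struct peer *p)
{
	return p->reconnected < KIB_RECONN_HIGH_RACE ?
		"kib_reconn_list (retry immediately)" :
		"kib_reconn_wait (retry next second)";
}

int main(void)
{
	struct peer calm = { 2 };
	struct peer racy = { 15 };
	time_t reconn_sec = 0;		/* stand-in for kib_reconn_sec */
	time_t now;
	int tick;

	printf("calm peer_ni -> %s\n", route_zombie(&calm));
	printf("racy peer_ni -> %s\n", route_zombie(&racy));

	/* The once-per-second splice: only when the second changes does
	 * the wait queue get drained back into the work queue. */
	for (tick = 0; tick < 3; tick++) {
		now = time(NULL) + tick;	/* simulated 1s ticks */
		if (reconn_sec != now) {
			reconn_sec = now;
			printf("second %lld: splice kib_reconn_wait into "
			       "kib_reconn_list\n", (long long)now);
		}
	}
	return 0;
}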
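Two other pieces of arithmetic above are worth spelling out: the wrap-safe deadline test behind the "careful with the jiffy wrap" comment, and the sizing of each peer_ni table sweep so that every connection is checked 'n' times per timeout interval. Below is a compilable userspace sketch under assumed values; 'hash_size', 'timeout' and 'n' are stand-ins for kib_peer_hash_size, the LND timeout tunable, and the checks-per-interval factor.

#include <stdio.h>

/* Wrap-safe "deadline reached?" test: subtract first, then compare the
 * signed result, so it stays correct even when the jiffy counter wraps
 * past zero (same idea as the kernel's time_after_eq()). */
static int deadline_reached(unsigned long now, unsigned long deadline)
{
	return (long)(now - deadline) >= 0;
}

int main(void)
{
	const int p = 1;		/* seconds between connd wakeups */
	const int n = 3;		/* checks wanted per timeout interval */
	const int timeout = 50;		/* stand-in for the LND timeout */
	const int hash_size = 101;	/* stand-in for kib_peer_hash_size */
	unsigned long deadline = (unsigned long)-2; /* just before wrap */
	int chunk;

	/* Size each sweep so every hash bucket is visited n times per
	 * timeout interval, guaranteeing a stuck connection is noticed
	 * within (n+1)/n times the timeout, as the comment above says. */
	chunk = hash_size * n * p / timeout;
	if (chunk == 0)
		chunk = 1;
	printf("sweep %d of %d hash buckets per %d-second tick\n",
	       chunk, hash_size, p);

	/* The deadline test survives the counter wrapping to zero. */
	printf("before deadline: %d, after wrap: %d\n",
	       deadline_reached(deadline - 1, deadline),	/* 0 */
	       deadline_reached(deadline + 3, deadline));	/* 1 */
	return 0;
}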