-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2012, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#include "o2iblnd.h"
+#define MAX_CONN_RACES_BEFORE_ABORT 20
+
+static void kiblnd_peer_alive(kib_peer_t *peer);
+static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error);
+static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx,
+ int type, int body_nob);
+static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
+static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
+static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
+static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx);
+static void kiblnd_check_sends_locked(kib_conn_t *conn);
+
void
kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)
{
- lnet_msg_t *lntmsg[2];
- kib_net_t *net = ni->ni_data;
- int rc;
- int i;
-
- LASSERT (net != NULL);
- LASSERT (!in_interrupt());
- LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
- LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
- LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */
+ lnet_msg_t *lntmsg[2];
+ kib_net_t *net = ni->ni_data;
+ int rc;
+ int i;
- kiblnd_unmap_tx(ni, tx);
+ LASSERT (net != NULL);
+ LASSERT (!in_interrupt());
+ LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
+ LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
+ LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */
+ LASSERT (tx->tx_pool != NULL);
- /* tx may have up to 2 lnet msgs to finalise */
- lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
- lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
- rc = tx->tx_status;
+ kiblnd_unmap_tx(ni, tx);
- if (tx->tx_conn != NULL) {
- LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);
+ /* tx may have up to 2 lnet msgs to finalise */
+ lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
+ lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL;
+ rc = tx->tx_status;
- kiblnd_conn_decref(tx->tx_conn);
- tx->tx_conn = NULL;
- }
-
- tx->tx_nwrq = 0;
- tx->tx_status = 0;
+ if (tx->tx_conn != NULL) {
+ LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);
- spin_lock(&net->ibn_tx_lock);
+ kiblnd_conn_decref(tx->tx_conn);
+ tx->tx_conn = NULL;
+ }
- list_add(&tx->tx_list, &net->ibn_idle_txs);
+ tx->tx_nwrq = 0;
+ tx->tx_status = 0;
- spin_unlock(&net->ibn_tx_lock);
+ kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
- /* delay finalize until my descs have been freed */
- for (i = 0; i < 2; i++) {
- if (lntmsg[i] == NULL)
- continue;
+ /* delay finalize until my descs have been freed */
+ for (i = 0; i < 2; i++) {
+ if (lntmsg[i] == NULL)
+ continue;
- lnet_finalize(ni, lntmsg[i], rc);
- }
+ lnet_finalize(ni, lntmsg[i], rc);
+ }
}
void
-kiblnd_txlist_done (lnet_ni_t *ni, struct list_head *txlist, int status)
+kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status)
{
- kib_tx_t *tx;
+ kib_tx_t *tx;
- while (!list_empty (txlist)) {
- tx = list_entry (txlist->next, kib_tx_t, tx_list);
+ while (!list_empty(txlist)) {
+ tx = list_entry(txlist->next, kib_tx_t, tx_list);
- list_del(&tx->tx_list);
- /* complete now */
- tx->tx_waiting = 0;
- tx->tx_status = status;
- kiblnd_tx_done(ni, tx);
- }
+ list_del(&tx->tx_list);
+ /* complete now */
+ tx->tx_waiting = 0;
+ tx->tx_status = status;
+ kiblnd_tx_done(ni, tx);
+ }
}
-kib_tx_t *
-kiblnd_get_idle_tx (lnet_ni_t *ni)
+static kib_tx_t *
+kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target)
{
- kib_net_t *net = ni->ni_data;
- kib_tx_t *tx;
-
- LASSERT (net != NULL);
-
- spin_lock(&net->ibn_tx_lock);
-
- if (list_empty(&net->ibn_idle_txs)) {
- spin_unlock(&net->ibn_tx_lock);
+ kib_net_t *net = (kib_net_t *)ni->ni_data;
+ struct list_head *node;
+ kib_tx_t *tx;
+ kib_tx_poolset_t *tps;
+
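+ /* allocate the tx from the pool set of the CPT (CPU partition)
+ * serving this NID, keeping descriptors NUMA-local */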
+ tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
+ node = kiblnd_pool_alloc_node(&tps->tps_poolset);
+ if (node == NULL)
return NULL;
- }
-
- tx = list_entry(net->ibn_idle_txs.next, kib_tx_t, tx_list);
- list_del(&tx->tx_list);
-
- /* Allocate a new completion cookie. It might not be needed,
- * but we've got a lock right now and we're unlikely to
- * wrap... */
- tx->tx_cookie = net->ibn_tx_next_cookie++;
-
- spin_unlock(&net->ibn_tx_lock);
+ tx = container_of(node, kib_tx_t, tx_list);
LASSERT (tx->tx_nwrq == 0);
LASSERT (!tx->tx_queued);
LASSERT (tx->tx_conn == NULL);
LASSERT (tx->tx_lntmsg[0] == NULL);
LASSERT (tx->tx_lntmsg[1] == NULL);
- LASSERT (tx->tx_u.fmr == NULL);
LASSERT (tx->tx_nfrags == 0);
return tx;
}
-void
-kiblnd_drop_rx (kib_rx_t *rx)
+static void
+kiblnd_drop_rx(kib_rx_t *rx)
{
- kib_conn_t *conn = rx->rx_conn;
- unsigned long flags;
-
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
- LASSERT (conn->ibc_nrx > 0);
- conn->ibc_nrx--;
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+ kib_conn_t *conn = rx->rx_conn;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;
- kiblnd_conn_decref(conn);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ LASSERT(conn->ibc_nrx > 0);
+ conn->ibc_nrx--;
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ kiblnd_conn_decref(conn);
}
int
kiblnd_post_rx (kib_rx_t *rx, int credit)
{
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
- struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr;
- int rc;
-
- LASSERT (net != NULL);
- LASSERT (!in_interrupt());
- LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
- credit == IBLND_POSTRX_PEER_CREDIT ||
- credit == IBLND_POSTRX_RSRVD_CREDIT);
-
- mr = kiblnd_find_dma_mr(net, rx->rx_msgaddr, IBLND_MSG_SIZE);
- LASSERT (mr != NULL);
+ kib_conn_t *conn = rx->rx_conn;
+ kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct ib_recv_wr *bad_wrq = NULL;
+ struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
+ int rc;
+
+ LASSERT (net != NULL);
+ LASSERT (!in_interrupt());
+ LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
+ credit == IBLND_POSTRX_PEER_CREDIT ||
+ credit == IBLND_POSTRX_RSRVD_CREDIT);
+ LASSERT(mr != NULL);
rx->rx_sge.lkey = mr->lkey;
rx->rx_sge.addr = rx->rx_msgaddr;
rx->rx_nob = -1; /* flag posted */
- rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
- if (rc != 0) {
- CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
- rx->rx_nob = 0;
- }
-
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
- return rc;
-
- if (rc != 0) {
- kiblnd_close_conn(conn, rc);
- kiblnd_drop_rx(rx); /* No more posts for this rx */
- return rc;
- }
-
- if (credit == IBLND_POSTRX_NO_CREDIT)
- return 0;
-
- spin_lock(&conn->ibc_lock);
- if (credit == IBLND_POSTRX_PEER_CREDIT)
- conn->ibc_outstanding_credits++;
- else
- conn->ibc_reserved_credits++;
- spin_unlock(&conn->ibc_lock);
-
- kiblnd_check_sends(conn);
- return 0;
+ /* NB: need an extra reference after ib_post_recv because we don't
+ * own this rx (and rx::rx_conn) anymore, LU-5678.
+ */
+ kiblnd_conn_addref(conn);
+ rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
+ if (unlikely(rc != 0)) {
+ CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
+ rx->rx_nob = 0;
+ }
+
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */
+ goto out;
+
+ if (unlikely(rc != 0)) {
+ kiblnd_close_conn(conn, rc);
+ kiblnd_drop_rx(rx); /* No more posts for this rx */
+ goto out;
+ }
+
+ if (credit == IBLND_POSTRX_NO_CREDIT)
+ goto out;
+
+ spin_lock(&conn->ibc_lock);
+ if (credit == IBLND_POSTRX_PEER_CREDIT)
+ conn->ibc_outstanding_credits++;
+ else
+ conn->ibc_reserved_credits++;
+ kiblnd_check_sends_locked(conn);
+ spin_unlock(&conn->ibc_lock);
+
+out:
+ kiblnd_conn_decref(conn);
+ return rc;
}
-kib_tx_t *
+static kib_tx_t *
kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
{
- struct list_head *tmp;
+ struct list_head *tmp;
- list_for_each(tmp, &conn->ibc_active_txs) {
- kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+ list_for_each(tmp, &conn->ibc_active_txs) {
+ kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
- LASSERT (!tx->tx_queued);
- LASSERT (tx->tx_sending != 0 || tx->tx_waiting);
+ LASSERT(!tx->tx_queued);
+ LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
- if (tx->tx_cookie != cookie)
- continue;
+ if (tx->tx_cookie != cookie)
+ continue;
- if (tx->tx_waiting &&
- tx->tx_msg->ibm_type == txtype)
- return tx;
+ if (tx->tx_waiting &&
+ tx->tx_msg->ibm_type == txtype)
+ return tx;
- CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
- tx->tx_waiting ? "" : "NOT ",
- tx->tx_msg->ibm_type, txtype);
- }
- return NULL;
+ CWARN("Bad completion: %swaiting, type %x (wanted %x)\n",
+ tx->tx_waiting ? "" : "NOT ",
+ tx->tx_msg->ibm_type, txtype);
+ }
+ return NULL;
}
-void
+static void
kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
{
- kib_tx_t *tx;
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- int idle;
+ kib_tx_t *tx;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ int idle;
- spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
- tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
- if (tx == NULL) {
- spin_unlock(&conn->ibc_lock);
+ tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie);
+ if (tx == NULL) {
+ spin_unlock(&conn->ibc_lock);
- CWARN("Unmatched completion type %x cookie "LPX64" from %s\n",
+ CWARN("Unmatched completion type %x cookie %#llx from %s\n",
txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
kiblnd_close_conn(conn, -EPROTO);
return;
idle = !tx->tx_queued && (tx->tx_sending == 0);
if (idle)
- list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
- spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
- if (idle)
- kiblnd_tx_done(ni, tx);
+ if (idle)
+ kiblnd_tx_done(ni, tx);
}
-void
-kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie)
+static void
+kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
{
- lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx = kiblnd_get_idle_tx(ni);
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+ kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
if (tx == NULL) {
CERROR("Can't get tx for completion %x for %s\n",
kiblnd_queue_tx(tx, conn);
}
-void
+static void
kiblnd_handle_rx (kib_rx_t *rx)
{
kib_msg_t *msg = rx->rx_msg;
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
CDEBUG (D_NET, "Received %x[%d] from %s\n",
- msg->ibm_type, credits, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ msg->ibm_type, credits,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
if (credits != 0) {
/* Have I received credits that will let me send? */
- spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
- if (conn->ibc_credits + credits >
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) {
- rc2 = conn->ibc_credits;
- spin_unlock(&conn->ibc_lock);
+ if (conn->ibc_credits + credits >
+ conn->ibc_queue_depth) {
+ rc2 = conn->ibc_credits;
+ spin_unlock(&conn->ibc_lock);
- CERROR("Bad credits from %s: %d + %d > %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc2, credits, IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
+ CERROR("Bad credits from %s: %d + %d > %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ rc2, credits,
+ conn->ibc_queue_depth);
- kiblnd_close_conn(conn, -EPROTO);
- kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
- return;
- }
+ kiblnd_close_conn(conn, -EPROTO);
+ kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT);
+ return;
+ }
conn->ibc_credits += credits;
- spin_unlock(&conn->ibc_lock);
- kiblnd_check_sends(conn);
+ /* This ensures the credit taken by NOOP can be returned */
+ if (msg->ibm_type == IBLND_MSG_NOOP &&
+ !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */
+ conn->ibc_outstanding_credits++;
+
+ kiblnd_check_sends_locked(conn);
+ spin_unlock(&conn->ibc_lock);
}
switch (msg->ibm_type) {
break;
case IBLND_MSG_NOOP:
- if (IBLND_OOB_CAPABLE(conn->ibc_version))
+ if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
post_credit = IBLND_POSTRX_NO_CREDIT;
- else
+ break;
+ }
+
+ if (credits != 0) /* credit already posted */
+ post_credit = IBLND_POSTRX_NO_CREDIT;
+ else /* a keepalive NOOP */
post_credit = IBLND_POSTRX_PEER_CREDIT;
break;
break;
case IBLND_MSG_PUT_NAK:
- CWARN ("PUT_NACK from %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CWARN ("PUT_NACK from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
post_credit = IBLND_POSTRX_RSRVD_CREDIT;
kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ,
msg->ibm_u.completion.ibcm_status,
case IBLND_MSG_PUT_ACK:
post_credit = IBLND_POSTRX_RSRVD_CREDIT;
- spin_lock(&conn->ibc_lock);
- tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
- msg->ibm_u.putack.ibpam_src_cookie);
- if (tx != NULL)
- list_del(&tx->tx_list);
- spin_unlock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
+ tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ,
+ msg->ibm_u.putack.ibpam_src_cookie);
+ if (tx != NULL)
+ list_del(&tx->tx_list);
+ spin_unlock(&conn->ibc_lock);
if (tx == NULL) {
CERROR("Unmatched PUT_ACK from %s\n",
CERROR("Can't setup rdma for PUT to %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2);
- spin_lock(&conn->ibc_lock);
- tx->tx_waiting = 0; /* clear waiting and queue atomically */
- kiblnd_queue_tx_locked(tx, conn);
- spin_unlock(&conn->ibc_lock);
- break;
+ spin_lock(&conn->ibc_lock);
+ tx->tx_waiting = 0; /* clear waiting and queue atomically */
+ kiblnd_queue_tx_locked(tx, conn);
+ spin_unlock(&conn->ibc_lock);
+ break;
case IBLND_MSG_PUT_DONE:
post_credit = IBLND_POSTRX_PEER_CREDIT;
kiblnd_post_rx(rx, post_credit);
}
-void
+static void
kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
{
kib_msg_t *msg = rx->rx_msg;
goto ignore;
if (status != IB_WC_SUCCESS) {
- CDEBUG(D_NETERROR, "Rx from %s failed: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
+ CNETERR("Rx from %s failed: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
goto failed;
}
/* racing with connection establishment/teardown! */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- unsigned long flags;
-
- write_lock_irqsave(g_lock, flags);
- /* must check holding global lock to eliminate race */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
- write_unlock_irqrestore(g_lock, flags);
- return;
- }
- write_unlock_irqrestore(g_lock, flags);
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ unsigned long flags;
+
+ write_lock_irqsave(g_lock, flags);
+ /* must check holding global lock to eliminate race */
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+ write_unlock_irqrestore(g_lock, flags);
+ return;
+ }
+ write_unlock_irqrestore(g_lock, flags);
}
kiblnd_handle_rx(rx);
return;
kiblnd_drop_rx(rx); /* Don't re-post rx. */
}
-struct page *
+static struct page *
kiblnd_kvaddr_to_page (unsigned long vaddr)
{
struct page *page;
- if (vaddr >= VMALLOC_START &&
- vaddr < VMALLOC_END) {
+ if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
return page;
}
-static void
-kiblnd_fmr_unmap_tx(kib_net_t *net, kib_tx_t *tx)
-{
- int rc;
-
- if (tx->tx_u.fmr == NULL)
- return;
-
- rc = ib_fmr_pool_unmap(tx->tx_u.fmr);
- LASSERT (rc == 0);
-
- if (tx->tx_status != 0) {
- rc = ib_flush_fmr_pool(net->ibn_fmrpool);
- LASSERT (rc == 0);
- }
-
- tx->tx_u.fmr = NULL;
-}
-
static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
{
- struct ib_pool_fmr *fmr;
- kib_dev_t *ibdev = net->ibn_dev;
- __u64 *pages = tx->tx_pages;
- int npages;
- int size;
- int i;
-
- for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
- for (size = 0; size < rd->rd_frags[i].rf_nob;
- size += ibdev->ibd_page_size) {
- pages[npages ++] = (rd->rd_frags[i].rf_addr &
- ibdev->ibd_page_mask) + size;
- }
- }
-
- fmr = ib_fmr_pool_map_phys(net->ibn_fmrpool, pages, npages, 0);
-
- if (IS_ERR(fmr)) {
- CERROR ("Can't map %d pages: %ld\n", npages, PTR_ERR(fmr));
- return PTR_ERR(fmr);
- }
-
- /* If rd is not tx_rd, it's going to get sent to a peer, who will need
- * the rkey */
- rd->rd_key = (rd != tx->tx_rd) ? fmr->fmr->rkey :
- fmr->fmr->lkey;
- rd->rd_frags[0].rf_addr &= ~ibdev->ibd_page_mask;
- rd->rd_frags[0].rf_nob = nob;
- rd->rd_nfrags = 1;
-
- tx->tx_u.fmr = fmr;
-
- return 0;
+ kib_hca_dev_t *hdev;
+ kib_fmr_poolset_t *fps;
+ int cpt;
+ int rc;
+
+ LASSERT(tx->tx_pool != NULL);
+ LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+
+ hdev = tx->tx_pool->tpo_hdev;
+ cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
+
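+ /* FMR pool sets are likewise per-CPT; use the one owned by the
+ * same CPT as this tx's pool */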
+ fps = net->ibn_fmr_ps[cpt];
+ rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
+ if (rc != 0) {
+ CERROR("Can't map %u pages: %d\n", nob, rc);
+ return rc;
+ }
+
+ /* If rd is not tx_rd, it's going to get sent to a peer, who will need
+ * the rkey */
+ rd->rd_key = tx->fmr.fmr_key;
+ rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
+ rd->rd_frags[0].rf_nob = nob;
+ rd->rd_nfrags = 1;
+
+ return 0;
}
static void
-kiblnd_pmr_unmap_tx(kib_net_t *net, kib_tx_t *tx)
-{
- if (tx->tx_u.pmr == NULL)
- return;
-
- kiblnd_phys_mr_unmap(net, tx->tx_u.pmr);
-
- tx->tx_u.pmr = NULL;
-}
-
-static int
-kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
-{
- kib_phys_mr_t *pmr;
- __u64 iova;
-
- iova = rd->rd_frags[0].rf_addr & ~net->ibn_dev->ibd_page_mask;
-
- pmr = kiblnd_phys_mr_map(net, rd, tx->tx_ipb, &iova);
- if (pmr == NULL) {
- CERROR("Failed to create MR by phybuf\n");
- return -ENOMEM;
- }
-
- rd->rd_key = (rd != tx->tx_rd) ? pmr->ibpm_mr->rkey :
- pmr->ibpm_mr->lkey;
- rd->rd_nfrags = 1;
- rd->rd_frags[0].rf_addr = iova;
- rd->rd_frags[0].rf_nob = nob;
-
- tx->tx_u.pmr = pmr;
-
- return 0;
-}
-
-void
kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
{
- kib_net_t *net = ni->ni_data;
+ kib_net_t *net = ni->ni_data;
- LASSERT (net != NULL);
+ LASSERT(net != NULL);
- if (net->ibn_fmrpool != NULL)
- kiblnd_fmr_unmap_tx(net, tx);
- else if (net->ibn_pmrpool != NULL)
- kiblnd_pmr_unmap_tx(net, tx);
+ if (net->ibn_fmr_ps != NULL)
+ kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
if (tx->tx_nfrags != 0) {
- kiblnd_dma_unmap_sg(net->ibn_dev->ibd_cmid->device,
+ kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
tx->tx_nfrags = 0;
}
}
-int
-kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx,
- kib_rdma_desc_t *rd, int nfrags)
+static int
+kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
{
- kib_net_t *net = ni->ni_data;
- struct ib_mr *mr = NULL;
- __u32 nob;
- int i;
+ kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct ib_mr *mr = NULL;
+ __u32 nob;
+ int i;
/* If rd is not tx_rd, it's going to get sent to a peer and I'm the
* RDMA sink */
tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- tx->tx_nfrags = nfrags;
+ tx->tx_nfrags = nfrags;
- rd->rd_nfrags =
- kiblnd_dma_map_sg(net->ibn_dev->ibd_cmid->device,
- tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+ rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
+ tx->tx_nfrags, tx->tx_dmadir);
for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
- net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+ hdev->ibh_ibdev, &tx->tx_frags[i]);
rd->rd_frags[i].rf_addr = kiblnd_sg_dma_address(
- net->ibn_dev->ibd_cmid->device, &tx->tx_frags[i]);
+ hdev->ibh_ibdev, &tx->tx_frags[i]);
nob += rd->rd_frags[i].rf_nob;
}
- /* looking for pre-mapping MR */
- mr = kiblnd_find_rd_dma_mr(net, rd);
- if (mr != NULL) {
- /* found pre-mapping MR */
- rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
- return 0;
- }
-
- if (net->ibn_fmrpool != NULL)
- return kiblnd_fmr_map_tx(net, tx, rd, nob);
+ mr = kiblnd_find_rd_dma_mr(ni, rd,
+ (tx->tx_conn != NULL) ?
+ tx->tx_conn->ibc_max_frags : -1);
+ if (mr != NULL) {
+ /* found pre-mapping MR */
+ rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
+ return 0;
+ }
- if (net->ibn_pmrpool != NULL)
- return kiblnd_pmr_map_tx(net, tx, rd, nob);
+ if (net->ibn_fmr_ps != NULL)
+ return kiblnd_fmr_map_tx(net, tx, rd, nob);
- return -EINVAL;
+ return -EINVAL;
}
-int
+static int
kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- unsigned int niov, struct iovec *iov, int offset, int nob)
+ unsigned int niov, struct kvec *iov, int offset, int nob)
{
kib_net_t *net = ni->ni_data;
struct page *page;
fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
sg_set_page(sg, page, fragnob, page_offset);
- sg++;
+ sg = sg_next(sg);
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
if (offset + fragnob < iov->iov_len) {
offset += fragnob;
return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}
-int
+static int
kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
int nkiov, lnet_kiov_t *kiov, int offset, int nob)
{
fragnob = min((int)(kiov->kiov_len - offset), nob);
- memset(sg, 0, sizeof(*sg));
- sg_set_page(sg, kiov->kiov_page, fragnob,
- kiov->kiov_offset + offset);
- sg++;
+ sg_set_page(sg, kiov->kiov_page, fragnob,
+ kiov->kiov_offset + offset);
+ sg = sg_next(sg);
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
offset = 0;
kiov++;
return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}
-int
+static int
kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
+__must_hold(&conn->ibc_lock)
{
kib_msg_t *msg = tx->tx_msg;
kib_peer_t *peer = conn->ibc_peer;
+ struct lnet_ni *ni = peer->ibp_ni;
int ver = conn->ibc_version;
int rc;
int done;
- struct ib_send_wr *bad_wrq;
- LASSERT (tx->tx_queued);
- /* We rely on this for QP sizing */
- LASSERT (tx->tx_nwrq > 0);
- LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver));
+ LASSERT(tx->tx_queued);
+ /* We rely on this for QP sizing */
+ LASSERT(tx->tx_nwrq > 0);
+ LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);
- LASSERT (credit == 0 || credit == 1);
- LASSERT (conn->ibc_outstanding_credits >= 0);
- LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver));
- LASSERT (conn->ibc_credits >= 0);
- LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver));
+ LASSERT(credit == 0 || credit == 1);
+ LASSERT(conn->ibc_outstanding_credits >= 0);
+ LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth);
+ LASSERT(conn->ibc_credits >= 0);
+ LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
- if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
+ if (conn->ibc_nsends_posted ==
+ kiblnd_concurrent_sends(ver, ni)) {
/* tx completions outstanding... */
CDEBUG(D_NET, "%s: posted enough\n",
libcfs_nid2str(peer->ibp_nid));
}
if (credit != 0 && !IBLND_OOB_CAPABLE(ver) &&
- conn->ibc_credits == 1 && /* last credit reserved for */
- conn->ibc_outstanding_credits == 0) { /* giving back credits */
+ conn->ibc_credits == 1 && /* last credit reserved */
+ msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */
CDEBUG(D_NET, "%s: not using last credit\n",
libcfs_nid2str(peer->ibp_nid));
return -EAGAIN;
}
/* NB don't drop ibc_lock before bumping tx_sending */
- list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
tx->tx_queued = 0;
if (msg->ibm_type == IBLND_MSG_NOOP &&
- (!kiblnd_send_noop(conn) || /* redundant NOOP */
+ (!kiblnd_need_noop(conn) || /* redundant NOOP */
(IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */
conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) {
- /* OK to drop when posted enough NOOPs, since
- * kiblnd_check_sends will queue NOOP again when
- * posted NOOPs complete */
- spin_unlock(&conn->ibc_lock);
- kiblnd_tx_done(peer->ibp_ni, tx);
- spin_lock(&conn->ibc_lock);
+ /* OK to drop when posted enough NOOPs, since
+ * kiblnd_check_sends_locked will queue NOOP again when
+ * posted NOOPs complete */
+ spin_unlock(&conn->ibc_lock);
+ kiblnd_tx_done(peer->ibp_ni, tx);
+ spin_lock(&conn->ibc_lock);
CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
libcfs_nid2str(peer->ibp_nid),
conn->ibc_noops_posted);
* tx_sending is non-zero if we've not done the tx_complete()
* from the first send; hence the ++ rather than = below. */
tx->tx_sending++;
- list_add(&tx->tx_list, &conn->ibc_active_txs);
+ list_add(&tx->tx_list, &conn->ibc_active_txs);
/* I'm still holding ibc_lock! */
- if (conn->ibc_state != IBLND_CONN_ESTABLISHED)
+ if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
rc = -ECONNABORTED;
- else
- rc = ib_post_send(conn->ibc_cmid->qp,
- tx->tx_wrq, &bad_wrq);
+ } else if (tx->tx_pool->tpo_pool.po_failed ||
+ conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
+ /* close_conn will launch failover */
+ rc = -ENETDOWN;
+ } else {
+ struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
+ struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+ struct ib_send_wr *wr = &tx->tx_wrq[0].wr;
+
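+ /* Fast-reg mapping: chain the registration WR ahead of the
+ * transmit WRs, preceded by an invalidate WR when the descriptor
+ * still holds a stale registration (!frd_valid). */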
+ if (frd != NULL) {
+ if (!frd->frd_valid) {
+ wr = &frd->frd_inv_wr.wr;
+ wr->next = &frd->frd_fastreg_wr.wr;
+ } else {
+ wr = &frd->frd_fastreg_wr.wr;
+ }
+ frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
+ }
+
+ LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+ "bad wr_id %#llx, opc %d, flags %d, peer: %s\n",
+ bad->wr_id, bad->opcode, bad->send_flags,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+
+ bad = NULL;
+ rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
+ }
+
conn->ibc_last_send = jiffies;
if (rc == 0)
done = (tx->tx_sending == 0);
if (done)
- list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
- spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
CERROR("Error %d posting transmit to %s\n",
if (done)
kiblnd_tx_done(peer->ibp_ni, tx);
- return -EIO;
+
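+ /* Callers hold ibc_lock across this function (see __must_hold
+ * above); the error path dropped it, so re-take it before
+ * returning. */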
+ spin_lock(&conn->ibc_lock);
+
+ return -EIO;
}
-void
-kiblnd_check_sends (kib_conn_t *conn)
+static void
+kiblnd_check_sends_locked(kib_conn_t *conn)
{
int ver = conn->ibc_version;
lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
return;
}
- spin_lock(&conn->ibc_lock);
-
- LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
+ LASSERT(conn->ibc_nsends_posted <=
+ kiblnd_concurrent_sends(ver, ni));
LASSERT (!IBLND_OOB_CAPABLE(ver) ||
conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+ !list_empty(&conn->ibc_tx_queue_rsrvd)) {
+ tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
+ kib_tx_t, tx_list);
+ list_del(&tx->tx_list);
+ list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
- if (kiblnd_send_noop(conn)) {
- spin_unlock(&conn->ibc_lock);
+ if (kiblnd_need_noop(conn)) {
+ spin_unlock(&conn->ibc_lock);
- tx = kiblnd_get_idle_tx(ni);
- if (tx != NULL)
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
+ tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+ if (tx != NULL)
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0);
- spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
if (tx != NULL)
kiblnd_queue_tx_locked(tx, conn);
}
for (;;) {
int credit;
- if (!list_empty(&conn->ibc_tx_queue_nocred)) {
+ if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
- } else if (!list_empty(&conn->ibc_tx_queue)) {
+ tx = list_entry(conn->ibc_tx_queue_nocred.next,
+ kib_tx_t, tx_list);
+ } else if (!list_empty(&conn->ibc_tx_noops)) {
+ LASSERT (!IBLND_OOB_CAPABLE(ver));
credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
+ tx = list_entry(conn->ibc_tx_noops.next,
kib_tx_t, tx_list);
+ } else if (!list_empty(&conn->ibc_tx_queue)) {
+ credit = 1;
+ tx = list_entry(conn->ibc_tx_queue.next,
+ kib_tx_t, tx_list);
} else
break;
if (kiblnd_post_tx_locked(conn, tx, credit) != 0)
break;
}
-
- spin_unlock(&conn->ibc_lock);
}
-void
+static void
kiblnd_tx_complete (kib_tx_t *tx, int status)
{
int failed = (status != IB_WC_SUCCESS);
if (failed) {
if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
- CDEBUG(D_NETERROR, "Tx -> %s cookie "LPX64
- " sending %d waiting %d: failed %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
- status);
+ CNETERR("Tx -> %s cookie %#llx"
+ " sending %d waiting %d: failed %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
+ status);
kiblnd_close_conn(conn, -EIO);
} else {
kiblnd_peer_alive(conn->ibc_peer);
}
- spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
/* I could be racing with rdma completion. Whoever makes 'tx' idle
* gets to free it, which also drops its ref on 'conn'. */
!tx->tx_waiting && /* Not waiting for peer */
!tx->tx_queued; /* Not re-queued (PUT_DONE) */
if (idle)
- list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
- kiblnd_conn_addref(conn); /* 1 ref for me.... */
-
- spin_unlock(&conn->ibc_lock);
+ kiblnd_check_sends_locked(conn);
+ spin_unlock(&conn->ibc_lock);
if (idle)
kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
-
- kiblnd_check_sends(conn);
-
- kiblnd_conn_decref(conn); /* ...until here */
}
-void
+static void
kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
{
- kib_net_t *net = ni->ni_data;
- struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
- struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
- int nob = offsetof (kib_msg_t, ibm_u) + body_nob;
- struct ib_mr *mr;
+ kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
+ struct ib_rdma_wr *wrq;
+ int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+ struct ib_mr *mr = hdev->ibh_mrs;
- LASSERT (net != NULL);
- LASSERT (tx->tx_nwrq >= 0);
- LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
- LASSERT (nob <= IBLND_MSG_SIZE);
+ LASSERT(tx->tx_nwrq >= 0);
+ LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
+ LASSERT(nob <= IBLND_MSG_SIZE);
+ LASSERT(mr != NULL);
kiblnd_init_msg(tx->tx_msg, type, body_nob);
- mr = kiblnd_find_dma_mr(net, tx->tx_msgaddr, nob);
- LASSERT (mr != NULL);
-
sge->lkey = mr->lkey;
sge->addr = tx->tx_msgaddr;
sge->length = nob;
- memset(wrq, 0, sizeof(*wrq));
+ wrq = &tx->tx_wrq[tx->tx_nwrq];
+ memset(wrq, 0, sizeof(*wrq));
- wrq->next = NULL;
- wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
- wrq->sg_list = sge;
- wrq->num_sge = 1;
- wrq->opcode = IB_WR_SEND;
- wrq->send_flags = IB_SEND_SIGNALED;
+ wrq->wr.next = NULL;
+ wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
+ wrq->wr.sg_list = sge;
+ wrq->wr.num_sge = 1;
+ wrq->wr.opcode = IB_WR_SEND;
+ wrq->wr.send_flags = IB_SEND_SIGNALED;
- tx->tx_nwrq++;
+ tx->tx_nwrq++;
}
-int
-kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+static int
+kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
+ int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
{
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
- struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_send_wr *wrq = &tx->tx_wrq[0];
- int rc = resid;
- int srcidx;
- int dstidx;
- int wrknob;
-
- LASSERT (!in_interrupt());
- LASSERT (tx->tx_nwrq == 0);
- LASSERT (type == IBLND_MSG_GET_DONE ||
- type == IBLND_MSG_PUT_DONE);
-
- srcidx = dstidx = 0;
+ kib_msg_t *ibmsg = tx->tx_msg;
+ kib_rdma_desc_t *srcrd = tx->tx_rd;
+ struct ib_sge *sge = &tx->tx_sge[0];
+ struct ib_rdma_wr *wrq;
+ int rc = resid;
+ int srcidx;
+ int dstidx;
+ int wrknob;
+
+ LASSERT (!in_interrupt());
+ LASSERT (tx->tx_nwrq == 0);
+ LASSERT (type == IBLND_MSG_GET_DONE ||
+ type == IBLND_MSG_PUT_DONE);
+
+ srcidx = dstidx = 0;
while (resid > 0) {
if (srcidx >= srcrd->rd_nfrags) {
break;
}
- if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) {
- CERROR("RDMA too fragmented for %s (%d): "
- "%d/%d src %d/%d dst frags\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- IBLND_RDMA_FRAGS(conn->ibc_version),
- srcidx, srcrd->rd_nfrags,
- dstidx, dstrd->rd_nfrags);
- rc = -EMSGSIZE;
- break;
- }
+ if (tx->tx_nwrq >= conn->ibc_max_frags) {
+ CERROR("RDMA has too many fragments for peer %s (%d), "
+ "src idx/frags: %d/%d dst idx/frags: %d/%d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ conn->ibc_max_frags,
+ srcidx, srcrd->rd_nfrags,
+ dstidx, dstrd->rd_nfrags);
+ rc = -EMSGSIZE;
+ break;
+ }
wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
kiblnd_rd_frag_size(dstrd, dstidx)), resid);
wrq = &tx->tx_wrq[tx->tx_nwrq];
- wrq->next = wrq + 1;
- wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
- wrq->sg_list = sge;
- wrq->num_sge = 1;
- wrq->opcode = IB_WR_RDMA_WRITE;
- wrq->send_flags = 0;
-
- wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
- wrq->wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);
+ wrq->wr.next = &(wrq + 1)->wr;
+ wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
+ wrq->wr.sg_list = sge;
+ wrq->wr.num_sge = 1;
+ wrq->wr.opcode = IB_WR_RDMA_WRITE;
+ wrq->wr.send_flags = 0;
+
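+ /* HAVE_IB_RDMA_WR is assumed to detect kernels (4.4+) where
+ * ib_send_wr was split into per-opcode wrappers; there the RDMA
+ * target lives directly in struct ib_rdma_wr, while older kernels
+ * keep it in the wr.wr.rdma union. */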
+#ifdef HAVE_IB_RDMA_WR
+ wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
+ wrq->rkey = kiblnd_rd_frag_key(dstrd, dstidx);
+#else
+ wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
+ wrq->wr.wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);
+#endif
srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
return rc;
}
-void
-kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn)
+static void
+kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
{
- struct list_head *q;
+ struct list_head *q;
- LASSERT (tx->tx_nwrq > 0); /* work items set up */
- LASSERT (!tx->tx_queued); /* not queued for sending already */
- LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ LASSERT(tx->tx_nwrq > 0); /* work items set up */
+ LASSERT(!tx->tx_queued); /* not queued for sending already */
+ LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- tx->tx_queued = 1;
- tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ);
+ tx->tx_queued = 1;
+ tx->tx_deadline = jiffies +
+ msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
+ MSEC_PER_SEC);
if (tx->tx_conn == NULL) {
kiblnd_conn_addref(conn);
if (IBLND_OOB_CAPABLE(conn->ibc_version))
q = &conn->ibc_tx_queue_nocred;
else
- q = &conn->ibc_tx_queue;
+ q = &conn->ibc_tx_noops;
break;
case IBLND_MSG_IMMEDIATE:
break;
}
- list_add_tail(&tx->tx_list, q);
+ list_add_tail(&tx->tx_list, q);
}
-void
+static void
kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
{
- spin_lock(&conn->ibc_lock);
- kiblnd_queue_tx_locked(tx, conn);
- spin_unlock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
+ kiblnd_queue_tx_locked(tx, conn);
+ kiblnd_check_sends_locked(conn);
+ spin_unlock(&conn->ibc_lock);
+}
+
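+/* Bind the cmid to a free privileged port (below PROT_SOCK) before resolving
+ * the peer's address.  Used when the kib_use_priv_port tunable is set,
+ * presumably so the passive side can apply privileged-port checks. */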
+static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
+ struct sockaddr_in *srcaddr,
+ struct sockaddr_in *dstaddr,
+ int timeout_ms)
+{
+ unsigned short port;
+ int rc;
+
+ /* allow the port to be reused */
+ rc = rdma_set_reuseaddr(cmid, 1);
+ if (rc != 0) {
+ CERROR("Unable to set reuse on cmid: %d\n", rc);
+ return rc;
+ }
+
+ /* look for a free privileged port */
+ for (port = PROT_SOCK - 1; port > 0; port--) {
+ srcaddr->sin_port = htons(port);
+ rc = rdma_resolve_addr(cmid,
+ (struct sockaddr *)srcaddr,
+ (struct sockaddr *)dstaddr,
+ timeout_ms);
+ if (rc == 0) {
+ CDEBUG(D_NET, "bound to port %hu\n", port);
+ return 0;
+ } else if (rc == -EADDRINUSE || rc == -EADDRNOTAVAIL) {
+ CDEBUG(D_NET, "bind to port %hu failed: %d\n",
+ port, rc);
+ } else {
+ return rc;
+ }
+ }
- kiblnd_check_sends(conn);
+ CERROR("Failed to bind to a free privileged port\n");
+ return rc;
}
-void
+static void
kiblnd_connect_peer (kib_peer_t *peer)
{
struct rdma_cm_id *cmid;
LASSERT (net != NULL);
LASSERT (peer->ibp_connecting > 0);
+ LASSERT(!peer->ibp_reconnecting);
+
+ cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
+ IB_QPT_RC);
- cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP);
if (IS_ERR(cmid)) {
CERROR("Can't create CMID for %s: %ld\n",
libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid));
kiblnd_peer_addref(peer); /* cmid's ref */
- rc = rdma_resolve_addr(cmid,
- (struct sockaddr *)&srcaddr,
- (struct sockaddr *)&dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- if (rc == 0) {
- LASSERT (cmid->device != NULL);
- CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
- libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
- HIPQUAD(dev->ibd_ifip), cmid->device->name);
- return;
+ if (*kiblnd_tunables.kib_use_priv_port) {
+ rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
+ *kiblnd_tunables.kib_timeout * 1000);
+ } else {
+ rc = rdma_resolve_addr(cmid,
+ (struct sockaddr *)&srcaddr,
+ (struct sockaddr *)&dstaddr,
+ *kiblnd_tunables.kib_timeout * 1000);
+ }
+ if (rc != 0) {
+ /* Can't initiate address resolution: */
+ CERROR("Can't resolve addr for %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), rc);
+ goto failed2;
}
- /* Can't initiate address resolution: */
- CERROR("Can't resolve addr for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), rc);
+ LASSERT (cmid->device != NULL);
+ CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
+ libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
+ &dev->ibd_ifip, cmid->device->name);
- kiblnd_peer_decref(peer); /* cmid's ref */
- rdma_destroy_id(cmid);
+ return;
+
+ failed2:
+ kiblnd_peer_connect_failed(peer, 1, rc);
+ kiblnd_peer_decref(peer); /* cmid's ref */
+ rdma_destroy_id(cmid);
+ return;
failed:
- kiblnd_peer_connect_failed(peer, 1, rc);
+ kiblnd_peer_connect_failed(peer, 1, rc);
+}
+
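+/* Relaunch a connect for a peer marked ibp_reconnecting (typically the loser
+ * of a connection race).  Returns true once the new connect is issued; on
+ * abort the reason is logged and any txs stranded on an unlinked peer are
+ * finalized with -ECONNABORTED. */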
+bool
+kiblnd_reconnect_peer(kib_peer_t *peer)
+{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ char *reason = NULL;
+ struct list_head txs;
+ unsigned long flags;
+
+ INIT_LIST_HEAD(&txs);
+
+ write_lock_irqsave(glock, flags);
+ if (peer->ibp_reconnecting == 0) {
+ if (peer->ibp_accepting)
+ reason = "accepting";
+ else if (peer->ibp_connecting)
+ reason = "connecting";
+ else if (!list_empty(&peer->ibp_conns))
+ reason = "connected";
+ else /* connected then closed */
+ reason = "closed";
+
+ goto no_reconnect;
+ }
+
+ LASSERT(!peer->ibp_accepting && !peer->ibp_connecting &&
+ list_empty(&peer->ibp_conns));
+ peer->ibp_reconnecting = 0;
+
+ if (!kiblnd_peer_active(peer)) {
+ list_splice_init(&peer->ibp_tx_queue, &txs);
+ reason = "unlinked";
+ goto no_reconnect;
+ }
+
+ peer->ibp_connecting++;
+ peer->ibp_reconnected++;
+ write_unlock_irqrestore(glock, flags);
+
+ kiblnd_connect_peer(peer);
+ return true;
+
+ no_reconnect:
+ write_unlock_irqrestore(glock, flags);
+
+ CWARN("Abort reconnection of %s: %s\n",
+ libcfs_nid2str(peer->ibp_nid), reason);
+ kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED);
+ return false;
}
void
kib_peer_t *peer;
kib_peer_t *peer2;
kib_conn_t *conn;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
int rc;
/* First time, just use a read lock since I expect to find my peer
* connected */
- read_lock_irqsave(g_lock, flags);
+ read_lock_irqsave(g_lock, flags);
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL && !list_empty(&peer->ibp_conns)) {
+ peer = kiblnd_find_peer_locked(ni, nid);
+ if (peer != NULL && !list_empty(&peer->ibp_conns)) {
/* Found a peer with an established connection */
conn = kiblnd_get_conn_locked(peer);
kiblnd_conn_addref(conn); /* 1 ref for me... */
- read_unlock_irqrestore(g_lock, flags);
+ read_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
return;
}
- read_unlock(g_lock);
- /* Re-try with a write lock */
- write_lock(g_lock);
+ read_unlock(g_lock);
+ /* Re-try with a write lock */
+ write_lock(g_lock);
- peer = kiblnd_find_peer_locked(nid);
+ peer = kiblnd_find_peer_locked(ni, nid);
if (peer != NULL) {
- if (list_empty(&peer->ibp_conns)) {
+ if (list_empty(&peer->ibp_conns)) {
/* found a peer, but it's still connecting... */
- LASSERT (peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0);
+ LASSERT(kiblnd_peer_connecting(peer));
if (tx != NULL)
- list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
- } else {
- conn = kiblnd_get_conn_locked(peer);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
+ list_add_tail(&tx->tx_list,
+ &peer->ibp_tx_queue);
+ write_unlock_irqrestore(g_lock, flags);
+ } else {
+ conn = kiblnd_get_conn_locked(peer);
+ kiblnd_conn_addref(conn); /* 1 ref for me... */
- write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
return;
}
- write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
- /* Allocate a peer ready to add to the peer table and retry */
- rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc != 0) {
- CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
- if (tx != NULL) {
- tx->tx_status = -EHOSTUNREACH;
- tx->tx_waiting = 0;
- kiblnd_tx_done(ni, tx);
- }
- return;
- }
+ /* Allocate a peer ready to add to the peer table and retry */
+ rc = kiblnd_create_peer(ni, &peer, nid);
+ if (rc != 0) {
+ CERROR("Can't create peer %s\n", libcfs_nid2str(nid));
+ if (tx != NULL) {
+ tx->tx_status = -EHOSTUNREACH;
+ tx->tx_waiting = 0;
+ kiblnd_tx_done(ni, tx);
+ }
+ return;
+ }
- write_lock_irqsave(g_lock, flags);
+ write_lock_irqsave(g_lock, flags);
- peer2 = kiblnd_find_peer_locked(nid);
+ peer2 = kiblnd_find_peer_locked(ni, nid);
if (peer2 != NULL) {
- if (list_empty(&peer2->ibp_conns)) {
+ if (list_empty(&peer2->ibp_conns)) {
/* found a peer, but it's still connecting... */
- LASSERT (peer2->ibp_connecting != 0 ||
- peer2->ibp_accepting != 0);
+ LASSERT(kiblnd_peer_connecting(peer2));
if (tx != NULL)
- list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue);
- write_unlock_irqrestore(g_lock, flags);
- } else {
- conn = kiblnd_get_conn_locked(peer2);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
+ list_add_tail(&tx->tx_list,
+ &peer2->ibp_tx_queue);
+ write_unlock_irqrestore(g_lock, flags);
+ } else {
+ conn = kiblnd_get_conn_locked(peer2);
+ kiblnd_conn_addref(conn); /* 1 ref for me... */
- write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
if (tx != NULL)
kiblnd_queue_tx(tx, conn);
LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
if (tx != NULL)
- list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
+ list_add_tail(&tx->tx_list, &peer->ibp_tx_queue);
kiblnd_peer_addref(peer);
- list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+ list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
- write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
kiblnd_connect_peer(peer);
kiblnd_peer_decref(peer);
kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
lnet_hdr_t *hdr = &lntmsg->msg_hdr;
- int type = lntmsg->msg_type;
- lnet_process_id_t target = lntmsg->msg_target;
- int target_is_router = lntmsg->msg_target_is_router;
- int routing = lntmsg->msg_routing;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct iovec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- kib_msg_t *ibmsg;
- kib_tx_t *tx;
- int nob;
- int rc;
+ int type = lntmsg->msg_type;
+ lnet_process_id_t target = lntmsg->msg_target;
+ int target_is_router = lntmsg->msg_target_is_router;
+ int routing = lntmsg->msg_routing;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct kvec *payload_iov = lntmsg->msg_iov;
+ lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ kib_msg_t *ibmsg;
+ kib_rdma_desc_t *rd;
+ kib_tx_t *tx;
+ int nob;
+ int rc;
/* NB 'private' is different depending on what we're sending.... */
CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
payload_nob, payload_niov, libcfs_id2str(target));
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT (payload_nob == 0 || payload_niov > 0);
+ LASSERT (payload_niov <= LNET_MAX_IOV);
- /* Thread context */
- LASSERT (!in_interrupt());
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ /* Thread context */
+ LASSERT (!in_interrupt());
+ /* payload is either all vaddrs or all pages */
+ LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
- switch (type) {
- default:
- LBUG();
- return (-EIO);
+ switch (type) {
+ default:
+ LBUG();
+ return (-EIO);
case LNET_MSG_ACK:
LASSERT (payload_nob == 0);
if (nob <= IBLND_MSG_SIZE)
break; /* send IMMEDIATE */
- tx = kiblnd_get_idle_tx(ni);
- if (tx == NULL) {
- CERROR("Can't allocate txd for GET to %s: \n",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
-
- if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
- rc = kiblnd_setup_rd_iov(ni, tx,
- &ibmsg->ibm_u.get.ibgm_rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.iov,
- 0, lntmsg->msg_md->md_length);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx,
- &ibmsg->ibm_u.get.ibgm_rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.kiov,
- 0, lntmsg->msg_md->md_length);
- if (rc != 0) {
- CERROR("Can't setup GET sink for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
-
- nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]);
- ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
- ibmsg->ibm_u.get.ibgm_hdr = *hdr;
+ tx = kiblnd_get_idle_tx(ni, target.nid);
+ if (tx == NULL) {
+ CERROR("Can't allocate txd for GET to %s\n",
+ libcfs_nid2str(target.nid));
+ return -ENOMEM;
+ }
+
+ ibmsg = tx->tx_msg;
+ rd = &ibmsg->ibm_u.get.ibgm_rd;
+ if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
+ rc = kiblnd_setup_rd_iov(ni, tx, rd,
+ lntmsg->msg_md->md_niov,
+ lntmsg->msg_md->md_iov.iov,
+ 0, lntmsg->msg_md->md_length);
+ else
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+ lntmsg->msg_md->md_niov,
+ lntmsg->msg_md->md_iov.kiov,
+ 0, lntmsg->msg_md->md_length);
+ if (rc != 0) {
+ CERROR("Can't setup GET sink for %s: %d\n",
+ libcfs_nid2str(target.nid), rc);
+ kiblnd_tx_done(ni, tx);
+ return -EIO;
+ }
+
+ nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
+ ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
+ ibmsg->ibm_u.get.ibgm_hdr = *hdr;
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
if (nob <= IBLND_MSG_SIZE)
break; /* send IMMEDIATE */
- tx = kiblnd_get_idle_tx(ni);
+ tx = kiblnd_get_idle_tx(ni, target.nid);
if (tx == NULL) {
CERROR("Can't allocate %s txd for %s\n",
type == LNET_MSG_PUT ? "PUT" : "REPLY",
LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
<= IBLND_MSG_SIZE);
- tx = kiblnd_get_idle_tx(ni);
+ tx = kiblnd_get_idle_tx(ni, target.nid);
if (tx == NULL) {
CERROR ("Can't send %d to %s: tx descs exhausted\n",
type, libcfs_nid2str(target.nid));
return 0;
}
-void
+static void
kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg)
{
lnet_process_id_t target = lntmsg->msg_target;
unsigned int niov = lntmsg->msg_niov;
- struct iovec *iov = lntmsg->msg_iov;
+ struct kvec *iov = lntmsg->msg_iov;
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
kib_tx_t *tx;
int rc;
- tx = kiblnd_get_idle_tx(ni);
+ tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
if (tx == NULL) {
CERROR("Can't get tx for REPLY to %s\n",
libcfs_nid2str(target.nid));
}
int
-kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
- unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int mlen, unsigned int rlen)
+kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
+ unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+ unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kib_rx_t *rx = private;
kib_msg_t *rxmsg = rx->rx_msg;
kib_conn_t *conn = rx->rx_conn;
kib_tx_t *tx;
- kib_msg_t *txmsg;
- int nob;
- int post_credit = IBLND_POSTRX_PEER_CREDIT;
- int rc = 0;
+ int nob;
+ int post_credit = IBLND_POSTRX_PEER_CREDIT;
+ int rc = 0;
- LASSERT (mlen <= rlen);
- LASSERT (!in_interrupt());
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
+ LASSERT (mlen <= rlen);
+ LASSERT (!in_interrupt());
+ /* Either all pages or all vaddrs */
+ LASSERT (!(kiov != NULL && iov != NULL));
- switch (rxmsg->ibm_type) {
- default:
- LBUG();
+ switch (rxmsg->ibm_type) {
+ default:
+ LBUG();
case IBLND_MSG_IMMEDIATE:
nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
lnet_finalize (ni, lntmsg, 0);
break;
- case IBLND_MSG_PUT_REQ:
+ case IBLND_MSG_PUT_REQ: {
+ kib_msg_t *txmsg;
+ kib_rdma_desc_t *rd;
+
if (mlen == 0) {
lnet_finalize(ni, lntmsg, 0);
kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
break;
}
- tx = kiblnd_get_idle_tx(ni);
+ tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
if (tx == NULL) {
CERROR("Can't allocate tx for %s\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
break;
}
- txmsg = tx->tx_msg;
- if (kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx,
- &txmsg->ibm_u.putack.ibpam_rd,
- niov, iov, offset, mlen);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx,
- &txmsg->ibm_u.putack.ibpam_rd,
- niov, kiov, offset, mlen);
- if (rc != 0) {
- CERROR("Can't setup PUT sink for %s: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_tx_done(ni, tx);
- /* tell peer it's over */
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
- rxmsg->ibm_u.putreq.ibprm_cookie);
- break;
- }
-
- nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]);
- txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
- txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
+ txmsg = tx->tx_msg;
+ rd = &txmsg->ibm_u.putack.ibpam_rd;
+ if (kiov == NULL)
+ rc = kiblnd_setup_rd_iov(ni, tx, rd,
+ niov, iov, offset, mlen);
+ else
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+ niov, kiov, offset, mlen);
+ if (rc != 0) {
+ CERROR("Can't setup PUT sink for %s: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+ kiblnd_tx_done(ni, tx);
+ /* tell peer it's over */
+ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
+ rxmsg->ibm_u.putreq.ibprm_cookie);
+ break;
+ }
+
+ nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
+ txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+ txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
/* reposted buffer reserved for PUT_DONE */
post_credit = IBLND_POSTRX_NO_CREDIT;
break;
+ }
case IBLND_MSG_GET_REQ:
if (lntmsg != NULL) {
}
int
-kiblnd_thread_start (int (*fn)(void *arg), void *arg)
+kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- long pid = kernel_thread (fn, arg, 0);
+ struct task_struct *task = kthread_run(fn, arg, name);
- if (pid < 0)
- return ((int)pid);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
- atomic_inc (&kiblnd_data.kib_nthreads);
- return (0);
+ atomic_inc(&kiblnd_data.kib_nthreads);
+ return 0;
}
-void
+static void
kiblnd_thread_fini (void)
{
- atomic_dec (&kiblnd_data.kib_nthreads);
+ atomic_dec (&kiblnd_data.kib_nthreads);
}
-void
+static void
kiblnd_peer_alive (kib_peer_t *peer)
{
- /* This is racy, but everyone's only writing cfs_time_current() */
- peer->ibp_last_alive = cfs_time_current();
- mb();
+ /* This is racy, but everyone's only writing cfs_time_current() */
+ peer->ibp_last_alive = cfs_time_current();
+ smp_mb();
}
-void
+static void
kiblnd_peer_notify (kib_peer_t *peer)
{
- time_t last_alive = 0;
int error = 0;
+ cfs_time_t last_alive = 0;
unsigned long flags;
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (list_empty(&peer->ibp_conns) &&
- peer->ibp_accepting == 0 &&
- peer->ibp_connecting == 0 &&
- peer->ibp_error != 0) {
+ if (kiblnd_peer_idle(peer) && peer->ibp_error != 0) {
error = peer->ibp_error;
peer->ibp_error = 0;
- last_alive = cfs_time_current_sec() -
- cfs_duration_sec(cfs_time_current() -
- peer->ibp_last_alive);
+ last_alive = peer->ibp_last_alive;
}
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (error != 0)
lnet_notify(peer->ibp_ni,
* connection to be finished off by the connd. Otherwise the connd is
* already dealing with it (either to set it up or tear it down).
* Caller holds kib_global_lock exclusively in irq context */
- unsigned long flags;
kib_peer_t *peer = conn->ibc_peer;
+ kib_dev_t *dev;
+ unsigned long flags;
LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
return; /* already being handled */
if (error == 0 &&
- list_empty(&conn->ibc_tx_queue) &&
- list_empty(&conn->ibc_tx_queue_rsrvd) &&
- list_empty(&conn->ibc_tx_queue_nocred) &&
- list_empty(&conn->ibc_active_txs)) {
+ list_empty(&conn->ibc_tx_noops) &&
+ list_empty(&conn->ibc_tx_queue) &&
+ list_empty(&conn->ibc_tx_queue_rsrvd) &&
+ list_empty(&conn->ibc_tx_queue_nocred) &&
+ list_empty(&conn->ibc_active_txs)) {
CDEBUG(D_NET, "closing conn to %s\n",
libcfs_nid2str(peer->ibp_nid));
} else {
- CDEBUG(D_NETERROR, "Closing conn to %s: error %d%s%s%s%s\n",
+ CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
libcfs_nid2str(peer->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ?
+ "" : "(sending_rsrvd)",
+ list_empty(&conn->ibc_tx_queue_nocred) ?
+ "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
- list_del(&conn->ibc_list);
+ dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev;
+ list_del(&conn->ibc_list);
/* connd (see below) takes over ibc_list's ref */
- if (list_empty (&peer->ibp_conns) && /* no more conns */
+ if (list_empty(&peer->ibp_conns) && /* no more conns */
kiblnd_peer_active(peer)) { /* still in peer table */
kiblnd_unlink_peer_locked(peer);
kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING);
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ if (error != 0 &&
+ kiblnd_dev_can_failover(dev)) {
+ list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ wake_up(&kiblnd_data.kib_failover_waitq);
+ }
- list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns);
- wake_up (&kiblnd_data.kib_connd_waitq);
+ spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns);
+ wake_up(&kiblnd_data.kib_connd_waitq);
+
+ spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
}
void
-kiblnd_close_conn (kib_conn_t *conn, int error)
+kiblnd_close_conn(kib_conn_t *conn, int error)
{
- unsigned long flags;
+ unsigned long flags;
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- kiblnd_close_conn_locked(conn, error);
+ kiblnd_close_conn_locked(conn, error);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
-void
+static void
kiblnd_handle_early_rxs(kib_conn_t *conn)
{
- unsigned long flags;
- kib_rx_t *rx;
+ unsigned long flags;
+ kib_rx_t *rx;
- LASSERT (!in_interrupt());
- LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ LASSERT(!in_interrupt());
+ LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!list_empty(&conn->ibc_early_rxs)) {
- rx = list_entry(conn->ibc_early_rxs.next,
- kib_rx_t, rx_list);
- list_del(&rx->rx_list);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ while (!list_empty(&conn->ibc_early_rxs)) {
+ rx = list_entry(conn->ibc_early_rxs.next,
+ kib_rx_t, rx_list);
+ list_del(&rx->rx_list);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_handle_rx(rx);
+ kiblnd_handle_rx(rx);
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- }
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ }
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
-void
+static void
kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
{
- LIST_HEAD (zombies);
- struct list_head *tmp;
- struct list_head *nxt;
- kib_tx_t *tx;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct list_head *tmp;
+ struct list_head *nxt;
+ kib_tx_t *tx;
- spin_lock(&conn->ibc_lock);
+ spin_lock(&conn->ibc_lock);
- list_for_each_safe (tmp, nxt, txs) {
- tx = list_entry (tmp, kib_tx_t, tx_list);
+ list_for_each_safe(tmp, nxt, txs) {
+ tx = list_entry(tmp, kib_tx_t, tx_list);
- if (txs == &conn->ibc_active_txs) {
- LASSERT (!tx->tx_queued);
- LASSERT (tx->tx_waiting ||
- tx->tx_sending != 0);
- } else {
- LASSERT (tx->tx_queued);
- }
+ if (txs == &conn->ibc_active_txs) {
+ LASSERT(!tx->tx_queued);
+ LASSERT(tx->tx_waiting ||
+ tx->tx_sending != 0);
+ } else {
+ LASSERT(tx->tx_queued);
+ }
- tx->tx_status = -ECONNABORTED;
- tx->tx_waiting = 0;
+ tx->tx_status = -ECONNABORTED;
+ tx->tx_waiting = 0;
- if (tx->tx_sending == 0) {
- tx->tx_queued = 0;
- list_del (&tx->tx_list);
- list_add (&tx->tx_list, &zombies);
- }
- }
+ if (tx->tx_sending == 0) {
+ tx->tx_queued = 0;
+ list_del(&tx->tx_list);
+ list_add(&tx->tx_list, &zombies);
+ }
+ }
- spin_unlock(&conn->ibc_lock);
+ spin_unlock(&conn->ibc_lock);
- kiblnd_txlist_done(conn->ibc_peer->ibp_ni,
- &zombies, -ECONNABORTED);
+ kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
}
-void
+static void
kiblnd_finalise_conn (kib_conn_t *conn)
{
- LASSERT (!in_interrupt());
- LASSERT (conn->ibc_state > IBLND_CONN_INIT);
+ LASSERT (!in_interrupt());
+ LASSERT (conn->ibc_state > IBLND_CONN_INIT);
- kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
+ kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
- /* abort_receives moves QP state to IB_QPS_ERR. This is only required
- * for connections that didn't get as far as being connected, because
- * rdma_disconnect() does this for free. */
- kiblnd_abort_receives(conn);
+ /* abort_receives moves QP state to IB_QPS_ERR. This is only required
+ * for connections that didn't get as far as being connected, because
+ * rdma_disconnect() does this for free. */
+ kiblnd_abort_receives(conn);
- /* Complete all tx descs not waiting for sends to complete.
- * NB we should be safe from RDMA now that the QP has changed state */
+ /* Complete all tx descs not waiting for sends to complete.
+ * NB we should be safe from RDMA now that the QP has changed state */
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
- kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
- kiblnd_abort_txs(conn, &conn->ibc_active_txs);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
+ kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
+ kiblnd_abort_txs(conn, &conn->ibc_active_txs);
- kiblnd_handle_early_rxs(conn);
+ kiblnd_handle_early_rxs(conn);
}
-void
-kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error)
+static void
+kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
{
- LIST_HEAD (zombies);
- unsigned long flags;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ unsigned long flags;
- LASSERT (error != 0);
- LASSERT (!in_interrupt());
+ LASSERT (error != 0);
+ LASSERT (!in_interrupt());
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (active) {
- LASSERT (peer->ibp_connecting > 0);
- peer->ibp_connecting--;
- } else {
- LASSERT (peer->ibp_accepting > 0);
- peer->ibp_accepting--;
- }
+ if (active) {
+ LASSERT (peer->ibp_connecting > 0);
+ peer->ibp_connecting--;
+ } else {
+ LASSERT (peer->ibp_accepting > 0);
+ peer->ibp_accepting--;
+ }
- if (peer->ibp_connecting != 0 ||
- peer->ibp_accepting != 0) {
- /* another connection attempt under way... */
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- return;
- }
+ if (kiblnd_peer_connecting(peer)) {
+ /* another connection attempt under way... */
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
+ flags);
+ return;
+ }
- if (list_empty(&peer->ibp_conns)) {
- /* Take peer's blocked transmits to complete with error */
- list_add(&zombies, &peer->ibp_tx_queue);
- list_del_init(&peer->ibp_tx_queue);
+ peer->ibp_reconnected = 0;
+ if (list_empty(&peer->ibp_conns)) {
+ /* Take peer's blocked transmits to complete with error */
+ list_add(&zombies, &peer->ibp_tx_queue);
+ list_del_init(&peer->ibp_tx_queue);
- if (kiblnd_peer_active(peer))
- kiblnd_unlink_peer_locked(peer);
+ if (kiblnd_peer_active(peer))
+ kiblnd_unlink_peer_locked(peer);
- peer->ibp_error = error;
- } else {
- /* Can't have blocked transmits if there are connections */
- LASSERT (list_empty(&peer->ibp_tx_queue));
- }
+ peer->ibp_error = error;
+ } else {
+ /* Can't have blocked transmits if there are connections */
+ LASSERT(list_empty(&peer->ibp_tx_queue));
+ }
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_peer_notify(peer);
+ kiblnd_peer_notify(peer);
- if (list_empty (&zombies))
- return;
+ if (list_empty(&zombies))
+ return;
- CDEBUG (D_NETERROR, "Deleting messages for %s: connection failed\n",
- libcfs_nid2str(peer->ibp_nid));
+ CNETERR("Deleting messages for %s: connection failed\n",
+ libcfs_nid2str(peer->ibp_nid));
- kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
+ kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH);
}
-void
+static void
kiblnd_connreq_done(kib_conn_t *conn, int status)
{
- kib_peer_t *peer = conn->ibc_peer;
- kib_tx_t *tx;
- struct list_head txs;
- unsigned long flags;
- int active;
+ kib_peer_t *peer = conn->ibc_peer;
+ kib_tx_t *tx;
+ struct list_head txs;
+ unsigned long flags;
+ int active;
active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
- libcfs_nid2str(peer->ibp_nid), active,
- conn->ibc_version, status);
+ CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n",
+ libcfs_nid2str(peer->ibp_nid), active,
+ conn->ibc_version, status);
- LASSERT (!in_interrupt());
- LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
- peer->ibp_connecting > 0) ||
- (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
- peer->ibp_accepting > 0));
+ LASSERT (!in_interrupt());
+ LASSERT ((conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT &&
+ peer->ibp_connecting > 0) ||
+ (conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
+ peer->ibp_accepting > 0));
LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
conn->ibc_connvars = NULL;
}
/* connection established */
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
conn->ibc_last_send = jiffies;
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
kiblnd_peer_alive(peer);
- /* Add conn to peer's list and nuke any dangling conns from a different
- * peer instance... */
- kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
- list_add(&conn->ibc_list, &peer->ibp_conns);
- if (active)
- peer->ibp_connecting--;
- else
- peer->ibp_accepting--;
+ /* Add conn to peer's list and nuke any dangling conns from a different
+ * peer instance... */
+ kiblnd_conn_addref(conn); /* +1 ref for ibc_list */
+ list_add(&conn->ibc_list, &peer->ibp_conns);
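+ /* a successful connection resets the reconnect throttle */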
+ peer->ibp_reconnected = 0;
+ if (active)
+ peer->ibp_connecting--;
+ else
+ peer->ibp_accepting--;
if (peer->ibp_version == 0) {
peer->ibp_version = conn->ibc_version;
peer->ibp_incarnation = conn->ibc_incarnation;
}
- /* grab pending txs while I have the lock */
- list_add(&txs, &peer->ibp_tx_queue);
- list_del_init(&peer->ibp_tx_queue);
+ /* grab pending txs while I have the lock */
+ list_add(&txs, &peer->ibp_tx_queue);
+ list_del_init(&peer->ibp_tx_queue);
if (!kiblnd_peer_active(peer) || /* peer has been deleted */
conn->ibc_comms_error != 0) { /* error has happened already */
/* start to shut down connection */
kiblnd_close_conn_locked(conn, -ECONNABORTED);
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
-
- return;
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- /* Schedule blocked txs */
- spin_lock (&conn->ibc_lock);
- while (!list_empty (&txs)) {
- tx = list_entry (txs.next, kib_tx_t, tx_list);
- list_del(&tx->tx_list);
-
- kiblnd_queue_tx_locked(tx, conn);
- }
- spin_unlock (&conn->ibc_lock);
-
- kiblnd_check_sends(conn);
-
- /* schedule blocked rxs */
- kiblnd_handle_early_rxs(conn);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
+
+ return;
+ }
+
+ /* +1 ref for myself: this connection is now visible to other
+ * threads, so the ref held for peer->ibp_conns may be dropped by a
+ * connection close from another thread, or by the call to
+ * kiblnd_check_sends_locked() below. See bz21911 for details.
+ */
+ kiblnd_conn_addref(conn);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ /* Schedule blocked txs */
+ spin_lock(&conn->ibc_lock);
+ while (!list_empty(&txs)) {
+ tx = list_entry(txs.next, kib_tx_t, tx_list);
+ list_del(&tx->tx_list);
+
+ kiblnd_queue_tx_locked(tx, conn);
+ }
+ kiblnd_check_sends_locked(conn);
+ spin_unlock(&conn->ibc_lock);
+
+ /* schedule blocked rxs */
+ kiblnd_handle_early_rxs(conn);
+ kiblnd_conn_decref(conn);
}
-void
+static void
kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
{
int rc;
CWARN("Error %d sending reject\n", rc);
}
-int
-kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
+static int
+kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
kib_msg_t *reqmsg = priv;
kib_msg_t *ackmsg;
kib_dev_t *ibdev;
lnet_nid_t nid;
struct rdma_conn_param cp;
kib_rej_t rej;
- int version = IBLND_MSG_VERSION;
- unsigned long flags;
- int rc;
-
- LASSERT (!in_interrupt());
+ int version = IBLND_MSG_VERSION;
+ unsigned long flags;
+ int rc;
+ struct sockaddr_in *peer_addr;
+
+ LASSERT (!in_interrupt());
- /* cmid inherits 'context' from the corresponding listener id */
- ibdev = (kib_dev_t *)cmid->context;
- LASSERT (ibdev != NULL);
+ /* cmid inherits 'context' from the corresponding listener id */
+ ibdev = (kib_dev_t *)cmid->context;
+ LASSERT (ibdev != NULL);
memset(&rej, 0, sizeof(rej));
rej.ibr_magic = IBLND_MSG_MAGIC;
rej.ibr_why = IBLND_REJECT_FATAL;
rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
- if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
- CERROR("Short connection request\n");
- goto failed;
- }
-
- /* Future protocol version compatibility support! If the
- * o2iblnd-specific protocol changes, or when LNET unifies
- * protocols over all LNDs, the initial connection will
- * negotiate a protocol version. I trap this here to avoid
- * console errors; the reject tells the peer which protocol I
- * speak. */
- if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
- reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
- goto failed;
- if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
- reqmsg->ibm_version != IBLND_MSG_VERSION &&
- reqmsg->ibm_version != IBLND_MSG_VERSION_1)
- goto failed;
- if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
- reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
- reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
- goto failed;
-
- rc = kiblnd_unpack_msg(reqmsg, priv_nob);
- if (rc != 0) {
- CERROR("Can't parse connection request: %d\n", rc);
- goto failed;
- }
-
- nid = reqmsg->ibm_srcnid;
- ni = lnet_net2ni(LNET_NIDNET(reqmsg->ibm_dstnid));
-
- if (ni != NULL) {
- net = (kib_net_t *)ni->ni_data;
- rej.ibr_incarnation = net->ibn_incarnation;
- }
-
- if (ni == NULL || /* no matching net */
- ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
- net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept %s on %s (%s:%d:%u.%u.%u.%u): "
- "bad dst nid %s\n", libcfs_nid2str(nid),
- ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
- ibdev->ibd_ifname, ibdev->ibd_nnets,
- HIPQUAD(ibdev->ibd_ifip),
- libcfs_nid2str(reqmsg->ibm_dstnid));
-
- goto failed;
- }
+ peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
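+ /* optionally insist that the peer connected from a privileged port */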
+ if (*kiblnd_tunables.kib_require_priv_port &&
+ ntohs(peer_addr->sin_port) >= PROT_SOCK) {
+ __u32 ip = ntohl(peer_addr->sin_addr.s_addr);
+ CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
+ &ip, ntohs(peer_addr->sin_port));
+ goto failed;
+ }
+
+ if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
+ CERROR("Short connection request\n");
+ goto failed;
+ }
+
+ /* Future protocol version compatibility support! If the
+ * o2iblnd-specific protocol changes, or when LNET unifies
+ * protocols over all LNDs, the initial connection will
+ * negotiate a protocol version. I trap this here to avoid
+ * console errors; the reject tells the peer which protocol I
+ * speak. */
+ if (reqmsg->ibm_magic == LNET_PROTO_MAGIC ||
+ reqmsg->ibm_magic == __swab32(LNET_PROTO_MAGIC))
+ goto failed;
+ if (reqmsg->ibm_magic == IBLND_MSG_MAGIC &&
+ reqmsg->ibm_version != IBLND_MSG_VERSION &&
+ reqmsg->ibm_version != IBLND_MSG_VERSION_1)
+ goto failed;
+ if (reqmsg->ibm_magic == __swab32(IBLND_MSG_MAGIC) &&
+ reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION) &&
+ reqmsg->ibm_version != __swab16(IBLND_MSG_VERSION_1))
+ goto failed;
+
+ rc = kiblnd_unpack_msg(reqmsg, priv_nob);
+ if (rc != 0) {
+ CERROR("Can't parse connection request: %d\n", rc);
+ goto failed;
+ }
+
+ nid = reqmsg->ibm_srcnid;
+ ni = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
+
+ if (ni != NULL) {
+ net = (kib_net_t *)ni->ni_data;
+ rej.ibr_incarnation = net->ibn_incarnation;
+ }
+
+ if (ni == NULL || /* no matching net */
+ ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
+ net->ibn_dev != ibdev) { /* wrong device */
+ CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): "
+ "bad dst nid %s\n", libcfs_nid2str(nid),
+ ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
+ ibdev->ibd_ifname, ibdev->ibd_nnets,
+ &ibdev->ibd_ifip,
+ libcfs_nid2str(reqmsg->ibm_dstnid));
+
+ goto failed;
+ }
/* check time stamp as soon as possible */
- if (reqmsg->ibm_dststamp != 0 &&
- reqmsg->ibm_dststamp != net->ibn_incarnation) {
- CWARN("Stale connection request\n");
- rej.ibr_why = IBLND_REJECT_CONN_STALE;
- goto failed;
- }
-
- /* I can accept peer's version */
- version = reqmsg->ibm_version;
-
- if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
- CERROR("Unexpected connreq msg type: %x from %s\n",
- reqmsg->ibm_type, libcfs_nid2str(nid));
- goto failed;
- }
-
- if (reqmsg->ibm_u.connparams.ibcp_queue_depth !=
- IBLND_MSG_QUEUE_SIZE(version)) {
- CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n",
- libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(version));
-
- if (version == IBLND_MSG_VERSION)
- rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
-
- goto failed;
- }
-
- if (reqmsg->ibm_u.connparams.ibcp_max_frags !=
- IBLND_RDMA_FRAGS(version)) {
- CERROR("Can't accept %s(version %x): "
- "incompatible max_frags %d (%d wanted)\n",
- libcfs_nid2str(nid), version,
- reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
-
- if (version == IBLND_MSG_VERSION)
- rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
-
- goto failed;
-
- }
+ if (reqmsg->ibm_dststamp != 0 &&
+ reqmsg->ibm_dststamp != net->ibn_incarnation) {
+ CWARN("Stale connection request\n");
+ rej.ibr_why = IBLND_REJECT_CONN_STALE;
+ goto failed;
+ }
+
+ /* I can accept peer's version */
+ version = reqmsg->ibm_version;
+
+ if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) {
+ CERROR("Unexpected connreq msg type: %x from %s\n",
+ reqmsg->ibm_type, libcfs_nid2str(nid));
+ goto failed;
+ }
+
+ if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
+ kiblnd_msg_queue_size(version, ni)) {
+ CERROR("Can't accept conn from %s, queue depth too large: "
+ " %d (<=%d wanted)\n",
+ libcfs_nid2str(nid),
+ reqmsg->ibm_u.connparams.ibcp_queue_depth,
+ kiblnd_msg_queue_size(version, ni));
+
+ if (version == IBLND_MSG_VERSION)
+ rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
+
+ goto failed;
+ }
+
+ if (reqmsg->ibm_u.connparams.ibcp_max_frags >
+ kiblnd_rdma_frags(version, ni)) {
+ CWARN("Can't accept conn from %s (version %x): "
+ "max_frags %d too large (%d wanted)\n",
+ libcfs_nid2str(nid), version,
+ reqmsg->ibm_u.connparams.ibcp_max_frags,
+ kiblnd_rdma_frags(version, ni));
+
+ if (version >= IBLND_MSG_VERSION)
+ rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+
+ goto failed;
+ } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
+ kiblnd_rdma_frags(version, ni) &&
+ net->ibn_fmr_ps == NULL) {
+ CWARN("Can't accept conn from %s (version %x): "
+ "max_frags %d incompatible without FMR pool "
+ "(%d wanted)\n",
+ libcfs_nid2str(nid), version,
+ reqmsg->ibm_u.connparams.ibcp_max_frags,
+ kiblnd_rdma_frags(version, ni));
+
+ if (version == IBLND_MSG_VERSION)
+ rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
+
+ goto failed;
+ }
if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
CERROR("Can't accept %s: message size %d too big (%d max)\n",
goto failed;
}
- /* assume 'nid' is a new peer; create */
- rc = kiblnd_create_peer(ni, &peer, nid);
- if (rc != 0) {
- CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
- rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
- goto failed;
- }
+ /* assume 'nid' is a new peer; create */
+ rc = kiblnd_create_peer(ni, &peer, nid);
+ if (rc != 0) {
+ CERROR("Can't create peer for %s\n", libcfs_nid2str(nid));
+ rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
+ goto failed;
+ }
- write_lock_irqsave(g_lock, flags);
+ /* We have validated the peer's parameters so use those */
+ peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags;
+ peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth;
- peer2 = kiblnd_find_peer_locked(nid);
+ write_lock_irqsave(g_lock, flags);
+
+ peer2 = kiblnd_find_peer_locked(ni, nid);
if (peer2 != NULL) {
if (peer2->ibp_version == 0) {
peer2->ibp_version = version;
/* not the guy I've talked with */
if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
peer2->ibp_version != version) {
- kiblnd_close_peer_conns_locked(peer2, -ESTALE);
- write_unlock_irqrestore(g_lock, flags);
-
- CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
- libcfs_nid2str(nid), peer2->ibp_version, version);
-
- kiblnd_peer_decref(peer);
- rej.ibr_why = IBLND_REJECT_CONN_STALE;
- goto failed;
- }
-
- /* tie-break connection race in favour of the higher NID */
- if (peer2->ibp_connecting != 0 &&
- nid < ni->ni_nid) {
- write_unlock_irqrestore(g_lock, flags);
-
- CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid));
-
- kiblnd_peer_decref(peer);
- rej.ibr_why = IBLND_REJECT_CONN_RACE;
- goto failed;
- }
-
- peer2->ibp_accepting++;
- kiblnd_peer_addref(peer2);
-
- write_unlock_irqrestore(g_lock, flags);
+ kiblnd_close_peer_conns_locked(peer2, -ESTALE);
+
+ if (kiblnd_peer_active(peer2)) {
+ peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+ peer2->ibp_version = version;
+ }
+ write_unlock_irqrestore(g_lock, flags);
+
+ CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n",
+ libcfs_nid2str(nid), peer2->ibp_version, version,
+ peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
+
+ kiblnd_peer_decref(peer);
+ rej.ibr_why = IBLND_REJECT_CONN_STALE;
+ goto failed;
+ }
+
+ /* Tie-break connection race in favour of the higher NID.
+ * If we keep running into a race condition multiple times,
+ * we have to assume that the connection attempt with the
+ * higher NID is stuck in a connecting state and will never
+ * recover. As such, we pass through this if-block and let
+ * the lower NID connection win so we can move forward.
+ */
+ if (peer2->ibp_connecting != 0 &&
+ nid < ni->ni_nid && peer2->ibp_races <
+ MAX_CONN_RACES_BEFORE_ABORT) {
+ peer2->ibp_races++;
+ write_unlock_irqrestore(g_lock, flags);
+
+ CDEBUG(D_NET, "Conn race %s\n",
+ libcfs_nid2str(peer2->ibp_nid));
+
+ kiblnd_peer_decref(peer);
+ rej.ibr_why = IBLND_REJECT_CONN_RACE;
+ goto failed;
+ }
+ if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT)
+ CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n",
+ libcfs_nid2str(peer2->ibp_nid),
+ MAX_CONN_RACES_BEFORE_ABORT);
+ /*
+ * A passive connection is allowed even while this peer is
+ * waiting for reconnection.
+ */
+ peer2->ibp_reconnecting = 0;
+ peer2->ibp_races = 0;
+ peer2->ibp_accepting++;
+ kiblnd_peer_addref(peer2);
+
+ /* Race with kiblnd_launch_tx (active connect) to create the peer,
+ * so copy the validated parameters now that we know what the
+ * peer's limits are */
+ peer2->ibp_max_frags = peer->ibp_max_frags;
+ peer2->ibp_queue_depth = peer->ibp_queue_depth;
+
+ write_unlock_irqrestore(g_lock, flags);
kiblnd_peer_decref(peer);
peer = peer2;
} else {
LASSERT (net->ibn_shutdown == 0);
kiblnd_peer_addref(peer);
- list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
+ list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid));
- write_unlock_irqrestore(g_lock, flags);
+ write_unlock_irqrestore(g_lock, flags);
}
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
+ conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version);
if (conn == NULL) {
kiblnd_peer_connect_failed(peer, 0, -ENOMEM);
kiblnd_peer_decref(peer);
/* conn now "owns" cmid, so I return success from here on to ensure the
* CM callback doesn't destroy cmid. */
-
- conn->ibc_incarnation = reqmsg->ibm_srcstamp;
- conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version);
- conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version);
- LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version)
- <= IBLND_RX_MSGS(version));
+ conn->ibc_incarnation = reqmsg->ibm_srcstamp;
+ conn->ibc_credits = conn->ibc_queue_depth;
+ conn->ibc_reserved_credits = conn->ibc_queue_depth;
+ LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+ IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
ackmsg = &conn->ibc_connvars->cv_msg;
memset(ackmsg, 0, sizeof(*ackmsg));
kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
sizeof(ackmsg->ibm_u.connparams));
- ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
- ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
+ ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
+ ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
+ ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
return 0;
failed:
- if (ni != NULL)
- lnet_ni_decref(ni);
+ if (ni != NULL) {
+ rej.ibr_cp.ibcp_queue_depth =
+ kiblnd_msg_queue_size(version, ni);
+ rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+ lnet_ni_decref(ni);
+ }
- rej.ibr_version = version;
- rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
- kiblnd_reject(cmid, &rej);
+ rej.ibr_version = version;
+ kiblnd_reject(cmid, &rej);
- return -ECONNREFUSED;
+ return -ECONNREFUSED;
}
-void
-kiblnd_reconnect (kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
+static void
+kiblnd_check_reconnect(kib_conn_t *conn, int version,
+ __u64 incarnation, int why, kib_connparams_t *cp)
{
- kib_peer_t *peer = conn->ibc_peer;
- char *reason;
- int retry = 0;
- unsigned long flags;
-
- LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- LASSERT (peer->ibp_connecting > 0); /* 'conn' at least */
-
- write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_peer_t *peer = conn->ibc_peer;
+ char *reason;
+ int msg_size = IBLND_MSG_SIZE;
+ int frag_num = -1;
+ int queue_dep = -1;
+ bool reconnect;
+ unsigned long flags;
+
+ LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+ LASSERT(peer->ibp_connecting > 0); /* 'conn' at least */
+ LASSERT(!peer->ibp_reconnecting);
+
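+ /* capture the peer's advertised limits (if any) for the
+ * negotiation below and for logging */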
+ if (cp) {
+ msg_size = cp->ibcp_max_msg_size;
+ frag_num = cp->ibcp_max_frags;
+ queue_dep = cp->ibcp_queue_depth;
+ }
+
+ write_lock_irqsave(glock, flags);
/* retry connection if it's still needed and no other connection
- * attempts (active or passive) are in progress */
- if (!list_empty(&peer->ibp_tx_queue) &&
- peer->ibp_connecting == 1 &&
- peer->ibp_accepting == 0) {
- retry = 1;
- peer->ibp_connecting++;
-
- peer->ibp_version = version;
- peer->ibp_incarnation = incarnation;
- }
-
- write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
- if (!retry)
- return;
+ * attempts (active or passive) are in progress
+ * NB: reconnect is still needed even when ibp_tx_queue is
+ * empty if ibp_version != version because reconnect may be
+ * initiated by kiblnd_query() */
+ reconnect = (!list_empty(&peer->ibp_tx_queue) ||
+ peer->ibp_version != version) &&
+ peer->ibp_connecting == 1 &&
+ peer->ibp_accepting == 0;
+ if (!reconnect) {
+ reason = "no need";
+ goto out;
+ }
switch (why) {
default:
reason = "Unknown";
break;
+ case IBLND_REJECT_RDMA_FRAGS: {
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+ if (!cp) {
+ reason = "can't negotiate max frags";
+ goto out;
+ }
+ tunables = &peer->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+ if (!tunables->lnd_map_on_demand) {
+ reason = "map_on_demand must be enabled";
+ goto out;
+ }
+ if (conn->ibc_max_frags <= frag_num) {
+ reason = "unsupported max frags";
+ goto out;
+ }
+
+ peer->ibp_max_frags = frag_num;
+ reason = "rdma fragments";
+ break;
+ }
+ case IBLND_REJECT_MSG_QUEUE_SIZE:
+ if (!cp) {
+ reason = "can't negotiate queue depth";
+ goto out;
+ }
+ if (conn->ibc_queue_depth <= queue_dep) {
+ reason = "unsupported queue depth";
+ goto out;
+ }
+
+ peer->ibp_queue_depth = queue_dep;
+ reason = "queue depth";
+ break;
+
case IBLND_REJECT_CONN_STALE:
reason = "stale";
break;
break;
}
- CDEBUG(D_NETERROR, "%s: retrying (%s), %x, %x, "
- "queue_dep: %d, max_frag: %d, msg_size: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- reason, IBLND_MSG_VERSION, version,
- cp != NULL ? cp->ibcp_queue_depth : IBLND_MSG_QUEUE_SIZE(version),
- cp != NULL ? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version),
- cp != NULL ? cp->ibcp_max_msg_size: IBLND_MSG_SIZE);
-
- kiblnd_connect_peer(peer);
+ conn->ibc_reconnect = 1;
+ peer->ibp_reconnecting = 1;
+ peer->ibp_version = version;
+ if (incarnation != 0)
+ peer->ibp_incarnation = incarnation;
+ out:
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ reconnect ? "reconnect" : "don't reconnect",
+ reason, IBLND_MSG_VERSION, version, msg_size,
+ conn->ibc_queue_depth, queue_dep,
+ conn->ibc_max_frags, frag_num);
+ /*
+ * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer
+ * while destroying the zombie
+ */
}
-void
+static void
kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
{
- kib_peer_t *peer = conn->ibc_peer;
+ kib_peer_t *peer = conn->ibc_peer;
- LASSERT (!in_interrupt());
- LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+ LASSERT (!in_interrupt());
+ LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
- switch (reason) {
- case IB_CM_REJ_STALE_CONN:
- kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_CONN_STALE, NULL);
- break;
+ switch (reason) {
+ case IB_CM_REJ_STALE_CONN:
+ kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
+ IBLND_REJECT_CONN_STALE, NULL);
+ break;
case IB_CM_REJ_INVALID_SERVICE_ID:
- CDEBUG(D_NETERROR, "%s rejected: no listener at %d\n",
- libcfs_nid2str(peer->ibp_nid),
- *kiblnd_tunables.kib_service);
+ CNETERR("%s rejected: no listener at %d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ *kiblnd_tunables.kib_service);
break;
case IB_CM_REJ_CONSUMER_DEFINED:
case IBLND_REJECT_CONN_RACE:
case IBLND_REJECT_CONN_STALE:
case IBLND_REJECT_CONN_UNCOMPAT:
- kiblnd_reconnect(conn, rej->ibr_version,
- incarnation, rej->ibr_why, cp);
- break;
-
- case IBLND_REJECT_MSG_QUEUE_SIZE:
- CERROR("%s rejected: incompatible message queue depth %d, %d\n",
- libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(conn->ibc_version));
- break;
-
- case IBLND_REJECT_RDMA_FRAGS:
- CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n",
- libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags,
- IBLND_RDMA_FRAGS(conn->ibc_version));
+ case IBLND_REJECT_MSG_QUEUE_SIZE:
+ case IBLND_REJECT_RDMA_FRAGS:
+ kiblnd_check_reconnect(conn, rej->ibr_version,
+ incarnation, rej->ibr_why, cp);
break;
case IBLND_REJECT_NO_RESOURCES:
}
/* fall through */
default:
- CDEBUG(D_NETERROR, "%s rejected: reason %d, size %d\n",
- libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
+ CNETERR("%s rejected: reason %d, size %d\n",
+ libcfs_nid2str(peer->ibp_nid), reason, priv_nob);
break;
}
kiblnd_connreq_done(conn, -ECONNREFUSED);
}
-void
+static void
kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
{
kib_peer_t *peer = conn->ibc_peer;
goto failed;
}
- if (msg->ibm_u.connparams.ibcp_queue_depth !=
- IBLND_MSG_QUEUE_SIZE(ver)) {
- CERROR("%s has incompatible queue depth %d(%d wanted)\n",
- libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(ver));
- rc = -EPROTO;
- goto failed;
- }
-
- if (msg->ibm_u.connparams.ibcp_max_frags !=
- IBLND_RDMA_FRAGS(ver)) {
- CERROR("%s has incompatible max_frags %d (%d wanted)\n",
- libcfs_nid2str(peer->ibp_nid),
- msg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(ver));
- rc = -EPROTO;
- goto failed;
- }
+ if (msg->ibm_u.connparams.ibcp_queue_depth >
+ conn->ibc_queue_depth) {
+ CERROR("%s has incompatible queue depth %d (<=%d wanted)\n",
+ libcfs_nid2str(peer->ibp_nid),
+ msg->ibm_u.connparams.ibcp_queue_depth,
+ conn->ibc_queue_depth);
+ rc = -EPROTO;
+ goto failed;
+ }
+
+ if (msg->ibm_u.connparams.ibcp_max_frags >
+ conn->ibc_max_frags) {
+ CERROR("%s has incompatible max_frags %d (<=%d wanted)\n",
+ libcfs_nid2str(peer->ibp_nid),
+ msg->ibm_u.connparams.ibcp_max_frags,
+ conn->ibc_max_frags);
+ rc = -EPROTO;
+ goto failed;
+ }
if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
CERROR("%s max message size %d too big (%d max)\n",
goto failed;
}
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
- msg->ibm_dststamp == net->ibn_incarnation)
- rc = 0;
- else
- rc = -ESTALE;
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ if (msg->ibm_dstnid == ni->ni_nid &&
+ msg->ibm_dststamp == net->ibn_incarnation)
+ rc = 0;
+ else
+ rc = -ESTALE;
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (rc != 0) {
CERROR("Bad connection reply from %s, rc = %d, "
goto failed;
}
- conn->ibc_incarnation = msg->ibm_srcstamp;
- conn->ibc_credits =
- conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver);
- LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver)
- <= IBLND_RX_MSGS(ver));
+ conn->ibc_incarnation = msg->ibm_srcstamp;
+ conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth;
+ conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags;
+ LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
+ IBLND_OOB_MSGS(ver) <= IBLND_RX_MSGS(conn));
kiblnd_connreq_done(conn, 0);
return;
kiblnd_connreq_done(conn, 0);
}
-int
+static int
kiblnd_active_connect (struct rdma_cm_id *cmid)
{
kib_peer_t *peer = (kib_peer_t *)cmid->context;
struct rdma_conn_param cp;
int version;
__u64 incarnation;
- long flags;
+ unsigned long flags;
int rc;
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- incarnation = peer->ibp_incarnation;
- version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version;
+ incarnation = peer->ibp_incarnation;
+ version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION :
+ peer->ibp_version;
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version);
+ conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT,
+ version);
if (conn == NULL) {
kiblnd_peer_connect_failed(peer, 1, -ENOMEM);
kiblnd_peer_decref(peer); /* lose cmid's ref */
msg = &conn->ibc_connvars->cv_msg;
- memset(msg, 0, sizeof(*msg));
- kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
- msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
- msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
+ memset(msg, 0, sizeof(*msg));
+ kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams));
+ msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
+ msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
+ msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
kiblnd_pack_msg(peer->ibp_ni, msg, version,
0, peer->ibp_nid, incarnation);
case RDMA_CM_EVENT_ADDR_ERROR:
peer = (kib_peer_t *)cmid->context;
- CDEBUG(D_NETERROR, "%s: ADDR ERROR %d\n",
+ CNETERR("%s: ADDR ERROR %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer);
libcfs_nid2str(peer->ibp_nid), event->status);
if (event->status != 0) {
- CDEBUG(D_NETERROR, "Can't resolve address for %s: %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ CNETERR("Can't resolve address for %s: %d\n",
+ libcfs_nid2str(peer->ibp_nid), event->status);
rc = event->status;
} else {
rc = rdma_resolve_route(
case RDMA_CM_EVENT_ROUTE_ERROR:
peer = (kib_peer_t *)cmid->context;
- CDEBUG(D_NETERROR, "%s: ROUTE ERROR %d\n",
- libcfs_nid2str(peer->ibp_nid), event->status);
+ CNETERR("%s: ROUTE ERROR %d\n",
+ libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer);
return -EHOSTUNREACH; /* rc != 0 destroys cmid */
if (event->status == 0)
return kiblnd_active_connect(cmid);
- CDEBUG(D_NETERROR, "Can't resolve route for %s: %d\n",
+ CNETERR("Can't resolve route for %s: %d\n",
libcfs_nid2str(peer->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer, 1, event->status);
kiblnd_peer_decref(peer);
conn = (kib_conn_t *)cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CDEBUG(D_NETERROR, "%s: UNREACHABLE %d\n",
+ CNETERR("%s: UNREACHABLE %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
kiblnd_connreq_done(conn, -ENETDOWN);
kiblnd_conn_decref(conn);
conn = (kib_conn_t *)cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CDEBUG(D_NETERROR, "%s: CONNECT ERROR %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
+ CNETERR("%s: CONNECT ERROR %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
kiblnd_connreq_done(conn, -ENOTCONN);
kiblnd_conn_decref(conn);
return 0;
/* net keeps its ref on conn! */
return 0;
-#ifdef HAVE_OFED_RDMA_CMEV_TIMEWAIT_EXIT
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n");
return 0;
-#endif
+
case RDMA_CM_EVENT_DISCONNECTED:
conn = (kib_conn_t *)cmid->context;
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
* to ignore this */
return 0;
-#ifdef HAVE_OFED_RDMA_CMEV_ADDRCHANGE
case RDMA_CM_EVENT_ADDR_CHANGE:
LCONSOLE_INFO("Physical link changed (eg hca/port)\n");
return 0;
-#endif
}
}
-int
-kiblnd_check_txs (kib_conn_t *conn, struct list_head *txs)
+static int
+kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
{
- kib_tx_t *tx;
- struct list_head *ttmp;
- int timed_out = 0;
-
- spin_lock(&conn->ibc_lock);
-
- list_for_each (ttmp, txs) {
- tx = list_entry (ttmp, kib_tx_t, tx_list);
-
- if (txs != &conn->ibc_active_txs) {
- LASSERT (tx->tx_queued);
- } else {
- LASSERT (!tx->tx_queued);
- LASSERT (tx->tx_waiting || tx->tx_sending != 0);
- }
-
- if (time_after_eq (jiffies, tx->tx_deadline)) {
- timed_out = 1;
- CERROR("Timed out tx: %s, %lu seconds\n",
- kiblnd_queue2str(conn, txs),
- cfs_duration_sec(jiffies - tx->tx_deadline));
- break;
- }
- }
-
- spin_unlock(&conn->ibc_lock);
- return timed_out;
+ kib_tx_t *tx;
+ struct list_head *ttmp;
+
+ list_for_each(ttmp, txs) {
+ tx = list_entry(ttmp, kib_tx_t, tx_list);
+
+ if (txs != &conn->ibc_active_txs) {
+ LASSERT(tx->tx_queued);
+ } else {
+ LASSERT(!tx->tx_queued);
+ LASSERT(tx->tx_waiting || tx->tx_sending != 0);
+ }
+
+ if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
+ CERROR("Timed out tx: %s, %lu seconds\n",
+ kiblnd_queue2str(conn, txs),
+ cfs_duration_sec(jiffies - tx->tx_deadline));
+ return 1;
+ }
+ }
+
+ return 0;
}
-int
-kiblnd_conn_timed_out (kib_conn_t *conn)
+static int
+kiblnd_conn_timed_out_locked(kib_conn_t *conn)
{
- return kiblnd_check_txs(conn, &conn->ibc_tx_queue) ||
- kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) ||
- kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) ||
- kiblnd_check_txs(conn, &conn->ibc_active_txs);
+ return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) ||
+ kiblnd_check_txs_locked(conn, &conn->ibc_active_txs);
}
-void
+static void
kiblnd_check_conns (int idx)
{
- struct list_head *peers = &kiblnd_data.kib_peers[idx];
- struct list_head *ptmp;
- kib_peer_t *peer;
- kib_conn_t *conn;
- struct list_head *ctmp;
- unsigned long flags;
-
- again:
- /* NB. We expect to have a look at all the peers and not find any
- * rdmas to time out, so we just use a shared lock while we
- * take a look... */
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
-
- list_for_each (ptmp, peers) {
- peer = list_entry (ptmp, kib_peer_t, ibp_list);
-
- list_for_each (ctmp, &peer->ibp_conns) {
- conn = list_entry (ctmp, kib_conn_t, ibc_list);
-
- LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED);
-
- /* In case we have enough credits to return via a
- * NOOP, but there were no non-blocking tx descs
- * free to do it last time... */
- kiblnd_check_sends(conn);
-
- if (!kiblnd_conn_timed_out(conn))
- continue;
-
- /* Handle timeout by closing the whole connection. We
- * can only be sure RDMA activity has ceased once the
- * QP has been modified. */
-
- kiblnd_conn_addref(conn); /* 1 ref for me... */
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
- flags);
-
- CERROR("Timed out RDMA with %s (%lu)\n",
- libcfs_nid2str(peer->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
- peer->ibp_last_alive));
-
- kiblnd_close_conn(conn, -ETIMEDOUT);
- kiblnd_conn_decref(conn); /* ...until here */
-
- /* start again now I've dropped the lock */
- goto again;
- }
- }
-
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ struct list_head closes = LIST_HEAD_INIT(closes);
+ struct list_head checksends = LIST_HEAD_INIT(checksends);
+ struct list_head *peers = &kiblnd_data.kib_peers[idx];
+ struct list_head *ptmp;
+ kib_peer_t *peer;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ unsigned long flags;
+
+ /* NB. We expect to have a look at all the peers and not find any
+ * RDMAs to time out, so we just use a shared lock while we
+ * take a look... */
+ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+
+ list_for_each(ptmp, peers) {
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+
+ list_for_each(ctmp, &peer->ibp_conns) {
+ int timedout;
+ int sendnoop;
+
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
+
+ LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
+
+ spin_lock(&conn->ibc_lock);
+
+ sendnoop = kiblnd_need_noop(conn);
+ timedout = kiblnd_conn_timed_out_locked(conn);
+ if (!sendnoop && !timedout) {
+ spin_unlock(&conn->ibc_lock);
+ continue;
+ }
+
+ if (timedout) {
+ CERROR("Timed out RDMA with %s (%lu): "
+ "c: %u, oc: %u, rc: %u\n",
+ libcfs_nid2str(peer->ibp_nid),
+ cfs_duration_sec(cfs_time_current() -
+ peer->ibp_last_alive),
+ conn->ibc_credits,
+ conn->ibc_outstanding_credits,
+ conn->ibc_reserved_credits);
+ list_add(&conn->ibc_connd_list, &closes);
+ } else {
+ list_add(&conn->ibc_connd_list, &checksends);
+ }
+ /* +ref for 'closes' or 'checksends' */
+ kiblnd_conn_addref(conn);
+
+ spin_unlock(&conn->ibc_lock);
+ }
+ }
+
+ read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ /* Handle timeout by closing the whole
+ * connection. We can only be sure RDMA activity
+ * has ceased once the QP has been modified. */
+ while (!list_empty(&closes)) {
+ conn = list_entry(closes.next,
+ kib_conn_t, ibc_connd_list);
+ list_del(&conn->ibc_connd_list);
+ kiblnd_close_conn(conn, -ETIMEDOUT);
+ kiblnd_conn_decref(conn);
+ }
+
+ /* In case we have enough credits to return via a
+ * NOOP, but there were no non-blocking tx descs
+ * free to do it last time... */
+ while (!list_empty(&checksends)) {
+ conn = list_entry(checksends.next,
+ kib_conn_t, ibc_connd_list);
+ list_del(&conn->ibc_connd_list);
+
+ spin_lock(&conn->ibc_lock);
+ kiblnd_check_sends_locked(conn);
+ spin_unlock(&conn->ibc_lock);
+
+ kiblnd_conn_decref(conn);
+ }
}
-void
+static void
kiblnd_disconnect_conn (kib_conn_t *conn)
{
- LASSERT (!in_interrupt());
- LASSERT (current == kiblnd_data.kib_connd);
- LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
+ LASSERT (!in_interrupt());
+ LASSERT (current == kiblnd_data.kib_connd);
+ LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
- rdma_disconnect(conn->ibc_cmid);
- kiblnd_finalise_conn(conn);
+ rdma_disconnect(conn->ibc_cmid);
+ kiblnd_finalise_conn(conn);
- kiblnd_peer_notify(conn->ibc_peer);
+ kiblnd_peer_notify(conn->ibc_peer);
}
+/*
+ * High-water mark for reconnections to the same peer; reconnection
+ * attempts should be delayed after more than KIB_RECONN_HIGH_RACE tries.
+ */
+#define KIB_RECONN_HIGH_RACE 10
+/*
+ * Allow connd to take a break and handle other things after this many
+ * consecutive reconnection attempts.
+ */
+#define KIB_RECONN_BREAK 100
+
int
kiblnd_connd (void *arg)
{
- wait_queue_t wait;
- unsigned long flags;
- kib_conn_t *conn;
- int timeout;
- int i;
- int dropped_lock;
- int peer_index = 0;
- unsigned long deadline = jiffies;
+ spinlock_t *lock = &kiblnd_data.kib_connd_lock;
+ wait_queue_t wait;
+ unsigned long flags;
+ kib_conn_t *conn;
+ int timeout;
+ int i;
+ int dropped_lock;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
- cfs_daemonize ("kiblnd_connd");
- cfs_block_allsigs ();
+ cfs_block_allsigs();
- init_waitqueue_entry (&wait, current);
- kiblnd_data.kib_connd = current;
+ init_waitqueue_entry(&wait, current);
+ kiblnd_data.kib_connd = current;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+ spin_lock_irqsave(lock, flags);
- while (!kiblnd_data.kib_shutdown) {
+ while (!kiblnd_data.kib_shutdown) {
+ int reconn = 0;
dropped_lock = 0;
- if (!list_empty (&kiblnd_data.kib_connd_zombies)) {
- conn = list_entry (kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
- list_del(&conn->ibc_list);
+ if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ kib_peer_t *peer = NULL;
- spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
- dropped_lock = 1;
+ conn = list_entry(kiblnd_data.kib_connd_zombies.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
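+ /* if this zombie has to trigger a reconnect, keep its peer
+ * alive until the conn is requeued below */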
+ if (conn->ibc_reconnect) {
+ peer = conn->ibc_peer;
+ kiblnd_peer_addref(peer);
+ }
- kiblnd_destroy_conn(conn);
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
- spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
- }
+ kiblnd_destroy_conn(conn, !peer);
- if (!list_empty (&kiblnd_data.kib_connd_conns)) {
- conn = list_entry (kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
- list_del(&conn->ibc_list);
+ spin_lock_irqsave(lock, flags);
+ if (!peer)
+ continue;
- spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
- dropped_lock = 1;
+ conn->ibc_peer = peer;
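+ /* after too many connection races, delay the retry via the
+ * once-a-second wait list */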
+ if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_list);
+ else
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_reconn_wait);
+ }
+
+ if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+ conn = list_entry(kiblnd_data.kib_connd_conns.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
- kiblnd_disconnect_conn(conn);
- kiblnd_conn_decref(conn);
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
- spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
+ kiblnd_disconnect_conn(conn);
+ kiblnd_conn_decref(conn);
+
+ spin_lock_irqsave(lock, flags);
}
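+
+ /* handle at most KIB_RECONN_BREAK reconnections per pass; once
+ * a second, delayed entries get another chance */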
+ while (reconn < KIB_RECONN_BREAK) {
+ if (kiblnd_data.kib_reconn_sec !=
+ ktime_get_real_seconds()) {
+ kiblnd_data.kib_reconn_sec = ktime_get_real_seconds();
+ list_splice_init(&kiblnd_data.kib_reconn_wait,
+ &kiblnd_data.kib_reconn_list);
+ }
+
+ if (list_empty(&kiblnd_data.kib_reconn_list))
+ break;
+
+ conn = list_entry(kiblnd_data.kib_reconn_list.next,
+ kib_conn_t, ibc_list);
+ list_del(&conn->ibc_list);
+
+ spin_unlock_irqrestore(lock, flags);
+ dropped_lock = 1;
+
+ reconn += kiblnd_reconnect_peer(conn->ibc_peer);
+ kiblnd_peer_decref(conn->ibc_peer);
+ LIBCFS_FREE(conn, sizeof(*conn));
+
+ spin_lock_irqsave(lock, flags);
+ }
+
/* careful with the jiffy wrap... */
timeout = (int)(deadline - jiffies);
if (timeout <= 0) {
const int p = 1;
int chunk = kiblnd_data.kib_peer_hash_size;
- spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
/* Time to check for RDMA timeouts on a few more
if (chunk == 0)
chunk = 1;
- for (i = 0; i < chunk; i++) {
- kiblnd_check_conns(peer_index);
- peer_index = (peer_index + 1) %
- kiblnd_data.kib_peer_hash_size;
- }
+ for (i = 0; i < chunk; i++) {
+ kiblnd_check_conns(peer_index);
+ peer_index = (peer_index + 1) %
+ kiblnd_data.kib_peer_hash_size;
+ }
- deadline += p * HZ;
- spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
- }
+ deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+ spin_lock_irqsave(lock, flags);
+ }
- if (dropped_lock)
- continue;
+ if (dropped_lock)
+ continue;
- /* Nothing to do for 'timeout' */
- set_current_state (TASK_INTERRUPTIBLE);
- add_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
- spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+ /* Nothing to do for 'timeout' */
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
+ spin_unlock_irqrestore(lock, flags);
- schedule_timeout (timeout);
+ schedule_timeout(timeout);
- set_current_state (TASK_RUNNING);
- remove_wait_queue (&kiblnd_data.kib_connd_waitq, &wait);
- spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags);
- }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
+ spin_lock_irqsave(lock, flags);
+ }
- spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags);
+ spin_unlock_irqrestore(lock, flags);
- kiblnd_thread_fini();
- return (0);
+ kiblnd_thread_fini();
+ return 0;
}
void
case IB_EVENT_COMM_EST:
CDEBUG(D_NET, "%s established\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ /* We received a packet but the connection isn't established yet;
+ * the handshake packet was probably lost, so nudge the CM to
+ * force the connection into the established state */
+ rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
return;
-
+
default:
CERROR("%s: Async QP event type %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
}
}
-void
+static void
kiblnd_complete (struct ib_wc *wc)
{
- switch (kiblnd_wreqid2type(wc->wr_id)) {
- default:
- LBUG();
+ switch (kiblnd_wreqid2type(wc->wr_id)) {
+ default:
+ LBUG();
+
+ case IBLND_WID_MR:
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)
+ CNETERR("FastReg failed: %d\n", wc->status);
+ return;
case IBLND_WID_RDMA:
/* We only get RDMA completion notification if it fails. All
* failing RDMA because 'tx' might be back on the idle list or
* even reused already if we didn't manage to post all our work
* items */
- CDEBUG(D_NETERROR, "RDMA (tx: %p) failed: %d\n",
- kiblnd_wreqid2ptr(wc->wr_id), wc->status);
+ CNETERR("RDMA (tx: %p) failed: %d\n",
+ kiblnd_wreqid2ptr(wc->wr_id), wc->status);
return;
case IBLND_WID_TX:
}
void
-kiblnd_cq_completion (struct ib_cq *cq, void *arg)
+kiblnd_cq_completion(struct ib_cq *cq, void *arg)
{
- /* NB I'm not allowed to schedule this conn once its refcount has
- * reached 0. Since fundamentally I'm racing with scheduler threads
- * consuming my CQ I could be called after all completions have
- * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
- * and this CQ is about to be destroyed so I NOOP. */
- kib_conn_t *conn = (kib_conn_t *)arg;
- unsigned long flags;
-
- LASSERT (cq == conn->ibc_cq);
+ /* NB I'm not allowed to schedule this conn once its refcount has
+ * reached 0. Since fundamentally I'm racing with scheduler threads
+ * consuming my CQ I could be called after all completions have
+ * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
+ * and this CQ is about to be destroyed so I NOOP. */
+ kib_conn_t *conn = (kib_conn_t *)arg;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ LASSERT(cq == conn->ibc_cq);
- conn->ibc_ready = 1;
+ spin_lock_irqsave(&sched->ibs_lock, flags);
- if (!conn->ibc_scheduled &&
- (conn->ibc_nrx > 0 ||
- conn->ibc_nsends_posted > 0)) {
- kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
- conn->ibc_scheduled = 1;
- list_add_tail(&conn->ibc_sched_list,
- &kiblnd_data.kib_sched_conns);
- wake_up(&kiblnd_data.kib_sched_waitq);
- }
+ conn->ibc_ready = 1;
+
+ if (!conn->ibc_scheduled &&
+ (conn->ibc_nrx > 0 ||
+ conn->ibc_nsends_posted > 0)) {
+ kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
+ conn->ibc_scheduled = 1;
+ list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
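+ /* only bother waking if a scheduler is actually asleep */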
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
+ }
+
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
}
void
int
kiblnd_scheduler(void *arg)
{
- long id = (long)arg;
- wait_queue_t wait;
- char name[16];
- unsigned long flags;
- kib_conn_t *conn;
- struct ib_wc wc;
- int rc;
- int did_something;
- int busy_loops = 0;
+ long id = (long)arg;
+ struct kib_sched_info *sched;
+ kib_conn_t *conn;
+ wait_queue_t wait;
+ unsigned long flags;
+ struct ib_wc wc;
+ int did_something;
+ int busy_loops = 0;
+ int rc;
- snprintf(name, sizeof(name), "kiblnd_sd_%02ld", id);
- cfs_daemonize(name);
- cfs_block_allsigs();
+ cfs_block_allsigs();
- init_waitqueue_entry(&wait, current);
+ init_waitqueue_entry(&wait, current);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
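+ /* each scheduler thread serves a single CPU partition; the
+ * partition is encoded in the thread id */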
+ sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
- while (!kiblnd_data.kib_shutdown) {
- if (busy_loops++ >= IBLND_RESCHED) {
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
- flags);
+ rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
+ if (rc != 0) {
+ CWARN("Unable to bind on CPU partition %d, please verify "
+ "whether all CPUs are healthy and reload modules if "
+ "necessary, otherwise your system might under risk of "
+ "low performance\n", sched->ibs_cpt);
+ }
- our_cond_resched();
- busy_loops = 0;
+ spin_lock_irqsave(&sched->ibs_lock, flags);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
- }
+ while (!kiblnd_data.kib_shutdown) {
+ if (busy_loops++ >= IBLND_RESCHED) {
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ cond_resched();
+ busy_loops = 0;
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ }
+
+ did_something = 0;
- did_something = 0;
+ if (!list_empty(&sched->ibs_conns)) {
+ conn = list_entry(sched->ibs_conns.next,
+ kib_conn_t, ibc_sched_list);
+ /* take over kib_sched_conns' ref on conn... */
+ LASSERT(conn->ibc_scheduled);
+ list_del(&conn->ibc_sched_list);
+ conn->ibc_ready = 0;
- if (!list_empty(&kiblnd_data.kib_sched_conns)) {
- conn = list_entry(kiblnd_data.kib_sched_conns.next,
- kib_conn_t, ibc_sched_list);
- /* take over kib_sched_conns' ref on conn... */
- LASSERT(conn->ibc_scheduled);
- list_del(&conn->ibc_sched_list);
- conn->ibc_ready = 0;
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
- flags);
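+ /* poison wr_id so a CQE that ib_poll_cq reports but never
+ * fills in is caught below */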
+ wc.wr_id = IBLND_WID_INVAL;
rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
if (rc == 0) {
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
- continue;
- }
-
- rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+ spin_lock_irqsave(&sched->ibs_lock,
+ flags);
+ continue;
+ }
+
+ rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+ }
+
+ if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
+ LCONSOLE_ERROR(
+ "ib_poll_cq (rc: %d) returned invalid "
+ "wr_id, opcode %d, status: %d, "
+ "vendor_err: %d, conn: %s status: %d\n"
+ "please upgrade firmware and OFED or "
+ "contact vendor.\n", rc,
+ wc.opcode, wc.status, wc.vendor_err,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ conn->ibc_state);
+ rc = -EINVAL;
+ }
+
+ if (rc < 0) {
+ CWARN("%s: ib_poll_cq failed: %d, "
+ "closing connection\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ rc);
+ kiblnd_close_conn(conn, -EIO);
+ kiblnd_conn_decref(conn);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ continue;
+ }
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+
+ if (rc != 0 || conn->ibc_ready) {
+ /* There may be another completion waiting; get
+ * another scheduler to check while I handle
+ * this one... */
+ /* +1 ref for sched_conns */
+ kiblnd_conn_addref(conn);
+ list_add_tail(&conn->ibc_sched_list,
+ &sched->ibs_conns);
+ if (waitqueue_active(&sched->ibs_waitq))
+ wake_up(&sched->ibs_waitq);
+ } else {
+ conn->ibc_scheduled = 0;
+ }
+
+ if (rc != 0) {
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+ kiblnd_complete(&wc);
+
+ spin_lock_irqsave(&sched->ibs_lock, flags);
}
- if (rc < 0) {
- CWARN("%s: ib_poll_cq failed: %d, "
- "closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_close_conn(conn, -EIO);
- kiblnd_conn_decref(conn);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ kiblnd_conn_decref(conn); /* ...drop my ref from above */
+ did_something = 1;
+ }
+
+ if (did_something)
+ continue;
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ schedule();
+ busy_loops = 0;
+
+ remove_wait_queue(&sched->ibs_waitq, &wait);
+ set_current_state(TASK_RUNNING);
+ spin_lock_irqsave(&sched->ibs_lock, flags);
+ }
+
+ spin_unlock_irqrestore(&sched->ibs_lock, flags);
+
+ kiblnd_thread_fini();
+ return 0;
+}
+
+int
+kiblnd_failover_thread(void *arg)
+{
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_dev_t *dev;
+ wait_queue_t wait;
+ unsigned long flags;
+ int rc;
+
+ LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
+
+ cfs_block_allsigs();
+
+ init_waitqueue_entry(&wait, current);
+ write_lock_irqsave(glock, flags);
+
+ while (!kiblnd_data.kib_shutdown) {
+ int do_failover = 0;
+ int long_sleep;
+
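+ /* find a failed device whose failover back-off has expired */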
+ list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
+ ibd_fail_list) {
+ if (cfs_time_before(cfs_time_current(),
+ dev->ibd_next_failover))
continue;
- }
+ do_failover = 1;
+ break;
+ }
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
- flags);
-
- if (rc != 0 || conn->ibc_ready) {
- /* There may be another completion waiting; get
- * another scheduler to check while I handle
- * this one... */
- kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
- list_add_tail(&conn->ibc_sched_list,
- &kiblnd_data.kib_sched_conns);
- wake_up(&kiblnd_data.kib_sched_waitq);
- } else {
- conn->ibc_scheduled = 0;
- }
-
- if (rc != 0) {
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock,
- flags);
+ if (do_failover) {
+ list_del_init(&dev->ibd_fail_list);
+ dev->ibd_failover = 1;
+ write_unlock_irqrestore(glock, flags);
+
+ rc = kiblnd_dev_failover(dev);
- kiblnd_complete(&wc);
+ write_lock_irqsave(glock, flags);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock,
- flags);
+ LASSERT (dev->ibd_failover);
+ dev->ibd_failover = 0;
+ if (rc >= 0) { /* device is OK or failover succeeded */
+ dev->ibd_next_failover = cfs_time_shift(3);
+ continue;
}
- kiblnd_conn_decref(conn); /* ...drop my ref from above */
- did_something = 1;
- }
+ /* failed to failover, retry later */
+ dev->ibd_next_failover =
+ cfs_time_shift(min(dev->ibd_failed_failover, 10));
+ if (kiblnd_dev_can_failover(dev)) {
+ list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ }
- if (did_something)
continue;
+ }
+
+ /* long sleep if no more pending failover */
+ long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&kiblnd_data.kib_sched_waitq, &wait);
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+ write_unlock_irqrestore(glock, flags);
- schedule();
- busy_loops = 0;
+ rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
+ cfs_time_seconds(1));
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
+ write_lock_irqsave(glock, flags);
+
+ if (!long_sleep || rc != 0)
+ continue;
- remove_wait_queue(&kiblnd_data.kib_sched_waitq, &wait);
- set_current_state(TASK_RUNNING);
- spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags);
+ /* after a long sleep, routinely check all active devices; we
+ * need this because with no active connection on a dev and no
+ * local SEND, we may keep listening on the wrong HCA forever
+ * after a bonding failover */
+ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+ if (kiblnd_dev_can_failover(dev)) {
+ list_add_tail(&dev->ibd_fail_list,
+ &kiblnd_data.kib_failed_devs);
+ }
+ }
}
- spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags);
+ write_unlock_irqrestore(glock, flags);
kiblnd_thread_fini();
- return (0);
+ return 0;
}