* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/klnds/o2iblnd/o2iblnd_cb.c
*
#define MAX_CONN_RACES_BEFORE_ABORT 20
-static void kiblnd_peer_alive(kib_peer_ni_t *peer_ni);
-static void kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error);
-static void kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx,
- int type, int body_nob);
-static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_unmap_tx(struct lnet_ni *ni, kib_tx_t *tx);
-static void kiblnd_check_sends_locked(kib_conn_t *conn);
+static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni);
+static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
+ int error);
+static struct ib_rdma_wr *
+kiblnd_init_tx_msg_payload(struct lnet_ni *ni, struct kib_tx *tx,
+ int type, int body_nob, int payload_nob);
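+/* common case: initialise a tx message that carries no extra payload */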
+#define kiblnd_init_tx_msg(ni, tx, type, body) \
+ kiblnd_init_tx_msg_payload(ni, tx, type, body, 0)
+static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+ int resid, struct kib_rdma_desc *dstrd, u64 dstcookie);
+static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
+
+static void kiblnd_unmap_tx(struct kib_tx *tx);
+static void kiblnd_check_sends_locked(struct kib_conn *conn);
-void
-kiblnd_tx_done(struct lnet_ni *ni, kib_tx_t *tx)
+static void
+kiblnd_tx_done(struct kib_tx *tx)
{
struct lnet_msg *lntmsg[2];
- kib_net_t *net = ni->ni_data;
int rc;
int i;
- LASSERT (net != NULL);
LASSERT (!in_interrupt());
LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer_ni response */
LASSERT (tx->tx_pool != NULL);
- kiblnd_unmap_tx(ni, tx);
+ kiblnd_unmap_tx(tx);
/* tx may have up to 2 lnet msgs to finalise */
lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
rc = tx->tx_status;
if (tx->tx_conn != NULL) {
- LASSERT (ni == tx->tx_conn->ibc_peer->ibp_ni);
-
kiblnd_conn_decref(tx->tx_conn);
tx->tx_conn = NULL;
}
- tx->tx_nwrq = 0;
+ tx->tx_nwrq = tx->tx_nsge = 0;
tx->tx_status = 0;
kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list);
if (lntmsg[i] == NULL)
continue;
- lnet_finalize(ni, lntmsg[i], rc);
+ /* propagate health status to LNet for requests */
+ if (i == 0 && lntmsg[i])
+ lntmsg[i]->msg_health_status = tx->tx_hstatus;
+
+ lnet_finalize(lntmsg[i], rc);
}
}
void
-kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist, int status)
+kiblnd_txlist_done(struct list_head *txlist, int status,
+ enum lnet_msg_hstatus hstatus)
{
- kib_tx_t *tx;
-
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, kib_tx_t, tx_list);
+ struct kib_tx *tx;
+ while ((tx = list_first_entry_or_null(txlist,
+ struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
/* complete now */
tx->tx_waiting = 0;
tx->tx_status = status;
- kiblnd_tx_done(ni, tx);
+ if (hstatus != LNET_MSG_STATUS_OK)
+ tx->tx_hstatus = hstatus;
+ kiblnd_tx_done(tx);
}
}
-static kib_tx_t *
+static struct kib_tx *
kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
{
- kib_net_t *net = (kib_net_t *)ni->ni_data;
- struct list_head *node;
- kib_tx_t *tx;
- kib_tx_poolset_t *tps;
+ struct kib_net *net = ni->ni_data;
+ struct list_head *node;
+ struct kib_tx *tx;
+ struct kib_tx_poolset *tps;
tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
node = kiblnd_pool_alloc_node(&tps->tps_poolset);
if (node == NULL)
return NULL;
- tx = container_of(node, kib_tx_t, tx_list);
+ tx = container_of(node, struct kib_tx, tx_list);
LASSERT (tx->tx_nwrq == 0);
LASSERT (!tx->tx_queued);
LASSERT (tx->tx_lntmsg[1] == NULL);
LASSERT (tx->tx_nfrags == 0);
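+	/* (re)initialise the per-use state of this idle tx */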
+ tx->tx_gpu = 0;
+ tx->tx_gaps = false;
+ tx->tx_hstatus = LNET_MSG_STATUS_OK;
+
return tx;
}
static void
-kiblnd_drop_rx(kib_rx_t *rx)
+kiblnd_drop_rx(struct kib_rx *rx)
{
- kib_conn_t *conn = rx->rx_conn;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;
spin_lock_irqsave(&sched->ibs_lock, flags);
LASSERT(conn->ibc_nrx > 0);
}
int
-kiblnd_post_rx (kib_rx_t *rx, int credit)
+kiblnd_post_rx(struct kib_rx *rx, int credit)
{
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
- struct ib_recv_wr *bad_wrq = NULL;
- struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
- int rc;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct ib_recv_wr *bad_wrq = NULL;
+#ifdef HAVE_OFED_IB_GET_DMA_MR
+ struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
+#endif
+ int rc;
LASSERT (net != NULL);
LASSERT (!in_interrupt());
LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
+#ifdef HAVE_OFED_IB_GET_DMA_MR
LASSERT(mr != NULL);
- rx->rx_sge.lkey = mr->lkey;
+ rx->rx_sge.lkey = mr->lkey;
+#else
+ rx->rx_sge.lkey = conn->ibc_hdev->ibh_pd->local_dma_lkey;
+#endif
rx->rx_sge.addr = rx->rx_msgaddr;
rx->rx_sge.length = IBLND_MSG_SIZE;
* own this rx (and rx::rx_conn) anymore, LU-5678.
*/
kiblnd_conn_addref(conn);
+#ifdef HAVE_OFED_IB_POST_SEND_RECV_CONST
+ rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
+ (const struct ib_recv_wr **)&bad_wrq);
+#else
rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
+#endif
if (unlikely(rc != 0)) {
CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
return rc;
}
-static kib_tx_t *
-kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
+static struct kib_tx *
+kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
- struct list_head *tmp;
-
- list_for_each(tmp, &conn->ibc_active_txs) {
- kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+ struct kib_tx *tx;
+ list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) {
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
}
static void
-kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
+kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, u64 cookie)
{
- kib_tx_t *tx;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- int idle;
+ struct kib_tx *tx;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+ int idle;
spin_lock(&conn->ibc_lock);
spin_unlock(&conn->ibc_lock);
CWARN("Unmatched completion type %x cookie %#llx from %s\n",
- txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_close_conn(conn, -EPROTO);
- return;
- }
+ txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ kiblnd_close_conn(conn, -EPROTO);
+ return;
+ }
- if (tx->tx_status == 0) { /* success so far */
- if (status < 0) { /* failed? */
- tx->tx_status = status;
- } else if (txtype == IBLND_MSG_GET_REQ) {
- lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
- }
- }
+ if (tx->tx_status == 0) { /* success so far */
+ if (status < 0) { /* failed? */
+ if (status == -ECONNABORTED) {
+ CDEBUG(D_NET, "bad status for connection to %s "
+ "with completion type %x\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ txtype);
+ }
- tx->tx_waiting = 0;
+ tx->tx_status = status;
+ tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
+ } else if (txtype == IBLND_MSG_GET_REQ) {
+ lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
+ }
+ }
- idle = !tx->tx_queued && (tx->tx_sending == 0);
- if (idle)
+ tx->tx_waiting = 0;
+
+ idle = !tx->tx_queued && (tx->tx_sending == 0);
+ if (idle)
list_del(&tx->tx_list);
spin_unlock(&conn->ibc_lock);
if (idle)
- kiblnd_tx_done(ni, tx);
+ kiblnd_tx_done(tx);
}
static void
-kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
+kiblnd_send_completion(struct kib_conn *conn, int type, int status, u64 cookie)
{
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+ struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
if (tx == NULL) {
CERROR("Can't get tx for completion %x for %s\n",
tx->tx_msg->ibm_u.completion.ibcm_status = status;
tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
- kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
+ kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));
kiblnd_queue_tx(tx, conn);
}
static void
-kiblnd_handle_rx (kib_rx_t *rx)
+kiblnd_handle_rx(struct kib_rx *rx)
{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- int credits = msg->ibm_credits;
- kib_tx_t *tx;
- int rc = 0;
- int rc2;
- int post_credit;
+ struct kib_msg *msg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+ int credits = msg->ibm_credits;
+ struct kib_tx *tx;
+ int rc = 0;
+ int rc2;
+ int post_credit;
+ struct lnet_hdr hdr;
+ struct lnet_nid srcnid;
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- CDEBUG (D_NET, "Received %x[%d] from %s\n",
- msg->ibm_type, credits,
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CDEBUG(D_NET, "Received %x[%d] nob %u cm_id %p qp_num 0x%x\n",
+ msg->ibm_type, credits,
+ msg->ibm_nob,
+ conn->ibc_cmid,
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0);
+ kiblnd_dump_conn_dbg(conn);
if (credits != 0) {
/* Have I received credits that will let me send? */
post_credit = IBLND_POSTRX_PEER_CREDIT;
break;
- case IBLND_MSG_IMMEDIATE:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
- msg->ibm_srcnid, rx, 0);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
+ case IBLND_MSG_IMMEDIATE:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ lnet_hdr_from_nid4(&hdr, &msg->ibm_u.immediate.ibim_hdr);
+ lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
+ rc = lnet_parse(ni, &hdr, &srcnid, rx, 0);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
- case IBLND_MSG_PUT_REQ:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
- msg->ibm_srcnid, rx, 1);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
+ case IBLND_MSG_PUT_REQ:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ lnet_hdr_from_nid4(&hdr, &msg->ibm_u.putreq.ibprm_hdr);
+ lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
+ rc = lnet_parse(ni, &hdr, &srcnid, rx, 1);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
case IBLND_MSG_PUT_NAK:
CWARN ("PUT_NACK from %s\n",
* (a) I can overwrite tx_msg since my peer_ni has received it!
* (b) tx_waiting set tells tx_complete() it's not done. */
- tx->tx_nwrq = 0; /* overwrite PUT_REQ */
+ tx->tx_nwrq = tx->tx_nsge = 0; /* overwrite PUT_REQ */
rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE,
kiblnd_rd_size(&msg->ibm_u.putack.ibpam_rd),
msg->ibm_u.completion.ibcm_cookie);
break;
- case IBLND_MSG_GET_REQ:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
- msg->ibm_srcnid, rx, 1);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
+ case IBLND_MSG_GET_REQ:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ lnet_hdr_from_nid4(&hdr, &msg->ibm_u.get.ibgm_hdr);
+ lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
+ rc = lnet_parse(ni, &hdr, &srcnid, rx, 1);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
case IBLND_MSG_GET_DONE:
post_credit = IBLND_POSTRX_RSRVD_CREDIT;
if (rc < 0) /* protocol error */
kiblnd_close_conn(conn, rc);
- if (post_credit != IBLND_POSTRX_DONT_POST)
- kiblnd_post_rx(rx, post_credit);
+ if (post_credit != IBLND_POSTRX_DONT_POST)
+ kiblnd_post_rx(rx, post_credit);
}
static void
-kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
+kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- kib_net_t *net = ni->ni_data;
- int rc;
- int err = -EIO;
+ struct kib_msg *msg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+ struct kib_net *net = ni->ni_data;
+ int rc;
+ int err = -EIO;
- LASSERT (net != NULL);
- LASSERT (rx->rx_nob < 0); /* was posted */
- rx->rx_nob = 0; /* isn't now */
+ LASSERT(net);
+ LASSERT(rx->rx_nob < 0); /* was posted */
+ rx->rx_nob = 0; /* isn't now */
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
- goto ignore;
+ if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
+ goto ignore;
- if (status != IB_WC_SUCCESS) {
- CNETERR("Rx from %s failed: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
- goto failed;
- }
+ if (status != IB_WC_SUCCESS) {
+ CNETERR("Rx from %s failed: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
+ goto failed;
+ }
- LASSERT (nob >= 0);
- rx->rx_nob = nob;
+ LASSERT(nob >= 0);
+ rx->rx_nob = nob;
- rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc != 0) {
- CERROR ("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- goto failed;
- }
+ rc = kiblnd_unpack_msg(msg, rx->rx_nob);
+ if (rc != 0) {
+ CERROR("Error %d unpacking rx from %s\n",
+ rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ goto failed;
+ }
- if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
- msg->ibm_dstnid != ni->ni_nid ||
- msg->ibm_srcstamp != conn->ibc_incarnation ||
- msg->ibm_dststamp != net->ibn_incarnation) {
- CERROR ("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- err = -ESTALE;
- goto failed;
- }
+ if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
+ msg->ibm_dstnid != lnet_nid_to_nid4(&ni->ni_nid) ||
+ msg->ibm_srcstamp != conn->ibc_incarnation ||
+ msg->ibm_dststamp != net->ibn_incarnation) {
+ CERROR("Stale rx from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ err = -ESTALE;
+ goto failed;
+ }
- /* set time last known alive */
- kiblnd_peer_alive(conn->ibc_peer);
+ /* set time last known alive */
+ kiblnd_peer_alive(conn->ibc_peer);
- /* racing with connection establishment/teardown! */
+ /* racing with connection establishment/teardown! */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
return;
}
write_unlock_irqrestore(g_lock, flags);
- }
- kiblnd_handle_rx(rx);
- return;
-
- failed:
- CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
- kiblnd_close_conn(conn, err);
- ignore:
- kiblnd_drop_rx(rx); /* Don't re-post rx. */
-}
-
-static struct page *
-kiblnd_kvaddr_to_page (unsigned long vaddr)
-{
- struct page *page;
+ }
+ kiblnd_handle_rx(rx);
+ return;
- if (is_vmalloc_addr((void *)vaddr)) {
- page = vmalloc_to_page ((void *)vaddr);
- LASSERT (page != NULL);
- return page;
- }
-#ifdef CONFIG_HIGHMEM
- if (vaddr >= PKMAP_BASE &&
- vaddr < (PKMAP_BASE + LAST_PKMAP * PAGE_SIZE)) {
- /* No highmem pages only used for bulk (kiov) I/O */
- CERROR("find page for address in highmem\n");
- LBUG();
- }
-#endif
- page = virt_to_page (vaddr);
- LASSERT (page != NULL);
- return page;
+failed:
+ CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
+ kiblnd_close_conn(conn, err);
+ignore:
+ kiblnd_drop_rx(rx); /* Don't re-post rx. */
}
static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
+kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, u32 nob)
{
- kib_hca_dev_t *hdev;
- kib_fmr_poolset_t *fps;
+ struct kib_hca_dev *hdev;
+ struct kib_dev *dev;
+ struct kib_fmr_poolset *fps;
int cpt;
int rc;
+ int i;
LASSERT(tx->tx_pool != NULL);
LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
+ dev = net->ibn_dev;
hdev = tx->tx_pool->tpo_hdev;
cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
+ /*
+ * If we're dealing with FastReg, but the device doesn't
+ * support GAPS and the tx has GAPS, then there is no real point
+ * in trying to map the memory, because it'll just fail. So
+ * preemptively fail with an appropriate message
+ */
+ if (IS_FAST_REG_DEV(dev) &&
+ !(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT) &&
+ tx->tx_gaps) {
+ CERROR("Using FastReg with no GAPS support, but tx has gaps. "
+ "Try setting use_fastreg_gaps to 1\n");
+ return -EPROTONOSUPPORT;
+ }
+
+#ifdef HAVE_OFED_FMR_POOL_API
+ /*
+	 * If the tx has gaps and FMR does not support them, then we
+	 * should make sure that the number of fragments we'll be sending
+	 * fits within the number of fragments negotiated on the
+	 * connection; otherwise we won't be able to RDMA the data.
+	 * We need to maintain the fragment-count negotiation on the
+	 * connection for backwards compatibility.
+ */
+ if (tx->tx_gaps && (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)) {
+ if (tx->tx_conn &&
+ tx->tx_conn->ibc_max_frags <= rd->rd_nfrags) {
+ CERROR("TX number of frags (%d) is <= than connection"
+ " number of frags (%d). Consider setting peer's"
+ " map_on_demand to 256\n", tx->tx_nfrags,
+ tx->tx_conn->ibc_max_frags);
+ return -EFBIG;
+ }
+ }
+#endif
+
fps = net->ibn_fmr_ps[cpt];
- rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
+ rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr);
if (rc != 0) {
- CERROR("Can't map %u pages: %d\n", nob, rc);
+ CERROR("Can't map %u bytes (%u/%u)s: %d\n", nob,
+ tx->tx_nfrags, rd->rd_nfrags, rc);
return rc;
}
- /* If rd is not tx_rd, it's going to get sent to a peer_ni, who will need
- * the rkey */
- rd->rd_key = tx->fmr.fmr_key;
- rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
- rd->rd_frags[0].rf_nob = nob;
- rd->rd_nfrags = 1;
+ /*
+ * If rd is not tx_rd, it's going to get sent to a peer_ni, who will
+ * need the rkey
+ */
+ rd->rd_key = tx->tx_fmr.fmr_key;
+ /*
+ * for FastReg or FMR with no gaps we can accumulate all
+ * the fragments in one FastReg or FMR fragment.
+ */
+ if (
+#ifdef HAVE_OFED_FMR_POOL_API
+ ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
+ && !tx->tx_gaps) ||
+#endif
+ IS_FAST_REG_DEV(dev)) {
+ /* FMR requires zero based address */
+#ifdef HAVE_OFED_FMR_POOL_API
+ if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
+ rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
+#endif
+ rd->rd_frags[0].rf_nob = nob;
+ rd->rd_nfrags = 1;
+ } else {
+ /*
+ * We're transmitting with gaps using FMR.
+ * We'll need to use multiple fragments and identify the
+ * zero based address of each fragment.
+ */
+ for (i = 0; i < rd->rd_nfrags; i++) {
+ rd->rd_frags[i].rf_addr &= ~hdev->ibh_page_mask;
+ rd->rd_frags[i].rf_addr += i << hdev->ibh_page_shift;
+ }
+ }
return 0;
}
static void
-kiblnd_unmap_tx(struct lnet_ni *ni, kib_tx_t *tx)
+kiblnd_unmap_tx(struct kib_tx *tx)
{
- kib_net_t *net = ni->ni_data;
+ if (
+#ifdef HAVE_OFED_FMR_POOL_API
+ tx->tx_fmr.fmr_pfmr ||
+#endif
+ tx->tx_fmr.fmr_frd)
+ kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);
- LASSERT(net != NULL);
+ if (tx->tx_nfrags != 0) {
+ kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev, tx);
+ tx->tx_nfrags = 0;
+ }
+}
- if (net->ibn_fmr_ps != NULL)
- kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
+#ifdef HAVE_OFED_IB_GET_DMA_MR
+static struct ib_mr *
+kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
+{
+ struct kib_net *net = ni->ni_data;
+ struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
- if (tx->tx_nfrags != 0) {
- kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
- tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
- tx->tx_nfrags = 0;
- }
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+
+ /*
+ * if map-on-demand is turned on and the device supports
+ * either FMR or FastReg then use that. Otherwise use global
+ * memory regions. If that's not available either, then you're
+ * dead in the water and fail the operation.
+ */
+ if (tunables->lnd_map_on_demand && (IS_FAST_REG_DEV(net->ibn_dev)
+#ifdef HAVE_OFED_FMR_POOL_API
+ || net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED
+#endif
+ ))
+ return NULL;
+
+ /*
+ * hdev->ibh_mrs can be NULL. This case is dealt with gracefully
+	 * in the call chain. The mapping will fail with an appropriate
+	 * error message.
+ */
+ return hdev->ibh_mrs;
}
+#endif
-static int
-kiblnd_map_tx(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
+static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, int nfrags)
{
- kib_net_t *net = ni->ni_data;
- kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
- struct ib_mr *mr = NULL;
+ struct kib_net *net = ni->ni_data;
+ struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
+#ifdef HAVE_OFED_IB_GET_DMA_MR
+ struct ib_mr *mr = NULL;
+#endif
__u32 nob;
int i;
tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
tx->tx_nfrags = nfrags;
- rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
- tx->tx_nfrags, tx->tx_dmadir);
-
+ rd->rd_nfrags = kiblnd_dma_map_sg(hdev, tx);
for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
hdev->ibh_ibdev, &tx->tx_frags[i]);
nob += rd->rd_frags[i].rf_nob;
}
- mr = kiblnd_find_rd_dma_mr(ni, rd,
- (tx->tx_conn != NULL) ?
- tx->tx_conn->ibc_max_frags : -1);
+#ifdef HAVE_OFED_IB_GET_DMA_MR
+ mr = kiblnd_find_rd_dma_mr(ni, rd);
if (mr != NULL) {
/* found pre-mapping MR */
rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey;
return 0;
}
+#endif
if (net->ibn_fmr_ps != NULL)
return kiblnd_fmr_map_tx(net, tx, rd, nob);
return -EINVAL;
}
-
-static int
-kiblnd_setup_rd_iov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- unsigned int niov, struct kvec *iov, int offset, int nob)
+static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, int nkiov,
+ struct bio_vec *kiov, int offset, int nob)
{
- kib_net_t *net = ni->ni_data;
- struct page *page;
- struct scatterlist *sg;
- unsigned long vaddr;
- int fragnob;
- int page_offset;
-
- LASSERT (nob > 0);
- LASSERT (niov > 0);
- LASSERT (net != NULL);
+ struct kib_net *net = ni->ni_data;
+ struct scatterlist *sg;
+ int fragnob;
+ int max_nkiov;
+ int sg_count = 0;
- while (offset >= iov->iov_len) {
- offset -= iov->iov_len;
- niov--;
- iov++;
- LASSERT (niov > 0);
- }
+ CDEBUG(D_NET, "niov %d offset %d nob %d gpu %d\n",
+ nkiov, offset, nob, tx->tx_gpu);
- sg = tx->tx_frags;
- do {
- LASSERT (niov > 0);
+ LASSERT(nob > 0);
+ LASSERT(nkiov > 0);
+ LASSERT(net != NULL);
- vaddr = ((unsigned long)iov->iov_base) + offset;
- page_offset = vaddr & (PAGE_SIZE - 1);
- page = kiblnd_kvaddr_to_page(vaddr);
- if (page == NULL) {
- CERROR ("Can't find page\n");
- return -EFAULT;
- }
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
+ nkiov--;
+ kiov++;
+ LASSERT(nkiov > 0);
+ }
- fragnob = min((int)(iov->iov_len - offset), nob);
- fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
+ max_nkiov = nkiov;
+
+ sg = tx->tx_frags;
+ do {
+ LASSERT(nkiov > 0);
- sg_set_page(sg, page, fragnob, page_offset);
- sg = sg_next(sg);
if (!sg) {
CERROR("lacking enough sg entries to map tx\n");
return -EFAULT;
}
+ sg_count++;
- if (offset + fragnob < iov->iov_len) {
- offset += fragnob;
- } else {
- offset = 0;
- iov++;
- niov--;
- }
- nob -= fragnob;
- } while (nob > 0);
+ fragnob = min((int)(kiov->bv_len - offset), nob);
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
-}
-
-static int
-kiblnd_setup_rd_kiov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- int nkiov, lnet_kiov_t *kiov, int offset, int nob)
-{
- kib_net_t *net = ni->ni_data;
- struct scatterlist *sg;
- int fragnob;
-
- CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
-
- LASSERT (nob > 0);
- LASSERT (nkiov > 0);
- LASSERT (net != NULL);
-
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
- nkiov--;
- kiov++;
- LASSERT (nkiov > 0);
- }
-
- sg = tx->tx_frags;
- do {
- LASSERT (nkiov > 0);
-
- fragnob = min((int)(kiov->kiov_len - offset), nob);
+ /*
+ * We're allowed to start at a non-aligned page offset in
+ * the first fragment and end at a non-aligned page offset
+ * in the last fragment.
+ */
+ if ((fragnob < (int)(kiov->bv_len - offset)) &&
+ nkiov < max_nkiov && nob > fragnob) {
+ CDEBUG(D_NET, "fragnob %d < available page %d: with"
+ " remaining %d kiovs with %d nob left\n",
+ fragnob, (int)(kiov->bv_len - offset),
+ nkiov, nob);
+ tx->tx_gaps = true;
+ }
- sg_set_page(sg, kiov->kiov_page, fragnob,
- kiov->kiov_offset + offset);
+ sg_set_page(sg, kiov->bv_page, fragnob,
+ kiov->bv_offset + offset);
sg = sg_next(sg);
- if (!sg) {
- CERROR("lacking enough sg entries to map tx\n");
- return -EFAULT;
- }
- offset = 0;
- kiov++;
- nkiov--;
- nob -= fragnob;
- } while (nob > 0);
+ offset = 0;
+ kiov++;
+ nkiov--;
+ nob -= fragnob;
+ } while (nob > 0);
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
+ return kiblnd_map_tx(ni, tx, rd, sg_count);
}
static int
-kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
+kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
__must_hold(&conn->ibc_lock)
{
- kib_msg_t *msg = tx->tx_msg;
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
+ struct kib_msg *msg = tx->tx_msg;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
struct lnet_ni *ni = peer_ni->ibp_ni;
+ struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
int ver = conn->ibc_version;
int rc;
int done;
LASSERT(tx->tx_queued);
/* We rely on this for QP sizing */
- LASSERT(tx->tx_nwrq > 0);
+ LASSERT(tx->tx_nwrq > 0 && tx->tx_nsge >= 0);
LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags);
LASSERT(credit == 0 || credit == 1);
if (conn->ibc_nsends_posted ==
kiblnd_concurrent_sends(ver, ni)) {
- /* tx completions outstanding... */
- CDEBUG(D_NET, "%s: posted enough\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- return -EAGAIN;
- }
+ /* tx completions outstanding... */
+ CDEBUG(D_NET, "%s: posted enough\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ return -EAGAIN;
+ }
if (credit != 0 && conn->ibc_credits == 0) { /* no credits */
- CDEBUG(D_NET, "%s: no credits\n",
- libcfs_nid2str(peer_ni->ibp_nid));
+ CDEBUG(D_NET, "%s: no credits cm_id %p qp_num 0x%x\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ conn->ibc_cmid,
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0);
+ kiblnd_dump_conn_dbg(conn);
return -EAGAIN;
}
* kiblnd_check_sends_locked will queue NOOP again when
* posted NOOPs complete */
spin_unlock(&conn->ibc_lock);
- kiblnd_tx_done(peer_ni->ibp_ni, tx);
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ kiblnd_tx_done(tx);
spin_lock(&conn->ibc_lock);
- CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
- libcfs_nid2str(peer_ni->ibp_nid),
- conn->ibc_noops_posted);
- return 0;
+ CDEBUG(D_NET, "%s(%d): redundant or enough NOOP cm_id %p qp_num 0x%x\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ conn->ibc_noops_posted,
+ conn->ibc_cmid,
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0);
+ kiblnd_dump_conn_dbg(conn);
+ return 0;
}
+ CDEBUG(D_NET, "Transmit %x[%d] nob %u cm_id %p qp_num 0x%x\n",
+ msg->ibm_type, credit,
+ msg->ibm_nob,
+ conn->ibc_cmid,
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0);
+ kiblnd_dump_conn_dbg(conn);
+
kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
peer_ni->ibp_nid, conn->ibc_incarnation);
- conn->ibc_credits -= credit;
- conn->ibc_outstanding_credits = 0;
- conn->ibc_nsends_posted++;
- if (msg->ibm_type == IBLND_MSG_NOOP)
- conn->ibc_noops_posted++;
-
- /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
- * PUT. If so, it was first queued here as a PUT_REQ, sent and
- * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
- * and then re-queued here. It's (just) possible that
- * tx_sending is non-zero if we've not done the tx_complete()
- * from the first send; hence the ++ rather than = below. */
- tx->tx_sending++;
+ conn->ibc_credits -= credit;
+ conn->ibc_outstanding_credits = 0;
+ conn->ibc_nsends_posted++;
+ if (msg->ibm_type == IBLND_MSG_NOOP)
+ conn->ibc_noops_posted++;
+
+ /* CAVEAT EMPTOR! This tx could be the PUT_DONE of an RDMA
+ * PUT. If so, it was first queued here as a PUT_REQ, sent and
+ * stashed on ibc_active_txs, matched by an incoming PUT_ACK,
+ * and then re-queued here. It's (just) possible that
+ * tx_sending is non-zero if we've not done the tx_complete()
+ * from the first send; hence the ++ rather than = below. */
+ tx->tx_sending++;
list_add(&tx->tx_list, &conn->ibc_active_txs);
/* I'm still holding ibc_lock! */
if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
+ CDEBUG(D_NET, "connection to %s is not established\n",
+		       conn->ibc_peer ? libcfs_nid2str(conn->ibc_peer->ibp_nid) : "NULL");
rc = -ECONNABORTED;
} else if (tx->tx_pool->tpo_pool.po_failed ||
conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
struct ib_send_wr *wr = &tx->tx_wrq[0].wr;
- if (frd != NULL) {
- if (!frd->frd_valid) {
- wr = &frd->frd_inv_wr.wr;
- wr->next = &frd->frd_fastreg_wr.wr;
- } else {
- wr = &frd->frd_fastreg_wr.wr;
- }
+ if (frd != NULL && !frd->frd_posted) {
+ wr = &frd->frd_inv_wr.wr;
+ wr->next = &frd->frd_fastreg_wr.wr;
frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
}
libcfs_nid2str(conn->ibc_peer->ibp_nid));
bad = NULL;
- rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
+ if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus))
+ rc = -EINVAL;
+ else
+#ifdef HAVE_OFED_IB_POST_SEND_RECV_CONST
+ rc = ib_post_send(conn->ibc_cmid->qp, wr,
+ (const struct ib_send_wr **)&bad);
+#else
+ rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
+#endif
+ if (frd && !frd->frd_posted) {
+			/* The local invalidate has been consumed (and a
+			 * fresh one is needed before the next fastreg)
+			 * if the post succeeded, or if the failing wr
+			 * was not the invalidate itself. */
+ frd->frd_valid =
+ !(rc == 0 || (bad != &frd->frd_inv_wr.wr));
+ }
}
- conn->ibc_last_send = jiffies;
+ conn->ibc_last_send = ktime_get();
- if (rc == 0)
- return 0;
+ if (rc == 0) {
+ if (frd != NULL)
+ frd->frd_posted = true;
+ return 0;
+ }
/* NB credits are transferred in the actual
* message, which can only be the last work item */
kiblnd_close_conn(conn, rc);
- if (done)
- kiblnd_tx_done(peer_ni->ibp_ni, tx);
+ if (done)
+ kiblnd_tx_done(tx);
spin_lock(&conn->ibc_lock);
}
static void
-kiblnd_check_sends_locked(kib_conn_t *conn)
+kiblnd_check_sends_locked(struct kib_conn *conn)
{
- int ver = conn->ibc_version;
+ int ver = conn->ibc_version;
struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx;
+ struct kib_tx *tx;
/* Don't send anything until after the connection is established */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+ (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd,
+ struct kib_tx, tx_list)) != NULL) {
+ list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue_nocred,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT (!IBLND_OOB_CAPABLE(ver));
credit = 1;
- tx = list_entry(conn->ibc_tx_noops.next,
- kib_tx_t, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_noops,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
- kib_tx_t, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue,
+ struct kib_tx, tx_list);
} else
break;
}
static void
-kiblnd_tx_complete (kib_tx_t *tx, int status)
+kiblnd_tx_complete(struct kib_tx *tx, int status)
{
- int failed = (status != IB_WC_SUCCESS);
- kib_conn_t *conn = tx->tx_conn;
- int idle;
+ int failed = (status != IB_WC_SUCCESS);
+ struct kib_conn *conn = tx->tx_conn;
+ int idle;
- LASSERT (tx->tx_sending > 0);
+ if (tx->tx_sending <= 0) {
+ CERROR("Received an event on a freed tx: %p status %d\n",
+ tx, tx->tx_status);
+ return;
+ }
- if (failed) {
- if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
+ if (failed) {
+ if (conn->ibc_state == IBLND_CONN_ESTABLISHED)
CNETERR("Tx -> %s cookie %#llx"
- " sending %d waiting %d: failed %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
- status);
+ " sending %d waiting %d: failed %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ tx->tx_cookie, tx->tx_sending, tx->tx_waiting,
+ status);
- kiblnd_close_conn(conn, -EIO);
- } else {
- kiblnd_peer_alive(conn->ibc_peer);
- }
+ kiblnd_close_conn(conn, -EIO);
+ } else {
+ kiblnd_peer_alive(conn->ibc_peer);
+ }
spin_lock(&conn->ibc_lock);
conn->ibc_noops_posted--;
if (failed) {
+ tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
tx->tx_waiting = 0; /* don't wait for peer_ni */
tx->tx_status = -EIO;
+#ifdef O2IBLND_CONN_STATE_DEBUG
+ kiblnd_dump_conn_dbg(conn);
+#endif
}
idle = (tx->tx_sending == 0) && /* This is the final callback */
kiblnd_check_sends_locked(conn);
spin_unlock(&conn->ibc_lock);
- if (idle)
- kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx);
+ if (idle)
+ kiblnd_tx_done(tx);
}
+
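+/* append one scatter/gather entry covering [addr, addr + len) to the tx */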
static void
-kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx, int type, int body_nob)
+kiblnd_init_tx_sge(struct kib_tx *tx, u64 addr, unsigned int len)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
- struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq];
- struct ib_rdma_wr *wrq;
- int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+ struct ib_sge *sge = &tx->tx_sge[tx->tx_nsge];
+ struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *mr = hdev->ibh_mrs;
+#endif
+
+ *sge = (struct ib_sge) {
+#ifdef HAVE_OFED_IB_GET_DMA_MR
+ .lkey = mr->lkey,
+#else
+ .lkey = hdev->ibh_pd->local_dma_lkey,
+#endif
+ .addr = addr,
+ .length = len,
+ };
+
+ tx->tx_nsge++;
+}
+
+static struct ib_rdma_wr *
+kiblnd_init_tx_msg_payload(struct lnet_ni *ni, struct kib_tx *tx, int type,
+ int body_nob, int payload)
+{
+ struct ib_rdma_wr *wrq;
+ int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
LASSERT(tx->tx_nwrq >= 0);
LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
LASSERT(nob <= IBLND_MSG_SIZE);
- LASSERT(mr != NULL);
- kiblnd_init_msg(tx->tx_msg, type, body_nob);
-
- sge->lkey = mr->lkey;
- sge->addr = tx->tx_msgaddr;
- sge->length = nob;
+ kiblnd_init_msg(tx->tx_msg, type, body_nob + payload);
wrq = &tx->tx_wrq[tx->tx_nwrq];
- memset(wrq, 0, sizeof(*wrq));
- wrq->wr.next = NULL;
- wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
- wrq->wr.sg_list = sge;
- wrq->wr.num_sge = 1;
- wrq->wr.opcode = IB_WR_SEND;
- wrq->wr.send_flags = IB_SEND_SIGNALED;
+ *wrq = (struct ib_rdma_wr) {
+ .wr = {
+ .wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+ .num_sge = 1,
+ .sg_list = &tx->tx_sge[tx->tx_nsge],
+ .opcode = IB_WR_SEND,
+ .send_flags = IB_SEND_SIGNALED,
+ },
+ };
+
+ kiblnd_init_tx_sge(tx, tx->tx_msgaddr, nob);
tx->tx_nwrq++;
+ return wrq;
}
static int
-kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+ int resid, struct kib_rdma_desc *dstrd, u64 dstcookie)
{
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
- struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_rdma_wr *wrq;
- int rc = resid;
- int srcidx;
- int dstidx;
- int wrknob;
+ struct kib_msg *ibmsg = tx->tx_msg;
+ struct kib_rdma_desc *srcrd = tx->tx_rd;
+ struct ib_rdma_wr *wrq = NULL;
+ struct ib_sge *sge;
+ int rc = resid;
+ int srcidx;
+ int dstidx;
+ int sge_nob;
+ int wrq_sge;
- LASSERT (!in_interrupt());
- LASSERT (tx->tx_nwrq == 0);
- LASSERT (type == IBLND_MSG_GET_DONE ||
- type == IBLND_MSG_PUT_DONE);
+ LASSERT(!in_interrupt());
+ LASSERT(tx->tx_nwrq == 0 && tx->tx_nsge == 0);
+ LASSERT(type == IBLND_MSG_GET_DONE || type == IBLND_MSG_PUT_DONE);
- srcidx = dstidx = 0;
+ for (srcidx = dstidx = wrq_sge = sge_nob = 0;
+ resid > 0; resid -= sge_nob) {
+ int prev = dstidx;
- while (resid > 0) {
- if (srcidx >= srcrd->rd_nfrags) {
- CERROR("Src buffer exhausted: %d frags\n", srcidx);
- rc = -EPROTO;
- break;
- }
+ if (srcidx >= srcrd->rd_nfrags) {
+ CERROR("Src buffer exhausted: %d frags %px\n",
+ srcidx, tx);
+ rc = -EPROTO;
+ break;
+ }
- if (dstidx == dstrd->rd_nfrags) {
- CERROR("Dst buffer exhausted: %d frags\n", dstidx);
- rc = -EPROTO;
- break;
- }
+ if (dstidx >= dstrd->rd_nfrags) {
+ CERROR("Dst buffer exhausted: %d frags\n", dstidx);
+ rc = -EPROTO;
+ break;
+ }
if (tx->tx_nwrq >= conn->ibc_max_frags) {
CERROR("RDMA has too many fragments for peer_ni %s (%d), "
break;
}
- wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
- kiblnd_rd_frag_size(dstrd, dstidx)), resid);
-
- sge = &tx->tx_sge[tx->tx_nwrq];
- sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
- sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx);
- sge->length = wrknob;
-
- wrq = &tx->tx_wrq[tx->tx_nwrq];
-
- wrq->wr.next = &(wrq + 1)->wr;
- wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
- wrq->wr.sg_list = sge;
- wrq->wr.num_sge = 1;
- wrq->wr.opcode = IB_WR_RDMA_WRITE;
- wrq->wr.send_flags = 0;
-
-#ifdef HAVE_IB_RDMA_WR
- wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
- wrq->rkey = kiblnd_rd_frag_key(dstrd, dstidx);
+ sge_nob = min3(kiblnd_rd_frag_size(srcrd, srcidx),
+ kiblnd_rd_frag_size(dstrd, dstidx),
+ resid);
+
+ sge = &tx->tx_sge[tx->tx_nsge];
+ sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
+ sge->lkey = kiblnd_rd_frag_key(srcrd, srcidx);
+ sge->length = sge_nob;
+
+ if (wrq_sge == 0) {
+ wrq = &tx->tx_wrq[tx->tx_nwrq];
+
+ wrq->wr.next = &(wrq + 1)->wr;
+ wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
+ wrq->wr.sg_list = sge;
+ wrq->wr.opcode = IB_WR_RDMA_WRITE;
+ wrq->wr.send_flags = 0;
+
+#ifdef HAVE_OFED_IB_RDMA_WR
+ wrq->remote_addr = kiblnd_rd_frag_addr(dstrd,
+ dstidx);
+ wrq->rkey = kiblnd_rd_frag_key(dstrd,
+ dstidx);
#else
- wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
- wrq->wr.wr.rdma.rkey = kiblnd_rd_frag_key(dstrd, dstidx);
+ wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd,
+ dstidx);
+ wrq->wr.wr.rdma.rkey = kiblnd_rd_frag_key(dstrd,
+ dstidx);
#endif
+ }
- srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
- dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
-
- resid -= wrknob;
+ srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, sge_nob);
+ dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, sge_nob);
- tx->tx_nwrq++;
- wrq++;
- sge++;
- }
+ wrq_sge++;
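+		/* close the current work request once it holds the
+		 * configured number of SGEs, or when the destination
+		 * fragment changes: one RDMA WRITE can only target a
+		 * single remote address/rkey */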
+ if (wrq_sge == *kiblnd_tunables.kib_wrq_sge || dstidx != prev) {
+ tx->tx_nwrq++;
+ wrq->wr.num_sge = wrq_sge;
+ wrq_sge = 0;
+ }
+ tx->tx_nsge++;
+ }
- if (rc < 0) /* no RDMA if completing with failure */
- tx->tx_nwrq = 0;
+ if (rc < 0) /* no RDMA if completing with failure */
+ tx->tx_nwrq = tx->tx_nsge = 0;
ibmsg->ibm_u.completion.ibcm_status = rc;
ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
- type, sizeof (kib_completion_msg_t));
+ type, sizeof(struct kib_completion_msg));
return rc;
}
static void
-kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
{
struct list_head *q;
+ s64 timeout_ns;
LASSERT(tx->tx_nwrq > 0); /* work items set up */
LASSERT(!tx->tx_queued); /* not queued for sending already */
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) {
+ CDEBUG(D_NET, "connection with %s is disconnected\n",
+		       conn->ibc_peer ? libcfs_nid2str(conn->ibc_peer->ibp_nid) : "NULL");
+
+ tx->tx_status = -ECONNABORTED;
+ tx->tx_waiting = 0;
+ if (tx->tx_conn != NULL) {
+ /* PUT_DONE first attached to conn as a PUT_REQ */
+ LASSERT(tx->tx_conn == conn);
+ LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
+ tx->tx_conn = NULL;
+ kiblnd_conn_decref(conn);
+ }
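+		/* park the tx as a zombie; it is finalised when the
+		 * connection itself is cleaned up */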
+ list_add(&tx->tx_list, &conn->ibc_zombie_txs);
+
+ return;
+ }
+
+ timeout_ns = kiblnd_timeout() * NSEC_PER_SEC;
tx->tx_queued = 1;
- tx->tx_deadline = jiffies +
- msecs_to_jiffies(*kiblnd_tunables.kib_timeout *
- MSEC_PER_SEC);
+ tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);
if (tx->tx_conn == NULL) {
kiblnd_conn_addref(conn);
}
static void
-kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
{
spin_lock(&conn->ibc_lock);
kiblnd_queue_tx_locked(tx, conn);
spin_unlock(&conn->ibc_lock);
}
-static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
- struct sockaddr_in *srcaddr,
- struct sockaddr_in *dstaddr,
- int timeout_ms)
+static int
+kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid,
+ struct sockaddr_in *srcaddr,
+ struct sockaddr_in *dstaddr,
+ int timeout_ms)
{
unsigned short port;
int rc;
}
}
- CERROR("Failed to bind to a free privileged port\n");
- return rc;
+ CERROR("cannot bind to a free privileged port: rc = %d\n", rc);
+
+ return rc;
+}
+
+static int
+kiblnd_resolve_addr(struct rdma_cm_id *cmid,
+ struct sockaddr_in *srcaddr,
+ struct sockaddr_in *dstaddr,
+ int timeout_ms)
+{
+ const struct cred *old_creds = NULL;
+ struct cred *new_creds;
+ int rc;
+
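+	/* binding to a privileged source port needs
+	 * CAP_NET_BIND_SERVICE; raise it on a temporary credential
+	 * override and revert once resolution completes */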
+ if (!capable(CAP_NET_BIND_SERVICE)) {
+ new_creds = prepare_kernel_cred(NULL);
+ if (!new_creds)
+ return -ENOMEM;
+
+ cap_raise(new_creds->cap_effective, CAP_NET_BIND_SERVICE);
+ old_creds = override_creds(new_creds);
+ }
+
+ rc = kiblnd_resolve_addr_cap(cmid, srcaddr, dstaddr, timeout_ms);
+
+ if (old_creds)
+ revert_creds(old_creds);
+
+ return rc;
}
static void
-kiblnd_connect_peer (kib_peer_ni_t *peer_ni)
+kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
{
struct rdma_cm_id *cmid;
- kib_dev_t *dev;
- kib_net_t *net = peer_ni->ibp_ni->ni_data;
+ struct kib_dev *dev;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
struct sockaddr_in srcaddr;
struct sockaddr_in dstaddr;
- int rc;
+ int rc;
LASSERT (net != NULL);
LASSERT (peer_ni->ibp_connecting > 0);
- LASSERT(!peer_ni->ibp_reconnecting);
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP,
- IB_QPT_RC);
+ cmid = kiblnd_rdma_create_id(peer_ni->ibp_ni->ni_net_ns,
+ kiblnd_cm_callback, peer_ni,
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cmid)) {
CERROR("Can't create CMID for %s: %ld\n",
kiblnd_peer_addref(peer_ni); /* cmid's ref */
- if (*kiblnd_tunables.kib_use_priv_port) {
- rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- } else {
- rc = rdma_resolve_addr(cmid,
- (struct sockaddr *)&srcaddr,
- (struct sockaddr *)&dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- }
- if (rc != 0) {
- /* Can't initiate address resolution: */
- CERROR("Can't resolve addr for %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), rc);
- goto failed2;
- }
-
- LASSERT (cmid->device != NULL);
- CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
- libcfs_nid2str(peer_ni->ibp_nid), dev->ibd_ifname,
- &dev->ibd_ifip, cmid->device->name);
+ if (*kiblnd_tunables.kib_use_priv_port) {
+ rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
+ kiblnd_timeout() * 1000);
+ } else {
+ rc = rdma_resolve_addr(cmid,
+ (struct sockaddr *)&srcaddr,
+ (struct sockaddr *)&dstaddr,
+ kiblnd_timeout() * 1000);
+ }
+ if (rc != 0) {
+ /* Can't initiate address resolution: */
+ CERROR("Can't resolve addr for %s: %d\n",
+ libcfs_nid2str(peer_ni->ibp_nid), rc);
+ goto failed2;
+ }
return;
}
bool
-kiblnd_reconnect_peer(kib_peer_ni_t *peer_ni)
+kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- char *reason = NULL;
- struct list_head txs;
- unsigned long flags;
-
- INIT_LIST_HEAD(&txs);
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ char *reason = NULL;
+ LIST_HEAD(txs);
+ unsigned long flags;
write_lock_irqsave(glock, flags);
if (peer_ni->ibp_reconnecting == 0) {
goto no_reconnect;
}
- LASSERT(!peer_ni->ibp_accepting && !peer_ni->ibp_connecting &&
- list_empty(&peer_ni->ibp_conns));
- peer_ni->ibp_reconnecting = 0;
+ if (peer_ni->ibp_accepting)
+ CNETERR("Detecting race between accepting and reconnecting\n");
+ peer_ni->ibp_reconnecting--;
if (!kiblnd_peer_active(peer_ni)) {
list_splice_init(&peer_ni->ibp_tx_queue, &txs);
CWARN("Abort reconnection of %s: %s\n",
libcfs_nid2str(peer_ni->ibp_nid), reason);
- kiblnd_txlist_done(peer_ni->ibp_ni, &txs, -ECONNABORTED);
+ kiblnd_txlist_done(&txs, -ECONNABORTED,
+ LNET_MSG_STATUS_LOCAL_ABORTED);
return false;
}
void
-kiblnd_launch_tx(struct lnet_ni *ni, kib_tx_t *tx, lnet_nid_t nid)
+kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
{
- kib_peer_ni_t *peer_ni;
- kib_peer_ni_t *peer2;
- kib_conn_t *conn;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- unsigned long flags;
- int rc;
+ struct kib_peer_ni *peer_ni;
+ struct kib_peer_ni *peer2;
+ struct kib_conn *conn;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ unsigned long flags;
+ int rc;
+ int i;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ s64 timeout_ns;
- /* If I get here, I've committed to send, so I complete the tx with
- * failure on any problems */
+ /* If I get here, I've committed to send, so I complete the tx with
+ * failure on any problems
+ */
- LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */
- LASSERT (tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */
+ LASSERT(!tx || !tx->tx_conn); /* only set when assigned a conn */
+ LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */
- /* First time, just use a read lock since I expect to find my peer_ni
- * connected */
+ /* First time, just use a read lock since I expect to find my peer_ni
+ * connected
+ */
read_lock_irqsave(g_lock, flags);
- peer_ni = kiblnd_find_peer_locked(ni, nid);
+ peer_ni = kiblnd_find_peer_locked(ni, nid);
if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) {
- /* Found a peer_ni with an established connection */
- conn = kiblnd_get_conn_locked(peer_ni);
- kiblnd_conn_addref(conn); /* 1 ref for me... */
+ /* Found a peer_ni with an established connection */
+ conn = kiblnd_get_conn_locked(peer_ni);
+ kiblnd_conn_addref(conn); /* 1 ref for me... */
read_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- return;
- }
+ if (tx != NULL)
+ kiblnd_queue_tx(tx, conn);
+ kiblnd_conn_decref(conn); /* ...to here */
+ return;
+ }
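+	/* tx deadlines are tracked in ktime; compute the timeout once
+	 * before the tx is parked on a peer_ni queue below */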
+ timeout_ns = kiblnd_timeout() * NSEC_PER_SEC;
read_unlock(g_lock);
/* Re-try with a write lock */
write_lock(g_lock);
- peer_ni = kiblnd_find_peer_locked(ni, nid);
- if (peer_ni != NULL) {
+ peer_ni = kiblnd_find_peer_locked(ni, nid);
+ if (peer_ni != NULL) {
if (list_empty(&peer_ni->ibp_conns)) {
- /* found a peer_ni, but it's still connecting... */
+ /* found a peer_ni, but it's still connecting... */
LASSERT(kiblnd_peer_connecting(peer_ni));
- if (tx != NULL)
+ if (tx != NULL) {
+ tx->tx_deadline = ktime_add_ns(ktime_get(),
+ timeout_ns);
list_add_tail(&tx->tx_list,
- &peer_ni->ibp_tx_queue);
+ &peer_ni->ibp_tx_queue);
+ }
write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer_ni);
write_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- }
- return;
- }
+ if (tx != NULL)
+ kiblnd_queue_tx(tx, conn);
+ kiblnd_conn_decref(conn); /* ...to here */
+ }
+ return;
+ }
write_unlock_irqrestore(g_lock, flags);
if (tx != NULL) {
tx->tx_status = -EHOSTUNREACH;
tx->tx_waiting = 0;
- kiblnd_tx_done(ni, tx);
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ kiblnd_tx_done(tx);
}
return;
}
write_lock_irqsave(g_lock, flags);
- peer2 = kiblnd_find_peer_locked(ni, nid);
- if (peer2 != NULL) {
+ peer2 = kiblnd_find_peer_locked(ni, nid);
+ if (peer2 != NULL) {
if (list_empty(&peer2->ibp_conns)) {
- /* found a peer_ni, but it's still connecting... */
+ /* found a peer_ni, but it's still connecting... */
LASSERT(kiblnd_peer_connecting(peer2));
- if (tx != NULL)
+ if (tx != NULL) {
+ tx->tx_deadline = ktime_add_ns(ktime_get(),
+ timeout_ns);
list_add_tail(&tx->tx_list,
- &peer2->ibp_tx_queue);
+ &peer2->ibp_tx_queue);
+ }
write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer2);
write_unlock_irqrestore(g_lock, flags);
- if (tx != NULL)
- kiblnd_queue_tx(tx, conn);
- kiblnd_conn_decref(conn); /* ...to here */
- }
+ if (tx != NULL)
+ kiblnd_queue_tx(tx, conn);
+ kiblnd_conn_decref(conn); /* ...to here */
+ }
- kiblnd_peer_decref(peer_ni);
- return;
- }
+ kiblnd_peer_decref(peer_ni);
+ return;
+ }
- /* Brand new peer_ni */
- LASSERT (peer_ni->ibp_connecting == 0);
- peer_ni->ibp_connecting = 1;
+ /* Brand new peer_ni */
+ LASSERT(peer_ni->ibp_connecting == 0);
+ tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+ peer_ni->ibp_connecting = tunables->lnd_conns_per_peer;
- /* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+ /* always called with a ref on ni, which prevents ni being shutdown */
+ LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0);
- if (tx != NULL)
+ if (tx != NULL) {
+ tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);
list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
+ }
- kiblnd_peer_addref(peer_ni);
- list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
+ kiblnd_peer_addref(peer_ni);
+ hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
write_unlock_irqrestore(g_lock, flags);
- kiblnd_connect_peer(peer_ni);
- kiblnd_peer_decref(peer_ni);
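+	/* open one connection per configured conns_per_peer */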
+ for (i = 0; i < tunables->lnd_conns_per_peer; i++)
+ kiblnd_connect_peer(peer_ni);
+ kiblnd_peer_decref(peer_ni);
}
int
kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
+ struct kib_dev *dev = ((struct kib_net *)ni->ni_data)->ibn_dev;
struct lnet_hdr *hdr = &lntmsg->msg_hdr;
- int type = lntmsg->msg_type;
- struct lnet_process_id target = lntmsg->msg_target;
- int target_is_router = lntmsg->msg_target_is_router;
- int routing = lntmsg->msg_routing;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- kib_msg_t *ibmsg;
- kib_rdma_desc_t *rd;
- kib_tx_t *tx;
- int nob;
- int rc;
-
- /* NB 'private' is different depending on what we're sending.... */
-
- CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
- payload_nob, payload_niov, libcfs_id2str(target));
-
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
+ int type = lntmsg->msg_type;
+ struct lnet_processid *target = &lntmsg->msg_target;
+ int target_is_router = lntmsg->msg_target_is_router;
+ int routing = lntmsg->msg_routing;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct bio_vec *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ struct lnet_libmd *msg_md = lntmsg->msg_md;
+ bool gpu;
+ struct kib_msg *ibmsg;
+ struct kib_rdma_desc *rd;
+ struct kib_tx *tx;
+ int nob;
+ int rc;
+
+ /* NB 'private' is different depending on what we're sending.... */
+
+ CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
+ payload_nob, payload_niov, libcfs_idstr(target));
+
+ LASSERT(payload_nob == 0 || payload_niov > 0);
/* Thread context */
- LASSERT (!in_interrupt());
- /* payload is either all vaddrs or all pages */
- LASSERT (!(payload_kiov != NULL && payload_iov != NULL));
+ LASSERT(!in_interrupt());
+
+ tx = kiblnd_get_idle_tx(ni, lnet_nid_to_nid4(&target->nid));
+ if (tx == NULL) {
+ CERROR("Can't allocate %s txd for %s\n",
+ lnet_msgtyp2str(type),
+ libcfs_nidstr(&target->nid));
+ return -ENOMEM;
+ }
+ ibmsg = tx->tx_msg;
+ gpu = lnet_md_is_gpu(msg_md);
switch (type) {
default:
LBUG();
return (-EIO);
- case LNET_MSG_ACK:
- LASSERT (payload_nob == 0);
- break;
+ case LNET_MSG_ACK:
+ LASSERT(payload_nob == 0);
+ break;
- case LNET_MSG_GET:
- if (routing || target_is_router)
- break; /* send IMMEDIATE */
+ case LNET_MSG_GET:
+ if (routing || target_is_router)
+ break; /* send IMMEDIATE */
- /* is the REPLY message too small for RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
+ /* is the REPLY message too small for RDMA? */
+ nob = offsetof(struct kib_msg,
+ ibm_u.immediate.ibim_payload[msg_md->md_length]);
+ if (nob <= IBLND_MSG_SIZE && !gpu)
+ break; /* send IMMEDIATE */
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate txd for GET to %s\n",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
rd = &ibmsg->ibm_u.get.ibgm_rd;
- if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0)
- rc = kiblnd_setup_rd_iov(ni, tx, rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.iov,
- 0, lntmsg->msg_md->md_length);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_iov.kiov,
- 0, lntmsg->msg_md->md_length);
+ tx->tx_gpu = gpu;
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+ msg_md->md_niov,
+ msg_md->md_kiov,
+ 0, msg_md->md_length);
if (rc != 0) {
CERROR("Can't setup GET sink for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- kiblnd_tx_done(ni, tx);
+ libcfs_nidstr(&target->nid), rc);
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ kiblnd_tx_done(tx);
return -EIO;
}
- nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
+ nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
- ibmsg->ibm_u.get.ibgm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.get.ibgm_hdr);
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
- tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
- if (tx->tx_lntmsg[1] == NULL) {
- CERROR("Can't create reply for GET -> %s\n",
- libcfs_nid2str(target.nid));
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
+ tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
+ if (tx->tx_lntmsg[1] == NULL) {
+ CERROR("Can't create reply for GET -> %s\n",
+ libcfs_nidstr(&target->nid));
+ kiblnd_tx_done(tx);
+ return -EIO;
+ }
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */
- tx->tx_waiting = 1; /* waiting for GET_DONE */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
+ /* finalise lntmsg[0,1] on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+ tx->tx_waiting = 1; /* waiting for GET_DONE */
+ kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
+ return 0;
- case LNET_MSG_REPLY:
- case LNET_MSG_PUT:
- /* Is the payload small enough not to need RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
+ case LNET_MSG_REPLY:
+ case LNET_MSG_PUT:
+ /* Is the payload small enough not to need RDMA? */
+ nob = offsetof(struct kib_msg,
+ ibm_u.immediate.ibim_payload[payload_nob]);
+ if (nob <= IBLND_MSG_SIZE && !gpu)
+ break; /* send IMMEDIATE */
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate %s txd for %s\n",
- type == LNET_MSG_PUT ? "PUT" : "REPLY",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
+ tx->tx_gpu = gpu;
- if (payload_kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
- payload_niov, payload_iov,
- payload_offset, payload_nob);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
- if (rc != 0) {
- CERROR("Can't setup PUT src for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- kiblnd_tx_done(ni, tx);
- return -EIO;
- }
+ rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+ if (rc != 0) {
+ CERROR("Can't setup PUT src for %s: %d\n",
+ libcfs_nidstr(&target->nid), rc);
+ kiblnd_tx_done(tx);
+ return -EIO;
+ }
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
- ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+ lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.putreq.ibprm_hdr);
+ ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
+ sizeof(struct kib_putreq_msg));
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
- }
+ /* finalise lntmsg[0,1] on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+ tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
+ kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
+ return 0;
+ }
- /* send IMMEDIATE */
+ /* send IMMEDIATE */
+ LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
+ <= IBLND_MSG_SIZE);
- LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
- <= IBLND_MSG_SIZE);
+ ibmsg = tx->tx_msg;
+ lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.immediate.ibim_hdr);
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR ("Can't send %d to %s: tx descs exhausted\n",
- type, libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
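+ /* on fast-reg capable devices, map the payload and let the HCA
+ * gather it through an SGE chain rather than copying it into
+ * the message buffer */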
+ if (IS_FAST_REG_DEV(dev) && payload_nob) {
+ struct ib_rdma_wr *wrq;
+ int i;
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
+ nob = offsetof(struct kib_immediate_msg, ibim_payload[0]);
+ wrq = kiblnd_init_tx_msg_payload(ni, tx, IBLND_MSG_IMMEDIATE,
+ nob, payload_nob);
- if (payload_kiov != NULL)
- lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
- else
- lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
- payload_niov, payload_iov,
- payload_offset, payload_nob);
+ rd = tx->tx_rd;
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+ if (rc != 0) {
+ CERROR("Can't setup IMMEDIATE src for %s: %d\n",
+ libcfs_nidstr(&target->nid), rc);
+ kiblnd_tx_done(tx);
+ return -EIO;
+ }
- nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
+ /* generate an SGE chain covering the mapped payload fragments */
+ for (i = 0; i < rd->rd_nfrags; i++) {
+ kiblnd_init_tx_sge(tx, rd->rd_frags[i].rf_addr,
+ rd->rd_frags[i].rf_nob);
+ wrq->wr.num_sge++;
+ }
+ } else {
+ lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
+ offsetof(struct kib_msg,
+ ibm_u.immediate.ibim_payload),
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
+ nob = offsetof(struct kib_immediate_msg,
+ ibim_payload[payload_nob]);
+
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
+ }
+
+ /* finalise lntmsg on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+
+ kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
+ return 0;
}
static void
-kiblnd_reply(struct lnet_ni *ni, kib_rx_t *rx, struct lnet_msg *lntmsg)
+kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
{
- struct lnet_process_id target = lntmsg->msg_target;
- unsigned int niov = lntmsg->msg_niov;
- struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
- unsigned int offset = lntmsg->msg_offset;
- unsigned int nob = lntmsg->msg_len;
- kib_tx_t *tx;
- int rc;
+ struct lnet_processid *target = &lntmsg->msg_target;
+ unsigned int niov = lntmsg->msg_niov;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
+ unsigned int offset = lntmsg->msg_offset;
+ unsigned int nob = lntmsg->msg_len;
+ struct lnet_libmd *msg_md = lntmsg->msg_md;
+ struct kib_tx *tx;
+ int rc;
tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
- CERROR("Can't get tx for REPLY to %s\n",
- libcfs_nid2str(target.nid));
- goto failed_0;
- }
+ if (tx == NULL) {
+ CERROR("Can't get tx for REPLY to %s\n",
+ libcfs_nidstr(&target->nid));
+ goto failed_0;
+ }
- if (nob == 0)
- rc = 0;
- else if (kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd,
- niov, iov, offset, nob);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
- niov, kiov, offset, nob);
+ tx->tx_gpu = lnet_md_is_gpu(msg_md);
- if (rc != 0) {
- CERROR("Can't setup GET src for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- goto failed_1;
- }
+ if (nob == 0)
+ rc = 0;
+ else
+ rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
+ niov, kiov, offset, nob);
- rc = kiblnd_init_rdma(rx->rx_conn, tx,
- IBLND_MSG_GET_DONE, nob,
- &rx->rx_msg->ibm_u.get.ibgm_rd,
- rx->rx_msg->ibm_u.get.ibgm_cookie);
- if (rc < 0) {
- CERROR("Can't setup rdma for GET from %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- goto failed_1;
- }
-
- if (nob == 0) {
- /* No RDMA: local completion may happen now! */
- lnet_finalize(ni, lntmsg, 0);
- } else {
- /* RDMA: lnet_finalize(lntmsg) when it
- * completes */
- tx->tx_lntmsg[0] = lntmsg;
- }
+ if (rc != 0) {
+ CERROR("Can't setup GET src for %s: %d\n",
+ libcfs_nidstr(&target->nid), rc);
+ goto failed_1;
+ }
- kiblnd_queue_tx(tx, rx->rx_conn);
- return;
+ rc = kiblnd_init_rdma(rx->rx_conn, tx,
+ IBLND_MSG_GET_DONE, nob,
+ &rx->rx_msg->ibm_u.get.ibgm_rd,
+ rx->rx_msg->ibm_u.get.ibgm_cookie);
+ if (rc < 0) {
+ CERROR("Can't setup rdma for GET from %s: %d\n",
+ libcfs_nidstr(&target->nid), rc);
+ goto failed_1;
+ }
+
+ if (nob == 0) {
+ /* No RDMA: local completion may happen now! */
+ lnet_finalize(lntmsg, 0);
+ } else {
+ /* RDMA: lnet_finalize(lntmsg) when it
+ * completes */
+ tx->tx_lntmsg[0] = lntmsg;
+ }
+
+ kiblnd_queue_tx(tx, rx->rx_conn);
+ return;
+
+failed_1:
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ kiblnd_tx_done(tx);
+failed_0:
+ lnet_finalize(lntmsg, -EIO);
+}
+
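+/* return the LNet device priority of this NI's underlying DMA device
+ * (used by LNet when preferring interfaces close to the buffer,
+ * e.g. for GPU Direct) */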
+unsigned int
+kiblnd_get_dev_prio(struct lnet_ni *ni, unsigned int dev_idx)
+{
+ struct kib_net *net = ni->ni_data;
+ struct device *dev = NULL;
+
+ if (net)
+ dev = net->ibn_dev->ibd_hdev->ibh_ibdev->dma_device;
+
+ return lnet_get_dev_prio(dev, dev_idx);
- failed_1:
- kiblnd_tx_done(ni, tx);
- failed_0:
- lnet_finalize(ni, lntmsg, -EIO);
}
int
kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+ int delayed, unsigned int niov, struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- kib_rx_t *rx = private;
- kib_msg_t *rxmsg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- kib_tx_t *tx;
+ struct kib_rx *rx = private;
+ struct kib_msg *rxmsg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_tx *tx;
+ __u64 ibprm_cookie;
int nob;
int post_credit = IBLND_POSTRX_PEER_CREDIT;
int rc = 0;
LASSERT (mlen <= rlen);
LASSERT (!in_interrupt());
- /* Either all pages or all vaddrs */
- LASSERT (!(kiov != NULL && iov != NULL));
switch (rxmsg->ibm_type) {
default:
LBUG();
+ /* fallthrough */
+ case IBLND_MSG_IMMEDIATE:
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
+ if (nob > rx->rx_nob) {
+ CERROR("Immediate message from %s too big: %d(%d)\n",
+ libcfs_nidstr(&lntmsg->msg_hdr.src_nid),
+ nob, rx->rx_nob);
+ rc = -EPROTO;
+ break;
+ }
- case IBLND_MSG_IMMEDIATE:
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
- if (nob > rx->rx_nob) {
- CERROR ("Immediate message from %s too big: %d(%d)\n",
- libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
- nob, rx->rx_nob);
- rc = -EPROTO;
- break;
- }
-
- if (kiov != NULL)
- lnet_copy_flat2kiov(niov, kiov, offset,
- IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
- mlen);
- else
- lnet_copy_flat2iov(niov, iov, offset,
- IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
- mlen);
- lnet_finalize (ni, lntmsg, 0);
- break;
+ lnet_copy_flat2kiov(niov, kiov, offset,
+ IBLND_MSG_SIZE, rxmsg,
+ offsetof(struct kib_msg,
+ ibm_u.immediate.ibim_payload),
+ mlen);
+ lnet_finalize(lntmsg, 0);
+ break;
case IBLND_MSG_PUT_REQ: {
- kib_msg_t *txmsg;
- kib_rdma_desc_t *rd;
-
- if (mlen == 0) {
- lnet_finalize(ni, lntmsg, 0);
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0,
- rxmsg->ibm_u.putreq.ibprm_cookie);
- break;
- }
+ struct kib_msg *txmsg;
+ struct kib_rdma_desc *rd;
+ struct lnet_libmd *msg_md = lntmsg->msg_md;
+
+ ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+ if (mlen == 0) {
+ lnet_finalize(lntmsg, 0);
+ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
+ 0, ibprm_cookie);
+ break;
+ }
tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
- CERROR("Can't allocate tx for %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- /* Not replying will break the connection */
- rc = -ENOMEM;
- break;
- }
+ if (tx == NULL) {
+ CERROR("Can't allocate tx for %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ /* Not replying will break the connection */
+ rc = -ENOMEM;
+ break;
+ }
+
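+ /* note whether the PUT sink MD refers to GPU memory so the
+ * buffer can be mapped appropriately */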
+ tx->tx_gpu = lnet_md_is_gpu(msg_md);
txmsg = tx->tx_msg;
rd = &txmsg->ibm_u.putack.ibpam_rd;
- if (kiov == NULL)
- rc = kiblnd_setup_rd_iov(ni, tx, rd,
- niov, iov, offset, mlen);
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- niov, kiov, offset, mlen);
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+ niov, kiov, offset, mlen);
if (rc != 0) {
CERROR("Can't setup PUT sink for %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_tx_done(ni, tx);
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ kiblnd_tx_done(tx);
/* tell peer_ni it's over */
- kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc,
- rxmsg->ibm_u.putreq.ibprm_cookie);
+ kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
+ rc, ibprm_cookie);
break;
}
- nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
- txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+ nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
+ txmsg->ibm_u.putack.ibpam_src_cookie = ibprm_cookie;
txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob);
return rc;
}
-int
-kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- atomic_inc(&kiblnd_data.kib_nthreads);
- return 0;
-}
-
static void
kiblnd_thread_fini (void)
{
}
static void
-kiblnd_peer_alive (kib_peer_ni_t *peer_ni)
+kiblnd_peer_alive(struct kib_peer_ni *peer_ni)
{
- /* This is racy, but everyone's only writing cfs_time_current() */
- peer_ni->ibp_last_alive = cfs_time_current();
+ /* This is racy, but everyone's only writing ktime_get_seconds() */
+ peer_ni->ibp_last_alive = ktime_get_seconds();
smp_mb();
}
static void
-kiblnd_peer_notify (kib_peer_ni_t *peer_ni)
+kiblnd_peer_notify(struct kib_peer_ni *peer_ni)
{
- int error = 0;
- cfs_time_t last_alive = 0;
- unsigned long flags;
+ int error = 0;
+ time64_t last_alive = 0;
+ unsigned long flags;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
- error = peer_ni->ibp_error;
- peer_ni->ibp_error = 0;
+ error = peer_ni->ibp_error;
+ peer_ni->ibp_error = 0;
- last_alive = peer_ni->ibp_last_alive;
- }
+ last_alive = peer_ni->ibp_last_alive;
+ }
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (error != 0)
- lnet_notify(peer_ni->ibp_ni,
- peer_ni->ibp_nid, 0, last_alive);
+ if (error != 0) {
+ struct lnet_nid nid;
+
+ lnet_nid4_to_nid(peer_ni->ibp_nid, &nid);
+ lnet_notify(peer_ni->ibp_ni, &nid,
+ false, false, last_alive);
+ }
}
void
-kiblnd_close_conn_locked (kib_conn_t *conn, int error)
+kiblnd_close_conn_locked(struct kib_conn *conn, int error)
{
/* This just does the immediate housekeeping. 'error' is zero for a
* normal shutdown which can happen only after the connection has been
* connection to be finished off by the connd. Otherwise the connd is
* already dealing with it (either to set it up or tear it down).
* Caller holds kib_global_lock exclusively in irq context */
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
- kib_dev_t *dev;
- unsigned long flags;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ struct kib_dev *dev;
+ unsigned long flags;
LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
list_empty(&conn->ibc_tx_queue) &&
list_empty(&conn->ibc_tx_queue_rsrvd) &&
list_empty(&conn->ibc_tx_queue_nocred) &&
- list_empty(&conn->ibc_active_txs)) {
- CDEBUG(D_NET, "closing conn to %s\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- } else {
- CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
- libcfs_nid2str(peer_ni->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ?
+ list_empty(&conn->ibc_active_txs))
+ CDEBUG(D_NET, "closing conn %p to %s\n",
+ conn,
+ libcfs_nid2str(peer_ni->ibp_nid));
+ else
+ CNETERR("Closing conn %p to %s: error %d%s%s%s%s%s\n",
+ conn,
+ libcfs_nid2str(peer_ni->ibp_nid), error,
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ?
+ "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ?
"" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ?
- "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
- }
-
- dev = ((kib_net_t *)peer_ni->ibp_ni->ni_data)->ibn_dev;
+ list_empty(&conn->ibc_tx_queue_nocred) ?
+ "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
+
+ dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev;
+ if (peer_ni->ibp_next_conn == conn)
+ /* clear next_conn so it won't be used */
+ peer_ni->ibp_next_conn = NULL;
list_del(&conn->ibc_list);
- /* connd (see below) takes over ibc_list's ref */
+ /* connd (see below) takes over ibc_list's ref */
if (list_empty(&peer_ni->ibp_conns) && /* no more conns */
kiblnd_peer_active(peer_ni)) { /* still in peer_ni table */
}
void
-kiblnd_close_conn(kib_conn_t *conn, int error)
+kiblnd_close_conn(struct kib_conn *conn, int error)
{
unsigned long flags;
}
static void
-kiblnd_handle_early_rxs(kib_conn_t *conn)
+kiblnd_handle_early_rxs(struct kib_conn *conn)
{
- unsigned long flags;
- kib_rx_t *rx;
+ unsigned long flags;
+ struct kib_rx *rx;
LASSERT(!in_interrupt());
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!list_empty(&conn->ibc_early_rxs)) {
- rx = list_entry(conn->ibc_early_rxs.next,
- kib_rx_t, rx_list);
+ while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs,
+ struct kib_rx,
+ rx_list)) != NULL) {
list_del(&rx->rx_list);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
}
-static void
-kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
+void
+kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
- struct list_head *tmp;
- struct list_head *nxt;
- kib_tx_t *tx;
+ LIST_HEAD(zombies);
+ struct kib_tx *nxt;
+ struct kib_tx *tx;
spin_lock(&conn->ibc_lock);
- list_for_each_safe(tmp, nxt, txs) {
- tx = list_entry(tmp, kib_tx_t, tx_list);
-
+ list_for_each_entry_safe(tx, nxt, txs, tx_list) {
if (txs == &conn->ibc_active_txs) {
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_waiting ||
tx->tx_sending != 0);
+ if (conn->ibc_comms_error == -ETIMEDOUT) {
+ if (tx->tx_waiting && !tx->tx_sending)
+ tx->tx_hstatus =
+ LNET_MSG_STATUS_REMOTE_TIMEOUT;
+ else if (tx->tx_sending)
+ tx->tx_hstatus =
+ LNET_MSG_STATUS_NETWORK_TIMEOUT;
+ }
} else {
LASSERT(tx->tx_queued);
+ if (conn->ibc_comms_error == -ETIMEDOUT)
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
+ else
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
+ }
+
+ tx->tx_status = -ECONNABORTED;
+ tx->tx_waiting = 0;
+
+ /*
+ * TODO: This makes an assumption that
+ * kiblnd_tx_complete() will be called for each tx. If
+ * that event is dropped we could end up with stale
+ * connections floating around. We'd like to deal with
+ * that in a better way.
+ *
+ * Also that means we can exceed the timeout by many
+ * seconds.
+ */
+ if (tx->tx_sending == 0) {
+ tx->tx_queued = 0;
+ list_move(&tx->tx_list, &zombies);
+ } else {
+ /* keep tx until cq destroy */
+ list_move(&tx->tx_list, &conn->ibc_zombie_txs);
+ conn->ibc_waits++;
}
+ }
+
+ spin_unlock(&conn->ibc_lock);
+
+ /*
+ * Transmits are aborted as part of finalizing the connection,
+ * and the connection is finalized on error.
+ * Passing LNET_MSG_STATUS_OK to txlist_done() will not
+ * override the value already set in tx->tx_hstatus above.
+ */
+ kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
+}
+
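+/* check whether any zombie tx can be dropped because LNet has already
+ * discarded its MD; once tx_sending reaches zero the tx's connection
+ * ref is released. Returns true if a ref was dropped. */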
+static bool
+kiblnd_tx_may_discard(struct kib_conn *conn)
+{
+ bool rc = false;
+ struct kib_tx *nxt;
+ struct kib_tx *tx;
- tx->tx_status = -ECONNABORTED;
- tx->tx_waiting = 0;
+ spin_lock(&conn->ibc_lock);
- if (tx->tx_sending == 0) {
- tx->tx_queued = 0;
- list_del(&tx->tx_list);
- list_add(&tx->tx_list, &zombies);
+ list_for_each_entry_safe(tx, nxt, &conn->ibc_zombie_txs, tx_list) {
+ if (tx->tx_sending > 0 && tx->tx_lntmsg[0] &&
+ lnet_md_discarded(tx->tx_lntmsg[0]->msg_md)) {
+ tx->tx_sending--;
+ if (tx->tx_sending == 0) {
+ kiblnd_conn_decref(tx->tx_conn);
+ tx->tx_conn = NULL;
+ rc = true;
+ }
}
}
spin_unlock(&conn->ibc_lock);
-
- kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED);
+ return rc;
}
static void
-kiblnd_finalise_conn (kib_conn_t *conn)
+kiblnd_finalise_conn(struct kib_conn *conn)
{
LASSERT (!in_interrupt());
LASSERT (conn->ibc_state > IBLND_CONN_INIT);
- kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
-
/* abort_receives moves QP state to IB_QPS_ERR. This is only required
* for connections that didn't get as far as being connected, because
* rdma_disconnect() does this for free. */
kiblnd_abort_receives(conn);
+ kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
+
/* Complete all tx descs not waiting for sends to complete.
* NB we should be safe from RDMA now that the QP has changed state */
+ CDEBUG(D_NET, "abort connection with %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
+
kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
}
static void
-kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error)
+kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
+ int error)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
- unsigned long flags;
+ LIST_HEAD(zombies);
+ unsigned long flags;
+ enum lnet_msg_hstatus hstatus;
- LASSERT (error != 0);
- LASSERT (!in_interrupt());
+ LASSERT(error != 0);
+ LASSERT(!in_interrupt());
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
peer_ni->ibp_reconnected = 0;
if (list_empty(&peer_ni->ibp_conns)) {
/* Take peer_ni's blocked transmits to complete with error */
- list_add(&zombies, &peer_ni->ibp_tx_queue);
- list_del_init(&peer_ni->ibp_tx_queue);
+ list_splice_init(&peer_ni->ibp_tx_queue, &zombies);
if (kiblnd_peer_active(peer_ni))
kiblnd_unlink_peer_locked(peer_ni);
CNETERR("Deleting messages for %s: connection failed\n",
libcfs_nid2str(peer_ni->ibp_nid));
- kiblnd_txlist_done(peer_ni->ibp_ni, &zombies, -EHOSTUNREACH);
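+ /* map the connection failure to an LNet health status so the
+ * health layer can account for it when finalizing the txs */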
+ switch (error) {
+ case -EHOSTUNREACH:
+ case -ETIMEDOUT:
+ hstatus = LNET_MSG_STATUS_NETWORK_TIMEOUT;
+ break;
+ case -ECONNREFUSED:
+ hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
+ break;
+ default:
+ hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
+ break;
+ }
+
+ kiblnd_txlist_done(&zombies, error, hstatus);
}
static void
-kiblnd_connreq_done(kib_conn_t *conn, int status)
+kiblnd_connreq_done(struct kib_conn *conn, int status)
{
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
- kib_tx_t *tx;
- struct list_head txs;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ struct kib_tx *tx;
+ LIST_HEAD(txs);
unsigned long flags;
int active;
(conn->ibc_state == IBLND_CONN_PASSIVE_WAIT &&
peer_ni->ibp_accepting > 0));
- LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
- conn->ibc_connvars = NULL;
+ LIBCFS_FREE(conn->ibc_connvars, sizeof(*conn->ibc_connvars));
+ conn->ibc_connvars = NULL;
- if (status != 0) {
- /* failed to establish connection */
- kiblnd_peer_connect_failed(peer_ni, active, status);
- kiblnd_finalise_conn(conn);
- return;
- }
+ if (status != 0) {
+ /* failed to establish connection */
+ kiblnd_peer_connect_failed(peer_ni, active, status);
+ kiblnd_finalise_conn(conn);
+ return;
+ }
- /* connection established */
+ /* connection established */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- conn->ibc_last_send = jiffies;
- kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
- kiblnd_peer_alive(peer_ni);
+ conn->ibc_last_send = ktime_get();
+ kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
+ kiblnd_peer_alive(peer_ni);
/* Add conn to peer_ni's list and nuke any dangling conns from a different
* peer_ni instance... */
}
/* grab pending txs while I have the lock */
- list_add(&txs, &peer_ni->ibp_tx_queue);
- list_del_init(&peer_ni->ibp_tx_queue);
+ list_splice_init(&peer_ni->ibp_tx_queue, &txs);
if (!kiblnd_peer_active(peer_ni) || /* peer_ni has been deleted */
conn->ibc_comms_error != 0) { /* error has happened already */
- struct lnet_ni *ni = peer_ni->ibp_ni;
/* start to shut down connection */
kiblnd_close_conn_locked(conn, -ECONNABORTED);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_txlist_done(ni, &txs, -ECONNABORTED);
+ kiblnd_txlist_done(&txs, -ECONNABORTED,
+ LNET_MSG_STATUS_LOCAL_ERROR);
return;
}
kiblnd_conn_addref(conn);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- /* Schedule blocked txs */
+ /* Schedule blocked txs
+ * Note: if we are running with conns_per_peer > 1, these blocked
+ * txs will all get scheduled to the first connection which gets
+ * scheduled. We won't be using round robin on this first batch.
+ */
spin_lock(&conn->ibc_lock);
- while (!list_empty(&txs)) {
- tx = list_entry(txs.next, kib_tx_t, tx_list);
+ while ((tx = list_first_entry_or_null(&txs, struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
kiblnd_queue_tx_locked(tx, conn);
}
static void
-kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
+kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
{
int rc;
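+ /* newer kernels take the CM reject reason as a 4th argument
+ * to rdma_reject() */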
+#ifdef HAVE_OFED_RDMA_REJECT_4ARGS
+ rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED);
+#else
rc = rdma_reject(cmid, rej, sizeof(*rej));
+#endif
if (rc != 0)
CWARN("Error %d sending reject\n", rc);
static int
kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- kib_msg_t *reqmsg = priv;
- kib_msg_t *ackmsg;
- kib_dev_t *ibdev;
- kib_peer_ni_t *peer_ni;
- kib_peer_ni_t *peer2;
- kib_conn_t *conn;
- struct lnet_ni *ni = NULL;
- kib_net_t *net = NULL;
- lnet_nid_t nid;
- struct rdma_conn_param cp;
- kib_rej_t rej;
- int version = IBLND_MSG_VERSION;
- unsigned long flags;
- int rc;
- struct sockaddr_in *peer_addr;
- LASSERT (!in_interrupt());
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ struct kib_msg *reqmsg = priv;
+ struct kib_msg *ackmsg;
+ struct kib_dev *ibdev;
+ struct kib_peer_ni *peer_ni;
+ struct kib_peer_ni *peer2;
+ struct kib_conn *conn;
+ struct lnet_ni *ni = NULL;
+ struct kib_net *net = NULL;
+ struct lnet_nid destnid;
+ lnet_nid_t nid;
+ struct rdma_conn_param cp;
+ struct kib_rej rej;
+ int version = IBLND_MSG_VERSION;
+ unsigned long flags;
+ int rc;
+ struct sockaddr_in *peer_addr;
+ LASSERT(!in_interrupt());
/* cmid inherits 'context' from the corresponding listener id */
- ibdev = (kib_dev_t *)cmid->context;
- LASSERT (ibdev != NULL);
+ ibdev = cmid->context;
+ LASSERT(ibdev);
- memset(&rej, 0, sizeof(rej));
- rej.ibr_magic = IBLND_MSG_MAGIC;
- rej.ibr_why = IBLND_REJECT_FATAL;
- rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
+ memset(&rej, 0, sizeof(rej));
+ rej.ibr_magic = IBLND_MSG_MAGIC;
+ rej.ibr_why = IBLND_REJECT_FATAL;
+ rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE;
- peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
- if (*kiblnd_tunables.kib_require_priv_port &&
- ntohs(peer_addr->sin_port) >= PROT_SOCK) {
+ peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr);
+ if (*kiblnd_tunables.kib_require_priv_port &&
+ ntohs(peer_addr->sin_port) >= PROT_SOCK) {
__u32 ip = ntohl(peer_addr->sin_addr.s_addr);
CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n",
&ip, ntohs(peer_addr->sin_port));
goto failed;
}
- if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
+ if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
CERROR("Short connection request\n");
goto failed;
}
}
nid = reqmsg->ibm_srcnid;
- ni = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
+ lnet_nid4_to_nid(reqmsg->ibm_dstnid, &destnid);
+ ni = lnet_nid_to_ni_addref(&destnid);
if (ni != NULL) {
- net = (kib_net_t *)ni->ni_data;
+ net = (struct kib_net *)ni->ni_data;
rej.ibr_incarnation = net->ibn_incarnation;
+ } else {
+ if (ibdev->ibd_nnets == 0) {
+ rej.ibr_why = IBLND_REJECT_EARLY;
+ CNETERR("Can't accept conn from %s (%s:%d:%pI4h): net for nid %s not added yet\n",
+ libcfs_nid2str(nid),
+ ibdev->ibd_ifname, ibdev->ibd_nnets,
+ &ibdev->ibd_ifip,
+ libcfs_nid2str(reqmsg->ibm_dstnid));
+ goto failed;
+ }
+ list_for_each_entry(net, &ibdev->ibd_nets, ibn_list) {
+ if ((net->ibn_dev == ibdev) &&
+ (net->ibn_ni != NULL) &&
+ (net->ibn_ni->ni_state != LNET_NI_STATE_ACTIVE)) {
+ rej.ibr_why = IBLND_REJECT_EARLY;
+ CNETERR("Can't accept conn from %s on %s (%s:%d:%pI4h): nid %s not ready\n",
+ libcfs_nid2str(nid),
+ libcfs_nidstr(&net->ibn_ni->ni_nid),
+ ibdev->ibd_ifname, ibdev->ibd_nnets,
+ &ibdev->ibd_ifip,
+ libcfs_nid2str(reqmsg->ibm_dstnid));
+ goto failed;
+ }
+ }
}
- if (ni == NULL || /* no matching net */
- ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
- net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): "
- "bad dst nid %s\n", libcfs_nid2str(nid),
- ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
+ if (ni == NULL || /* no matching net */
+ !nid_same(&ni->ni_nid, &destnid) || /* right NET, wrong NID! */
+ net->ibn_dev != ibdev) { /* wrong device */
+ CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
+ libcfs_nid2str(nid),
+ ni ? libcfs_nidstr(&ni->ni_nid) : "NA",
ibdev->ibd_ifname, ibdev->ibd_nnets,
- &ibdev->ibd_ifip,
+ &ibdev->ibd_ifip,
libcfs_nid2str(reqmsg->ibm_dstnid));
goto failed;
}
- /* check time stamp as soon as possible */
+ /* check time stamp as soon as possible */
if (reqmsg->ibm_dststamp != 0 &&
reqmsg->ibm_dststamp != net->ibn_incarnation) {
CWARN("Stale connection request\n");
if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
kiblnd_msg_queue_size(version, ni)) {
- CERROR("Can't accept conn from %s, queue depth too large: "
- " %d (<=%d wanted)\n",
+ CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
libcfs_nid2str(nid),
reqmsg->ibm_u.connparams.ibcp_queue_depth,
kiblnd_msg_queue_size(version, ni));
}
if (reqmsg->ibm_u.connparams.ibcp_max_frags >
- kiblnd_rdma_frags(version, ni)) {
- CWARN("Can't accept conn from %s (version %x): "
- "max_frags %d too large (%d wanted)\n",
+ IBLND_MAX_RDMA_FRAGS) {
+ CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
- kiblnd_rdma_frags(version, ni));
+ IBLND_MAX_RDMA_FRAGS);
if (version >= IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
} else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
- kiblnd_rdma_frags(version, ni) &&
+ IBLND_MAX_RDMA_FRAGS &&
net->ibn_fmr_ps == NULL) {
- CWARN("Can't accept conn from %s (version %x): "
- "max_frags %d incompatible without FMR pool "
- "(%d wanted)\n",
+ CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
- kiblnd_rdma_frags(version, ni));
+ IBLND_MAX_RDMA_FRAGS);
if (version == IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
}
- if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
- CERROR("Can't accept %s: message size %d too big (%d max)\n",
- libcfs_nid2str(nid),
- reqmsg->ibm_u.connparams.ibcp_max_msg_size,
- IBLND_MSG_SIZE);
- goto failed;
- }
+ if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) {
+ CERROR("Can't accept %s: message size %d too big (%d max)\n",
+ libcfs_nid2str(nid),
+ reqmsg->ibm_u.connparams.ibcp_max_msg_size,
+ IBLND_MSG_SIZE);
+ goto failed;
+ }
/* assume 'nid' is a new peer_ni; create */
rc = kiblnd_create_peer(ni, &peer_ni, nid);
write_lock_irqsave(g_lock, flags);
- peer2 = kiblnd_find_peer_locked(ni, nid);
- if (peer2 != NULL) {
- if (peer2->ibp_version == 0) {
- peer2->ibp_version = version;
- peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
- }
+ peer2 = kiblnd_find_peer_locked(ni, nid);
+ if (peer2 != NULL) {
+ if (peer2->ibp_version == 0) {
+ peer2->ibp_version = version;
+ peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+ }
- /* not the guy I've talked with */
- if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
- peer2->ibp_version != version) {
+ /* not the guy I've talked with */
+ if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
+ peer2->ibp_version != version) {
kiblnd_close_peer_conns_locked(peer2, -ESTALE);
if (kiblnd_peer_active(peer2)) {
libcfs_nid2str(nid), peer2->ibp_version, version,
peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
- kiblnd_peer_decref(peer_ni);
- rej.ibr_why = IBLND_REJECT_CONN_STALE;
- goto failed;
- }
+ kiblnd_peer_decref(peer_ni);
+ rej.ibr_why = IBLND_REJECT_CONN_STALE;
+ goto failed;
+ }
/* Tie-break connection race in favour of the higher NID.
* If we keep running into a race condition multiple times,
 * let the lower NID connection win so we can move forward.
*/
if (peer2->ibp_connecting != 0 &&
- nid < ni->ni_nid && peer2->ibp_races <
- MAX_CONN_RACES_BEFORE_ABORT) {
+ nid < lnet_nid_to_nid4(&ni->ni_nid) &&
+ peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) {
peer2->ibp_races++;
write_unlock_irqrestore(g_lock, flags);
peer2->ibp_queue_depth = peer_ni->ibp_queue_depth;
write_unlock_irqrestore(g_lock, flags);
- kiblnd_peer_decref(peer_ni);
- peer_ni = peer2;
- } else {
- /* Brand new peer_ni */
- LASSERT (peer_ni->ibp_accepting == 0);
- LASSERT (peer_ni->ibp_version == 0 &&
- peer_ni->ibp_incarnation == 0);
+ kiblnd_peer_decref(peer_ni);
+ peer_ni = peer2;
+ } else {
+ /* Brand new peer_ni */
+ LASSERT(peer_ni->ibp_accepting == 0);
+ LASSERT(peer_ni->ibp_version == 0 &&
+ peer_ni->ibp_incarnation == 0);
- peer_ni->ibp_accepting = 1;
- peer_ni->ibp_version = version;
- peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
+ peer_ni->ibp_accepting = 1;
+ peer_ni->ibp_version = version;
+ peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp;
- /* I have a ref on ni that prevents it being shutdown */
- LASSERT (net->ibn_shutdown == 0);
+ /* I have a ref on ni that prevents it being shutdown */
+ LASSERT(net->ibn_shutdown == 0);
- kiblnd_peer_addref(peer_ni);
- list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid));
+ kiblnd_peer_addref(peer_ni);
+ hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
write_unlock_irqrestore(g_lock, flags);
- }
+ }
- conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, version);
- if (conn == NULL) {
- kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
- kiblnd_peer_decref(peer_ni);
- rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
- goto failed;
- }
+ conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT,
+ version);
+ if (!conn) {
+ kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM);
+ kiblnd_peer_decref(peer_ni);
+ rej.ibr_why = IBLND_REJECT_NO_RESOURCES;
+ goto failed;
+ }
- /* conn now "owns" cmid, so I return success from here on to ensure the
- * CM callback doesn't destroy cmid. */
+ /* conn now "owns" cmid, so I return success from here on to ensure the
+ * CM callback doesn't destroy cmid.
+ */
conn->ibc_incarnation = reqmsg->ibm_srcstamp;
conn->ibc_credits = conn->ibc_queue_depth;
conn->ibc_reserved_credits = conn->ibc_queue_depth;
LASSERT(conn->ibc_credits + conn->ibc_reserved_credits +
IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn));
- ackmsg = &conn->ibc_connvars->cv_msg;
- memset(ackmsg, 0, sizeof(*ackmsg));
+ ackmsg = &conn->ibc_connvars->cv_msg;
+ memset(ackmsg, 0, sizeof(*ackmsg));
- kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
- sizeof(ackmsg->ibm_u.connparams));
+ kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK,
+ sizeof(ackmsg->ibm_u.connparams));
ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth;
ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags;
ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE;
- kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
+ kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp);
- memset(&cp, 0, sizeof(cp));
- cp.private_data = ackmsg;
- cp.private_data_len = ackmsg->ibm_nob;
- cp.responder_resources = 0; /* No atomic ops or RDMA reads */
- cp.initiator_depth = 0;
- cp.flow_control = 1;
- cp.retry_count = *kiblnd_tunables.kib_retry_count;
- cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
+ memset(&cp, 0, sizeof(cp));
+ cp.private_data = ackmsg;
+ cp.private_data_len = ackmsg->ibm_nob;
+ cp.responder_resources = 0; /* No atomic ops or RDMA reads */
+ cp.initiator_depth = 0;
+ cp.flow_control = 1;
+ cp.retry_count = *kiblnd_tunables.kib_retry_count;
+ cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
- CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
+ CDEBUG(D_NET, "Accept %s conn %p\n", libcfs_nid2str(nid), conn);
- rc = rdma_accept(cmid, &cp);
- if (rc != 0) {
- CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
- rej.ibr_version = version;
- rej.ibr_why = IBLND_REJECT_FATAL;
+ rc = rdma_accept(cmid, &cp);
+ if (rc != 0) {
+ CNETERR("Can't accept %s: %d cm_id %p\n", libcfs_nid2str(nid), rc, cmid);
+ rej.ibr_version = version;
+ rej.ibr_why = IBLND_REJECT_FATAL;
- kiblnd_reject(cmid, &rej);
- kiblnd_connreq_done(conn, rc);
- kiblnd_conn_decref(conn);
- }
+ kiblnd_reject(cmid, &rej);
+ kiblnd_connreq_done(conn, rc);
+ kiblnd_conn_decref(conn);
+ }
- lnet_ni_decref(ni);
- return 0;
+ lnet_ni_decref(ni);
+ return 0;
failed:
if (ni != NULL) {
rej.ibr_cp.ibcp_queue_depth =
kiblnd_msg_queue_size(version, ni);
- rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+ rej.ibr_cp.ibcp_max_frags = IBLND_MAX_RDMA_FRAGS;
lnet_ni_decref(ni);
}
}
static void
-kiblnd_check_reconnect(kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(struct kib_conn *conn, int version,
+ u64 incarnation, int why, struct kib_connparams *cp)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
char *reason;
int msg_size = IBLND_MSG_SIZE;
int frag_num = -1;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
LASSERT(peer_ni->ibp_connecting > 0); /* 'conn' at least */
- LASSERT(!peer_ni->ibp_reconnecting);
if (cp) {
msg_size = cp->ibcp_max_msg_size;
}
write_lock_irqsave(glock, flags);
- /* retry connection if it's still needed and no other connection
- * attempts (active or passive) are in progress
- * NB: reconnect is still needed even when ibp_tx_queue is
- * empty if ibp_version != version because reconnect may be
- * initiated by kiblnd_query() */
+ /* retry connection if it's still needed and no other connection
+ * attempts (active or passive) are in progress
+ * NB: reconnect is still needed even when ibp_tx_queue is
+ * empty if ibp_version != version, so that the protocol
+ * version can be renegotiated.
+ */
reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
peer_ni->ibp_version != version) &&
- peer_ni->ibp_connecting == 1 &&
+ peer_ni->ibp_connecting &&
peer_ni->ibp_accepting == 0;
if (!reconnect) {
reason = "no need";
goto out;
}
- switch (why) {
- default:
- reason = "Unknown";
- break;
+ switch (why) {
+ default:
+ reason = "Unknown";
+ break;
case IBLND_REJECT_RDMA_FRAGS: {
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
-
if (!cp) {
reason = "can't negotiate max frags";
goto out;
}
- tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
- if (!tunables->lnd_map_on_demand) {
- reason = "map_on_demand must be enabled";
- goto out;
- }
+
if (conn->ibc_max_frags <= frag_num) {
reason = "unsupported max frags";
goto out;
case IBLND_REJECT_CONN_UNCOMPAT:
reason = "version negotiation";
break;
-
- case IBLND_REJECT_INVALID_SRV_ID:
- reason = "invalid service id";
- break;
}
conn->ibc_reconnect = 1;
- peer_ni->ibp_reconnecting = 1;
+ peer_ni->ibp_reconnecting++;
peer_ni->ibp_version = version;
if (incarnation != 0)
peer_ni->ibp_incarnation = incarnation;
}
static void
-kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
+kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
{
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ int status = -ECONNREFUSED;
LASSERT (!in_interrupt());
LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
IBLND_REJECT_CONN_STALE, NULL);
break;
- case IB_CM_REJ_INVALID_SERVICE_ID:
- kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_INVALID_SRV_ID, NULL);
- CNETERR("%s rejected: no listener at %d\n",
- libcfs_nid2str(peer_ni->ibp_nid),
- *kiblnd_tunables.kib_service);
- break;
+ case IB_CM_REJ_INVALID_SERVICE_ID:
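+ /* no listener at the remote service id: report the peer_ni
+ * as unreachable instead of scheduling a reconnect */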
+ status = -EHOSTUNREACH;
+ CNETERR("%s rejected: no listener at %d\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ *kiblnd_tunables.kib_service);
+ break;
+
+ case IB_CM_REJ_CONSUMER_DEFINED:
+ if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
+ struct kib_rej *rej = priv;
+ struct kib_connparams *cp = NULL;
+ bool flip = false;
+ __u64 incarnation = -1;
+
+ /* NB. default incarnation is -1 because:
+ * a) V1 will ignore dst incarnation in connreq.
+ * b) V2 will provide the incarnation while rejecting me,
+ * so -1 will be overwritten.
+ *
+ * If I try to connect to a V1 peer_ni with the V2 protocol,
+ * it may reject me and then upgrade to V2. Knowing nothing
+ * about the upgrade, I try to reconnect with V1; the upgraded
+ * V2 peer_ni can then tell I'm trying to talk to the old
+ * instance and reject me (incarnation is -1).
+ */
+
+ if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
+ rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
+ __swab32s(&rej->ibr_magic);
+ __swab16s(&rej->ibr_version);
+ flip = true;
+ }
+
+ if (priv_nob >= sizeof(struct kib_rej) &&
+ rej->ibr_version > IBLND_MSG_VERSION_1) {
+ /* priv_nob is always 148 in current version
+ * of OFED, so we still need to check version.
+ * (define of IB_CM_REJ_PRIVATE_DATA_SIZE)
+ */
+ cp = &rej->ibr_cp;
+
+ if (flip) {
+ __swab64s(&rej->ibr_incarnation);
+ __swab16s(&cp->ibcp_queue_depth);
+ __swab16s(&cp->ibcp_max_frags);
+ __swab32s(&cp->ibcp_max_msg_size);
+ }
+
+ incarnation = rej->ibr_incarnation;
+ }
+
+ if (rej->ibr_magic != IBLND_MSG_MAGIC &&
+ rej->ibr_magic != LNET_PROTO_MAGIC) {
+ CERROR("%s rejected: consumer defined fatal error\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ break;
+ }
+
+ if (rej->ibr_version != IBLND_MSG_VERSION &&
+ rej->ibr_version != IBLND_MSG_VERSION_1) {
+ CERROR("%s rejected: o2iblnd version %x error\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ rej->ibr_version);
+ break;
+ }
+
+ if (rej->ibr_why == IBLND_REJECT_FATAL &&
+ rej->ibr_version == IBLND_MSG_VERSION_1) {
+ CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ rej->ibr_version);
+
+ if (conn->ibc_version != IBLND_MSG_VERSION_1)
+ rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
+ }
- case IB_CM_REJ_CONSUMER_DEFINED:
- if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
- kib_rej_t *rej = priv;
- kib_connparams_t *cp = NULL;
- int flip = 0;
- __u64 incarnation = -1;
-
- /* NB. default incarnation is -1 because:
- * a) V1 will ignore dst incarnation in connreq.
- * b) V2 will provide incarnation while rejecting me,
- * -1 will be overwrote.
- *
- * if I try to connect to a V1 peer_ni with V2 protocol,
- * it rejected me then upgrade to V2, I have no idea
- * about the upgrading and try to reconnect with V1,
- * in this case upgraded V2 can find out I'm trying to
- * talk to the old guy and reject me(incarnation is -1).
- */
-
- if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
- rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
- __swab32s(&rej->ibr_magic);
- __swab16s(&rej->ibr_version);
- flip = 1;
- }
-
- if (priv_nob >= sizeof(kib_rej_t) &&
- rej->ibr_version > IBLND_MSG_VERSION_1) {
- /* priv_nob is always 148 in current version
- * of OFED, so we still need to check version.
- * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
- cp = &rej->ibr_cp;
-
- if (flip) {
- __swab64s(&rej->ibr_incarnation);
- __swab16s(&cp->ibcp_queue_depth);
- __swab16s(&cp->ibcp_max_frags);
- __swab32s(&cp->ibcp_max_msg_size);
- }
-
- incarnation = rej->ibr_incarnation;
- }
-
- if (rej->ibr_magic != IBLND_MSG_MAGIC &&
- rej->ibr_magic != LNET_PROTO_MAGIC) {
- CERROR("%s rejected: consumer defined fatal error\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- break;
- }
-
- if (rej->ibr_version != IBLND_MSG_VERSION &&
- rej->ibr_version != IBLND_MSG_VERSION_1) {
- CERROR("%s rejected: o2iblnd version %x error\n",
- libcfs_nid2str(peer_ni->ibp_nid),
- rej->ibr_version);
- break;
- }
-
- if (rej->ibr_why == IBLND_REJECT_FATAL &&
- rej->ibr_version == IBLND_MSG_VERSION_1) {
- CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
- libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version);
-
- if (conn->ibc_version != IBLND_MSG_VERSION_1)
- rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
- }
-
- switch (rej->ibr_why) {
- case IBLND_REJECT_CONN_RACE:
- case IBLND_REJECT_CONN_STALE:
- case IBLND_REJECT_CONN_UNCOMPAT:
+ switch (rej->ibr_why) {
+ case IBLND_REJECT_CONN_RACE:
+ case IBLND_REJECT_CONN_STALE:
+ case IBLND_REJECT_CONN_UNCOMPAT:
case IBLND_REJECT_MSG_QUEUE_SIZE:
case IBLND_REJECT_RDMA_FRAGS:
kiblnd_check_reconnect(conn, rej->ibr_version,
- incarnation, rej->ibr_why, cp);
- break;
-
- case IBLND_REJECT_NO_RESOURCES:
- CERROR("%s rejected: o2iblnd no resources\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- break;
-
- case IBLND_REJECT_FATAL:
- CERROR("%s rejected: o2iblnd fatal error\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- break;
-
- default:
- CERROR("%s rejected: o2iblnd reason %d\n",
- libcfs_nid2str(peer_ni->ibp_nid),
- rej->ibr_why);
- break;
- }
- break;
- }
- /* fall through */
- default:
- CNETERR("%s rejected: reason %d, size %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
- break;
- }
+ incarnation,
+ rej->ibr_why, cp);
+ break;
+
+ case IBLND_REJECT_NO_RESOURCES:
+ CERROR("%s rejected: o2iblnd no resources\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ break;
+
+ case IBLND_REJECT_FATAL:
+ CERROR("%s rejected: o2iblnd fatal error\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ break;
+
+ case IBLND_REJECT_EARLY:
+ CNETERR("%s rejected: tried too early\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ break;
+
+ default:
+ CERROR("%s rejected: o2iblnd reason %d\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ rej->ibr_why);
+ break;
+ }
+ break;
+ }
+ fallthrough;
+ default:
+ CNETERR("%s rejected: reason %d, size %d\n",
+ libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
+ break;
+ }
- kiblnd_connreq_done(conn, -ECONNREFUSED);
+ kiblnd_connreq_done(conn, status);
}
static void
-kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
+kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
{
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
- struct lnet_ni *ni = peer_ni->ibp_ni;
- kib_net_t *net = ni->ni_data;
- kib_msg_t *msg = priv;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ struct lnet_ni *ni = peer_ni->ibp_ni;
+ struct kib_net *net = ni->ni_data;
+ struct kib_msg *msg = priv;
int ver = conn->ibc_version;
int rc = kiblnd_unpack_msg(msg, priv_nob);
unsigned long flags;
}
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
+ if (msg->ibm_dstnid == lnet_nid_to_nid4(&ni->ni_nid) &&
msg->ibm_dststamp == net->ibn_incarnation)
rc = 0;
else
}
static int
-kiblnd_active_connect (struct rdma_cm_id *cmid)
+kiblnd_active_connect(struct rdma_cm_id *cmid)
{
- kib_peer_ni_t *peer_ni = (kib_peer_ni_t *)cmid->context;
- kib_conn_t *conn;
- kib_msg_t *msg;
- struct rdma_conn_param cp;
+ struct kib_peer_ni *peer_ni = cmid->context;
+ struct kib_conn *conn;
+ struct kib_msg *msg;
+ struct rdma_conn_param cp;
int version;
__u64 incarnation;
unsigned long flags;
LASSERT(cmid->context == (void *)conn);
LASSERT(conn->ibc_cmid == cmid);
-
- rc = rdma_connect(cmid, &cp);
+ rc = rdma_connect_locked(cmid, &cp);
if (rc != 0) {
- CERROR("Can't connect to %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), rc);
+ CNETERR("Can't connect to %s: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), rc, cmid);
kiblnd_connreq_done(conn, rc);
kiblnd_conn_decref(conn);
- }
+ } else {
+ CDEBUG(D_NET, "Connected to %s: cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), cmid);
+ }
return 0;
}
+/* set the IP ToS ("Type of Service") used by the RoCE QoS */
+static void
+kiblnd_set_tos(struct rdma_cm_id *cmid)
+{
+ struct kib_peer_ni *peer_ni = cmid->context;
+ struct lnet_ioctl_config_o2iblnd_tunables *t;
+
+ t = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
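+ /* a negative lnd_tos means the tunable is unset; keep the
+ * rdma_cm default service type */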
+ if (t->lnd_tos < 0)
+ return;
+
+ rdma_set_service_type(cmid, t->lnd_tos);
+}
+
int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
- kib_peer_ni_t *peer_ni;
- kib_conn_t *conn;
- int rc;
+ struct kib_peer_ni *peer_ni;
+ struct kib_conn *conn;
+ int rc;
switch (event->event) {
default:
case RDMA_CM_EVENT_CONNECT_REQUEST:
/* destroy cmid on failure */
- rc = kiblnd_passive_connect(cmid,
+ rc = kiblnd_passive_connect(cmid,
(void *)KIBLND_CONN_PARAM(event),
KIBLND_CONN_PARAM_LEN(event));
- CDEBUG(D_NET, "connreq: %d\n", rc);
+ CDEBUG(D_NET, "connreq: %d cm_id %p\n", rc, cmid);
return rc;
-
+
case RDMA_CM_EVENT_ADDR_ERROR:
- peer_ni = (kib_peer_ni_t *)cmid->context;
- CNETERR("%s: ADDR ERROR %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ peer_ni = cmid->context;
+ CNETERR("%s: ADDR ERROR %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), event->status, cmid);
kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer_ni);
return -EHOSTUNREACH; /* rc != 0 destroys cmid */
case RDMA_CM_EVENT_ADDR_RESOLVED:
- peer_ni = (kib_peer_ni_t *)cmid->context;
+ peer_ni = cmid->context;
- CDEBUG(D_NET,"%s Addr resolved: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ CDEBUG(D_NET, "%s Addr resolved: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), event->status, cmid);
if (event->status != 0) {
- CNETERR("Can't resolve address for %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ CNETERR("Can't resolve address for %s: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ event->status, cmid);
rc = event->status;
- } else {
- rc = rdma_resolve_route(
- cmid, *kiblnd_tunables.kib_timeout * 1000);
- if (rc == 0)
- return 0;
+ } else {
+ kiblnd_set_tos(cmid);
+ rc = rdma_resolve_route(
+ cmid, kiblnd_timeout() * 1000);
+ if (rc == 0) {
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
+ struct kib_dev *dev = net->ibn_dev;
+
+ CDEBUG(D_NET, "%s: connection bound to "\
+ "%s:%pI4h:%s\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ dev->ibd_ifname,
+ &dev->ibd_ifip, cmid->device->name);
+
+ return 0;
+ }
+
/* Can't initiate route resolution */
- CERROR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), rc);
+ CNETERR("Can't resolve route for %s: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), rc, cmid);
}
kiblnd_peer_connect_failed(peer_ni, 1, rc);
kiblnd_peer_decref(peer_ni);
return rc; /* rc != 0 destroys cmid */
case RDMA_CM_EVENT_ROUTE_ERROR:
- peer_ni = (kib_peer_ni_t *)cmid->context;
- CNETERR("%s: ROUTE ERROR %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ peer_ni = cmid->context;
+ CNETERR("%s: ROUTE ERROR %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), event->status, cmid);
kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer_ni);
return -EHOSTUNREACH; /* rc != 0 destroys cmid */
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- peer_ni = (kib_peer_ni_t *)cmid->context;
+ peer_ni = cmid->context;
CDEBUG(D_NET,"%s Route resolved: %d\n",
libcfs_nid2str(peer_ni->ibp_nid), event->status);
if (event->status == 0)
return kiblnd_active_connect(cmid);
- CNETERR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ CNETERR("Can't resolve route for %s: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), event->status, cmid);
kiblnd_peer_connect_failed(peer_ni, 1, event->status);
kiblnd_peer_decref(peer_ni);
return event->status; /* rc != 0 destroys cmid */
-
+
case RDMA_CM_EVENT_UNREACHABLE:
- conn = (kib_conn_t *)cmid->context;
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
- conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CNETERR("%s: UNREACHABLE %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
- kiblnd_connreq_done(conn, -ENETDOWN);
- kiblnd_conn_decref(conn);
+ conn = cmid->context;
+ CNETERR("%s: UNREACHABLE %d cm_id %p conn %p ibc_state: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status, cmid, conn, conn->ibc_state);
+ LASSERT(conn->ibc_state != IBLND_CONN_INIT);
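+ /* only complete the connection request if setup is still
+ * in progress */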
+ if (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
+ conn->ibc_state == IBLND_CONN_PASSIVE_WAIT) {
+ kiblnd_connreq_done(conn, -ENETDOWN);
+ kiblnd_conn_decref(conn);
+ }
return 0;
case RDMA_CM_EVENT_CONNECT_ERROR:
- conn = (kib_conn_t *)cmid->context;
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
- conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CNETERR("%s: CONNECT ERROR %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
- kiblnd_connreq_done(conn, -ENOTCONN);
- kiblnd_conn_decref(conn);
- return 0;
+ conn = cmid->context;
+ CNETERR("%s: CONNECT ERROR %d cm_id %p conn %p state: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status, cmid, conn, conn->ibc_state);
+ if (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
+ conn->ibc_state == IBLND_CONN_PASSIVE_WAIT) {
+ kiblnd_connreq_done(conn, -ENOTCONN);
+ kiblnd_conn_decref(conn);
+ }
+ return 0;
case RDMA_CM_EVENT_REJECTED:
- conn = (kib_conn_t *)cmid->context;
+ conn = cmid->context;
switch (conn->ibc_state) {
default:
LBUG();
case IBLND_CONN_PASSIVE_WAIT:
- CERROR ("%s: REJECTED %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- event->status);
+ CERROR("%s: REJECTED %d cm_id %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status, cmid);
kiblnd_connreq_done(conn, -ECONNRESET);
break;
return 0;
case RDMA_CM_EVENT_ESTABLISHED:
- conn = (kib_conn_t *)cmid->context;
+ conn = cmid->context;
switch (conn->ibc_state) {
default:
LBUG();
case IBLND_CONN_PASSIVE_WAIT:
- CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CDEBUG(D_NET, "ESTABLISHED (passive): %s cm_id %p conn %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), cmid, conn);
kiblnd_connreq_done(conn, 0);
break;
case IBLND_CONN_ACTIVE_CONNECT:
- CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CDEBUG(D_NET, "ESTABLISHED(active): %s cm_id %p conn %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), cmid, conn);
kiblnd_check_connreply(conn,
(void *)KIBLND_CONN_PARAM(event),
KIBLND_CONN_PARAM_LEN(event));
return 0;
case RDMA_CM_EVENT_DISCONNECTED:
- conn = (kib_conn_t *)cmid->context;
+ conn = cmid->context;
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- CERROR("%s DISCONNECTED\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CERROR("%s DISCONNECTED cm_id %p conn %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), cmid, conn);
kiblnd_connreq_done(conn, -ECONNRESET);
} else {
kiblnd_close_conn(conn, 0);
}
static int
-kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
+kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
{
- kib_tx_t *tx;
- struct list_head *ttmp;
-
- list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, kib_tx_t, tx_list);
+ struct kib_tx *tx;
+ list_for_each_entry(tx, txs, tx_list) {
if (txs != &conn->ibc_active_txs) {
LASSERT(tx->tx_queued);
} else {
LASSERT(tx->tx_waiting || tx->tx_sending != 0);
}
- if (cfs_time_aftereq(jiffies, tx->tx_deadline)) {
- CERROR("Timed out tx: %s, %lu seconds\n",
+ if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+ CERROR("Timed out tx: %s(WSQ:%d%d%d), %lld seconds\n",
kiblnd_queue2str(conn, txs),
- cfs_duration_sec(jiffies - tx->tx_deadline));
+ tx->tx_waiting, tx->tx_sending, tx->tx_queued,
+ kiblnd_timeout() +
+ ktime_ms_delta(ktime_get(),
+ tx->tx_deadline) / MSEC_PER_SEC);
return 1;
}
}
}
static int
-kiblnd_conn_timed_out_locked(kib_conn_t *conn)
+kiblnd_conn_timed_out_locked(struct kib_conn *conn)
{
return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
static void
kiblnd_check_conns (int idx)
{
- struct list_head closes = LIST_HEAD_INIT(closes);
- struct list_head checksends = LIST_HEAD_INIT(checksends);
- struct list_head *peers = &kiblnd_data.kib_peers[idx];
- struct list_head *ptmp;
- kib_peer_ni_t *peer_ni;
- kib_conn_t *conn;
- struct list_head *ctmp;
- unsigned long flags;
+ LIST_HEAD(closes);
+ LIST_HEAD(checksends);
+ LIST_HEAD(timedout_txs);
+ struct hlist_head *peers = &kiblnd_data.kib_peers[idx];
+ struct kib_peer_ni *peer_ni;
+ struct kib_conn *conn;
+ struct kib_tx *tx, *tx_tmp;
+ unsigned long flags;
/* NB. We expect to have a look at all the peers and not find any
* RDMAs to time out, so we just use a shared lock while we
- * take a look... */
- read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+ * take a look...
+ */
+ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
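+ /* NB: a write lock is needed here because timed-out txs are
+ * unlinked from the peer_ni tx queue below */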
- list_for_each(ptmp, peers) {
- peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ hlist_for_each_entry(peer_ni, peers, ibp_list) {
+ /* Check tx_deadline */
+ list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
+ if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) {
+ CWARN("Timed out tx for %s: %lld seconds\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ ktime_ms_delta(ktime_get(),
+ tx->tx_deadline) / MSEC_PER_SEC);
+ list_move(&tx->tx_list, &timedout_txs);
+ }
+ }
- list_for_each(ctmp, &peer_ni->ibp_conns) {
+ list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) {
int timedout;
int sendnoop;
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
-
LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
spin_lock(&conn->ibc_lock);
}
if (timedout) {
- CERROR("Timed out RDMA with %s (%lu): "
- "c: %u, oc: %u, rc: %u\n",
+ CERROR("Timed out RDMA with %s (%lld): c: %u, oc: %u, rc: %u\n",
libcfs_nid2str(peer_ni->ibp_nid),
- cfs_duration_sec(cfs_time_current() -
- peer_ni->ibp_last_alive),
+ ktime_get_seconds()
+ - peer_ni->ibp_last_alive,
conn->ibc_credits,
conn->ibc_outstanding_credits,
conn->ibc_reserved_credits);
+#ifdef O2IBLND_CONN_STATE_DEBUG
+ kiblnd_dump_conn_dbg(conn);
+#endif
list_add(&conn->ibc_connd_list, &closes);
} else {
list_add(&conn->ibc_connd_list, &checksends);
}
}
- read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+ if (!list_empty(&timedout_txs))
+ kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT,
+ LNET_MSG_STATUS_NETWORK_TIMEOUT);
/* Handle timeout by closing the whole
* connection. We can only be sure RDMA activity
- * has ceased once the QP has been modified. */
- while (!list_empty(&closes)) {
- conn = list_entry(closes.next,
- kib_conn_t, ibc_connd_list);
+ * has ceased once the QP has been modified.
+ */
+ while ((conn = list_first_entry_or_null(&closes,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
kiblnd_close_conn(conn, -ETIMEDOUT);
kiblnd_conn_decref(conn);
/* In case we have enough credits to return via a
* NOOP, but there were no non-blocking tx descs
- * free to do it last time... */
- while (!list_empty(&checksends)) {
- conn = list_entry(checksends.next,
- kib_conn_t, ibc_connd_list);
+ * free to do it last time...
+ */
+ while ((conn = list_first_entry_or_null(&checksends,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
spin_lock(&conn->ibc_lock);
}
static void
-kiblnd_disconnect_conn (kib_conn_t *conn)
+kiblnd_disconnect_conn(struct kib_conn *conn)
{
LASSERT (!in_interrupt());
LASSERT (current == kiblnd_data.kib_connd);
LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
-
+#ifdef O2IBLND_CONN_STATE_DEBUG
+ kiblnd_dump_conn_dbg(conn);
+#endif
rdma_disconnect(conn->ibc_cmid);
kiblnd_finalise_conn(conn);
int
kiblnd_connd (void *arg)
{
- spinlock_t *lock= &kiblnd_data.kib_connd_lock;
- wait_queue_t wait;
- unsigned long flags;
- kib_conn_t *conn;
- int timeout;
- int i;
- int dropped_lock;
- int peer_index = 0;
- unsigned long deadline = jiffies;
-
- cfs_block_allsigs();
-
- init_waitqueue_entry(&wait, current);
+ spinlock_t *lock = &kiblnd_data.kib_connd_lock;
+ wait_queue_entry_t wait;
+ unsigned long flags;
+ struct kib_conn *conn;
+ int timeout;
+ int i;
+ bool dropped_lock;
+ int peer_index = 0;
+ unsigned long deadline = jiffies;
+
+ init_wait(&wait);
kiblnd_data.kib_connd = current;
spin_lock_irqsave(lock, flags);
while (!kiblnd_data.kib_shutdown) {
int reconn = 0;
- dropped_lock = 0;
+ dropped_lock = false;
- if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
- kib_peer_ni_t *peer_ni = NULL;
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies,
+ struct kib_conn, ibc_list);
+ if (conn) {
+ struct kib_peer_ni *peer_ni = NULL;
- conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
if (conn->ibc_reconnect) {
peer_ni = conn->ibc_peer;
}
spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
+ dropped_lock = true;
- kiblnd_destroy_conn(conn, !peer_ni);
+ kiblnd_destroy_conn(conn);
spin_lock_irqsave(lock, flags);
- if (!peer_ni)
+ if (!peer_ni) {
+ LIBCFS_FREE(conn, sizeof(*conn));
continue;
+ }
conn->ibc_peer = peer_ni;
if (peer_ni->ibp_reconnected < KIB_RECONN_HIGH_RACE)
&kiblnd_data.kib_reconn_wait);
}
- if (!list_empty(&kiblnd_data.kib_connd_conns)) {
- conn = list_entry(kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns,
+ struct kib_conn, ibc_list);
+ if (conn) {
+ int wait;
+
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
+ dropped_lock = true;
kiblnd_disconnect_conn(conn);
- kiblnd_conn_decref(conn);
+ wait = conn->ibc_waits;
+ if (wait == 0) /* keep ref for kib_connd_waits, see below */
+ kiblnd_conn_decref(conn);
spin_lock_irqsave(lock, flags);
- }
+
+ if (wait)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_connd_waits);
+ }
while (reconn < KIB_RECONN_BREAK) {
if (kiblnd_data.kib_reconn_sec !=
&kiblnd_data.kib_reconn_list);
}
- if (list_empty(&kiblnd_data.kib_reconn_list))
+ conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list,
+ struct kib_conn, ibc_list);
+ if (!conn)
break;
- conn = list_entry(kiblnd_data.kib_reconn_list.next,
- kib_conn_t, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
+ dropped_lock = true;
reconn += kiblnd_reconnect_peer(conn->ibc_peer);
kiblnd_peer_decref(conn->ibc_peer);
spin_lock_irqsave(lock, flags);
}
- /* careful with the jiffy wrap... */
- timeout = (int)(deadline - jiffies);
- if (timeout <= 0) {
- const int n = 4;
- const int p = 1;
- int chunk = kiblnd_data.kib_peer_hash_size;
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits,
+ struct kib_conn, ibc_list);
+ if (conn) {
+ list_del(&conn->ibc_list);
+ spin_unlock_irqrestore(lock, flags);
+
+ dropped_lock = kiblnd_tx_may_discard(conn);
+ if (dropped_lock)
+ kiblnd_conn_decref(conn);
+
+ spin_lock_irqsave(lock, flags);
+ if (!dropped_lock)
+ list_add_tail(&conn->ibc_list,
+ &kiblnd_data.kib_connd_waits);
+ }
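The two hunks above introduce a park-and-retry scheme: a conn whose
disconnect still has waiters keeps its ref and is parked on
kib_connd_waits; each connd iteration pulls one parked conn, retries
outside the lock, and re-parks it if no progress was made. A minimal
sketch of the pattern, under the assumption of hypothetical example_*
names (not o2iblnd functions):

	struct example_conn {
		struct list_head link;
		/* ... */
	};

	static void example_retry_parked(struct list_head *parked,
					 spinlock_t *lock)
	{
		struct example_conn *c;
		unsigned long flags;
		bool done;

		spin_lock_irqsave(lock, flags);
		c = list_first_entry_or_null(parked, struct example_conn,
					     link);
		if (!c) {
			spin_unlock_irqrestore(lock, flags);
			return;
		}
		list_del(&c->link);
		spin_unlock_irqrestore(lock, flags);

		done = example_try_to_finish(c); /* may take other locks */
		if (done)
			example_put(c);	/* drop the ref kept when parked */

		spin_lock_irqsave(lock, flags);
		if (!done)		/* no progress: park it again */
			list_add_tail(&c->link, parked);
		spin_unlock_irqrestore(lock, flags);
	}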
+
+ /* careful with the jiffy wrap... */
+ timeout = (int)(deadline - jiffies);
+ if (timeout <= 0) {
+ const int n = 4;
+ const int p = 1;
+ int chunk = HASH_SIZE(kiblnd_data.kib_peers);
+ unsigned int lnd_timeout;
spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
-
- /* Time to check for RDMA timeouts on a few more
- * peers: I do checks every 'p' seconds on a
- * proportion of the peer_ni table and I need to check
- * every connection 'n' times within a timeout
- * interval, to ensure I detect a timeout on any
- * connection within (n+1)/n times the timeout
- * interval. */
-
- if (*kiblnd_tunables.kib_timeout > n * p)
- chunk = (chunk * n * p) /
- *kiblnd_tunables.kib_timeout;
- if (chunk == 0)
- chunk = 1;
+ dropped_lock = true;
+
+ /* Time to check for RDMA timeouts on a few more
+ * peers: I do checks every 'p' seconds on a
+ * proportion of the peer_ni table and I need to check
+ * every connection 'n' times within a timeout
+ * interval, to ensure I detect a timeout on any
+ * connection within (n+1)/n times the timeout
+ * interval.
+ */
+
+ lnd_timeout = kiblnd_timeout();
+ if (lnd_timeout > n * p)
+ chunk = (chunk * n * p) / lnd_timeout;
+ if (chunk == 0)
+ chunk = 1;
for (i = 0; i < chunk; i++) {
kiblnd_check_conns(peer_index);
peer_index = (peer_index + 1) %
- kiblnd_data.kib_peer_hash_size;
+ HASH_SIZE(kiblnd_data.kib_peers);
}
- deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+ deadline += cfs_time_seconds(p);
spin_lock_irqsave(lock, flags);
}
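Two notes on the block above. First, "careful with the jiffy wrap":
"deadline - jiffies" is an unsigned subtraction whose result, cast to a
signed int, yields the correct small positive or negative delta even
when the jiffies counter wraps, which is why the expiry test is written
as "(int)(deadline - jiffies) <= 0". Second, a worked example of the
chunk arithmetic, with illustrative values only (a 1024-bucket peer
hash and an lnd_timeout of 50s; n = 4, p = 1 as above):

	int chunk = 1024;		/* HASH_SIZE(kib_peers), assumed */

	if (50 > 4 * 1)			/* lnd_timeout > n * p */
		chunk = (1024 * 4 * 1) / 50;	/* = 81 buckets per pass */

One pass runs every p = 1 second, so the whole table is swept in about
1024 / 81 ~= 13 seconds, i.e. every connection is examined roughly
n = 4 times per timeout interval, as the comment requires.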
schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_lock_irqsave(lock, flags);
}
void
kiblnd_qp_event(struct ib_event *event, void *arg)
{
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
- switch (event->event) {
- case IB_EVENT_COMM_EST:
- CDEBUG(D_NET, "%s established\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ CDEBUG(D_NET, "%s established\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
/* We received a packet but the connection isn't established;
 * the handshake packet was probably lost, so it is safe to
 * force the connection into the established state */
rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
- return;
+ return;
- default:
- CERROR("%s: Async QP event type %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
- return;
- }
+ case IB_EVENT_PORT_ERR:
+ case IB_EVENT_DEVICE_FATAL:
+ CERROR("Fatal device error for NI %s\n",
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
+ atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
+ return;
+
+ case IB_EVENT_PORT_ACTIVE:
+ CERROR("Port reactivated for NI %s\n",
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
+ atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
+ return;
+
+ default:
+ CERROR("%s: Async QP event type %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
+ return;
+ }
}
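The new IB_EVENT_PORT_ERR/IB_EVENT_DEVICE_FATAL and
IB_EVENT_PORT_ACTIVE arms latch and clear a per-NI fatal flag with
atomic_set(). The matching reader side, as a sketch of how a health
layer might consume the latch (an assumption for illustration, not
LNet code):

	static bool example_ni_fatal(struct lnet_ni *ni)
	{
		/* non-zero while the port/device is in a fatal state */
		return atomic_read(&ni->ni_fatal_error_on) != 0;
	}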
static void
* consuming my CQ I could be called after all completions have
* occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
* and this CQ is about to be destroyed so I NOOP. */
- kib_conn_t *conn = (kib_conn_t *)arg;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
+ struct kib_conn *conn = arg;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;
LASSERT(cq == conn->ibc_cq);
(conn->ibc_nrx > 0 ||
conn->ibc_nsends_posted > 0)) {
kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
+ kiblnd_dump_conn_dbg(conn);
conn->ibc_scheduled = 1;
list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
void
kiblnd_cq_event(struct ib_event *event, void *arg)
{
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
CERROR("%s: async CQ event type %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
int
kiblnd_scheduler(void *arg)
{
- long id = (long)arg;
- struct kib_sched_info *sched;
- kib_conn_t *conn;
- wait_queue_t wait;
- unsigned long flags;
- struct ib_wc wc;
- int did_something;
- int busy_loops = 0;
- int rc;
-
- cfs_block_allsigs();
+ long id = (long)arg;
+ struct kib_sched_info *sched;
+ struct kib_conn *conn;
+ wait_queue_entry_t wait;
+ unsigned long flags;
+ struct ib_wc wc;
+ bool did_something;
+ int rc;
- init_waitqueue_entry(&wait, current);
+ init_wait(&wait);
sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)];
rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
if (rc != 0) {
- CWARN("Unable to bind on CPU partition %d, please verify "
- "whether all CPUs are healthy and reload modules if "
- "necessary, otherwise your system might under risk of "
- "low performance\n", sched->ibs_cpt);
+ CWARN("Unable to bind on CPU partition %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n", sched->ibs_cpt);
}
spin_lock_irqsave(&sched->ibs_lock, flags);
while (!kiblnd_data.kib_shutdown) {
- if (busy_loops++ >= IBLND_RESCHED) {
+ if (need_resched()) {
spin_unlock_irqrestore(&sched->ibs_lock, flags);
cond_resched();
- busy_loops = 0;
spin_lock_irqsave(&sched->ibs_lock, flags);
}
- did_something = 0;
+ did_something = false;
- if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next,
- kib_conn_t, ibc_sched_list);
+ conn = list_first_entry_or_null(&sched->ibs_conns,
+ struct kib_conn,
+ ibc_sched_list);
+ if (conn) {
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
list_del(&conn->ibc_sched_list);
wc.wr_id = IBLND_WID_INVAL;
- rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
- if (rc == 0) {
- rc = ib_req_notify_cq(conn->ibc_cq,
- IB_CQ_NEXT_COMP);
- if (rc < 0) {
- CWARN("%s: ib_req_notify_cq failed: %d, "
- "closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_close_conn(conn, -EIO);
- kiblnd_conn_decref(conn);
+ rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+ if (rc == 0) {
+ rc = ib_req_notify_cq(conn->ibc_cq,
+ IB_CQ_NEXT_COMP);
+ if (rc < 0) {
+ CWARN("%s: ib_req_notify_cq failed: %d, closing connection %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ rc, conn);
+ kiblnd_close_conn(conn, -EIO);
+ kiblnd_conn_decref(conn);
spin_lock_irqsave(&sched->ibs_lock,
- flags);
+ flags);
continue;
}
}
if (rc < 0) {
- CWARN("%s: ib_poll_cq failed: %d, "
- "closing connection\n",
+ CWARN("%s: ib_poll_cq failed: %d, closing connection %p\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc);
+ rc, conn);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
spin_lock_irqsave(&sched->ibs_lock, flags);
/* +1 ref for sched_conns */
kiblnd_conn_addref(conn);
list_add_tail(&conn->ibc_sched_list,
- &sched->ibs_conns);
+ &sched->ibs_conns);
if (waitqueue_active(&sched->ibs_waitq))
wake_up(&sched->ibs_waitq);
} else {
kiblnd_complete(&wc);
spin_lock_irqsave(&sched->ibs_lock, flags);
- }
+ }
- kiblnd_conn_decref(conn); /* ...drop my ref from above */
- did_something = 1;
- }
+ kiblnd_conn_decref(conn); /* ...drop my ref from above */
+ did_something = true;
+ }
- if (did_something)
- continue;
+ if (did_something)
+ continue;
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
spin_unlock_irqrestore(&sched->ibs_lock, flags);
schedule();
- busy_loops = 0;
remove_wait_queue(&sched->ibs_waitq, &wait);
set_current_state(TASK_RUNNING);
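The scheduler loop above uses the classic CQ drain discipline: poll one
CQE at a time and, when the CQ is empty, re-arm it and poll once more,
since a completion can slip in between the final poll and the notify
call. An equivalent compact form, offered only as a sketch; it relies
on IB_CQ_REPORT_MISSED_EVENTS instead of the explicit re-poll, and
"example_handle_wc" stands in for kiblnd_complete():

	static void example_drain_cq(struct ib_cq *cq)
	{
		struct ib_wc wc;

	again:
		while (ib_poll_cq(cq, 1, &wc) > 0)
			example_handle_wc(&wc);

		/* re-arm; a positive return means completions arrived
		 * while we were arming, so drain again */
		if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS) > 0)
			goto again;
	}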
int
kiblnd_failover_thread(void *arg)
{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
- wait_queue_t wait;
- unsigned long flags;
- int rc;
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ struct kib_dev *dev;
+ struct net *ns = arg;
+ wait_queue_entry_t wait;
+ unsigned long flags;
+ int rc;
LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
- cfs_block_allsigs();
-
- init_waitqueue_entry(&wait, current);
+ init_wait(&wait);
write_lock_irqsave(glock, flags);
- while (!kiblnd_data.kib_shutdown) {
- int do_failover = 0;
- int long_sleep;
+ while (!kiblnd_data.kib_shutdown) {
+ bool do_failover = false;
+ int long_sleep;
list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
- ibd_fail_list) {
- if (cfs_time_before(cfs_time_current(),
- dev->ibd_next_failover))
- continue;
- do_failover = 1;
- break;
- }
+ ibd_fail_list) {
+ if (ktime_get_seconds() < dev->ibd_next_failover)
+ continue;
+ do_failover = true;
+ break;
+ }
- if (do_failover) {
+ if (do_failover) {
list_del_init(&dev->ibd_fail_list);
- dev->ibd_failover = 1;
+ dev->ibd_failover = 1;
write_unlock_irqrestore(glock, flags);
- rc = kiblnd_dev_failover(dev);
+ rc = kiblnd_dev_failover(dev, ns);
write_lock_irqsave(glock, flags);
- LASSERT (dev->ibd_failover);
- dev->ibd_failover = 0;
- if (rc >= 0) { /* Device is OK or failover succeed */
- dev->ibd_next_failover = cfs_time_shift(3);
- continue;
- }
-
- /* failed to failover, retry later */
- dev->ibd_next_failover =
- cfs_time_shift(min(dev->ibd_failed_failover, 10));
- if (kiblnd_dev_can_failover(dev)) {
+ LASSERT(dev->ibd_failover);
+ dev->ibd_failover = 0;
+ if (rc >= 0) { /* Device is OK or failover succeed */
+ dev->ibd_next_failover = ktime_get_seconds() + 3;
+ continue;
+ }
+
+ /* failed to failover, retry later */
+ dev->ibd_next_failover = ktime_get_seconds() +
+ min(dev->ibd_failed_failover, 10);
+ if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- }
+ &kiblnd_data.kib_failed_devs);
+ }
- continue;
- }
+ continue;
+ }
- /* long sleep if no more pending failover */
+ /* long sleep if no more pending failover */
long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
set_current_state(TASK_INTERRUPTIBLE);
write_unlock_irqrestore(glock, flags);
rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
- cfs_time_seconds(1));
+ cfs_time_seconds(1));
set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_lock_irqsave(glock, flags);
- if (!long_sleep || rc != 0)
- continue;
+ if (!long_sleep || rc != 0)
+ continue;
- /* have a long sleep, routine check all active devices,
- * we need checking like this because if there is not active
- * connection on the dev and no SEND from local, we may listen
- * on wrong HCA for ever while there is a bonding failover */
+ /* after a long sleep, routinely check all active devices;
+ * we need a check like this because if there is no active
+ * connection on the dev and no SEND from the local node, we
+ * may listen on the wrong HCA forever while a bonding
+ * failover is in progress
+ */
list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (kiblnd_dev_can_failover(dev)) {
+ if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- }
- }
- }
+ &kiblnd_data.kib_failed_devs);
+ }
+ }
+ }
write_unlock_irqrestore(glock, flags);
- kiblnd_thread_fini();
- return 0;
+ kiblnd_thread_fini();
+ return 0;
}
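The failover bookkeeping above moves from jiffies-based helpers
(cfs_time_current()/cfs_time_shift()) to absolute monotonic seconds:
ibd_next_failover now holds ktime_get_seconds() plus a delay, and
expiry is a plain "<" comparison. A minimal sketch of the retry
backoff, with an illustrative function name:

	static time64_t example_next_failover(int failed_attempts)
	{
		/* back off linearly with each failure, capped at 10s */
		return ktime_get_seconds() + min(failed_attempts, 10);
	}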