*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/klnds/o2iblnd/o2iblnd_cb.c
*
static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni);
static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
int error);
-static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx,
- int type, int body_nob);
+static struct ib_rdma_wr *
+kiblnd_init_tx_msg_payload(struct lnet_ni *ni, struct kib_tx *tx,
+ int type, int body_nob, int payload_nob);
+#define kiblnd_init_tx_msg(ni, tx, type, body) \
+ kiblnd_init_tx_msg_payload(ni, tx, type, body, 0)
static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
int resid, struct kib_rdma_desc *dstrd, u64 dstcookie);
static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
static void kiblnd_unmap_tx(struct kib_tx *tx);
static void kiblnd_check_sends_locked(struct kib_conn *conn);
-void
+static void
kiblnd_tx_done(struct kib_tx *tx)
{
struct lnet_msg *lntmsg[2];
{
struct kib_tx *tx;
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct kib_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(txlist,
+ struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
/* complete now */
tx->tx_waiting = 0;
LASSERT (tx->tx_lntmsg[1] == NULL);
LASSERT (tx->tx_nfrags == 0);
+ tx->tx_gpu = 0;
tx->tx_gaps = false;
tx->tx_hstatus = LNET_MSG_STATUS_OK;
struct kib_conn *conn = rx->rx_conn;
struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
struct ib_recv_wr *bad_wrq = NULL;
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
#endif
int rc;
LASSERT (credit == IBLND_POSTRX_NO_CREDIT ||
credit == IBLND_POSTRX_PEER_CREDIT ||
credit == IBLND_POSTRX_RSRVD_CREDIT);
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
LASSERT(mr != NULL);
rx->rx_sge.lkey = mr->lkey;
* own this rx (and rx::rx_conn) anymore, LU-5678.
*/
kiblnd_conn_addref(conn);
-#ifdef HAVE_IB_POST_SEND_RECV_CONST
+#ifdef HAVE_OFED_IB_POST_SEND_RECV_CONST
rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
(const struct ib_recv_wr **)&bad_wrq);
#else
static struct kib_tx *
kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
- struct list_head *tmp;
-
- list_for_each(tmp, &conn->ibc_active_txs) {
- struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
+ struct kib_tx *tx;
+ list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) {
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
if (tx->tx_status == 0) { /* success so far */
if (status < 0) { /* failed? */
+ if (status == -ECONNABORTED) {
+ CDEBUG(D_NET, "bad status for connection to %s "
+ "with completion type %x\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ txtype);
+ }
+
tx->tx_status = status;
tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
} else if (txtype == IBLND_MSG_GET_REQ) {
kiblnd_handle_rx(struct kib_rx *rx)
{
struct kib_msg *msg = rx->rx_msg;
- struct kib_conn *conn = rx->rx_conn;
+ struct kib_conn *conn = rx->rx_conn;
struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- int credits = msg->ibm_credits;
+ int credits = msg->ibm_credits;
struct kib_tx *tx;
- int rc = 0;
- int rc2;
- int post_credit;
+ int rc = 0;
+ int rc2;
+ int post_credit;
+ struct lnet_hdr hdr;
+ struct lnet_nid srcnid;
LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- CDEBUG (D_NET, "Received %x[%d] from %s\n",
- msg->ibm_type, credits,
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CDEBUG(D_NET, "Received %x[%d] nob %u cm_id %p qp_num 0x%x\n",
+ msg->ibm_type, credits,
+ msg->ibm_nob,
+ conn->ibc_cmid,
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0);
+ kiblnd_dump_conn_dbg(conn);
if (credits != 0) {
/* Have I received credits that will let me send? */
post_credit = IBLND_POSTRX_PEER_CREDIT;
break;
- case IBLND_MSG_IMMEDIATE:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr,
- msg->ibm_srcnid, rx, 0);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
+ case IBLND_MSG_IMMEDIATE:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ lnet_hdr_from_nid4(&hdr, &msg->ibm_u.immediate.ibim_hdr);
+ lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
+ rc = lnet_parse(ni, &hdr, &srcnid, rx, 0);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
- case IBLND_MSG_PUT_REQ:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr,
- msg->ibm_srcnid, rx, 1);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
+ case IBLND_MSG_PUT_REQ:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ lnet_hdr_from_nid4(&hdr, &msg->ibm_u.putreq.ibprm_hdr);
+ lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
+ rc = lnet_parse(ni, &hdr, &srcnid, rx, 1);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
case IBLND_MSG_PUT_NAK:
CWARN ("PUT_NACK from %s\n",
msg->ibm_u.completion.ibcm_cookie);
break;
- case IBLND_MSG_GET_REQ:
- post_credit = IBLND_POSTRX_DONT_POST;
- rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr,
- msg->ibm_srcnid, rx, 1);
- if (rc < 0) /* repost on error */
- post_credit = IBLND_POSTRX_PEER_CREDIT;
- break;
+ case IBLND_MSG_GET_REQ:
+ post_credit = IBLND_POSTRX_DONT_POST;
+ lnet_hdr_from_nid4(&hdr, &msg->ibm_u.get.ibgm_hdr);
+ lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid);
+ rc = lnet_parse(ni, &hdr, &srcnid, rx, 1);
+ if (rc < 0) /* repost on error */
+ post_credit = IBLND_POSTRX_PEER_CREDIT;
+ break;
case IBLND_MSG_GET_DONE:
post_credit = IBLND_POSTRX_RSRVD_CREDIT;
if (rc < 0) /* protocol error */
kiblnd_close_conn(conn, rc);
- if (post_credit != IBLND_POSTRX_DONT_POST)
- kiblnd_post_rx(rx, post_credit);
+ if (post_credit != IBLND_POSTRX_DONT_POST)
+ kiblnd_post_rx(rx, post_credit);
}
static void
int rc;
int err = -EIO;
- LASSERT (net != NULL);
- LASSERT (rx->rx_nob < 0); /* was posted */
- rx->rx_nob = 0; /* isn't now */
+ LASSERT(net);
+ LASSERT(rx->rx_nob < 0); /* was posted */
+ rx->rx_nob = 0; /* isn't now */
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
- goto ignore;
+ if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
+ goto ignore;
- if (status != IB_WC_SUCCESS) {
- CNETERR("Rx from %s failed: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
- goto failed;
- }
+ if (status != IB_WC_SUCCESS) {
+ CNETERR("Rx from %s failed: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
+ goto failed;
+ }
- LASSERT (nob >= 0);
- rx->rx_nob = nob;
+ LASSERT(nob >= 0);
+ rx->rx_nob = nob;
- rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc != 0) {
- CERROR ("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- goto failed;
- }
+ rc = kiblnd_unpack_msg(msg, rx->rx_nob);
+ if (rc != 0) {
+ CERROR("Error %d unpacking rx from %s\n",
+ rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ goto failed;
+ }
- if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
- msg->ibm_dstnid != ni->ni_nid ||
- msg->ibm_srcstamp != conn->ibc_incarnation ||
- msg->ibm_dststamp != net->ibn_incarnation) {
- CERROR ("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- err = -ESTALE;
- goto failed;
- }
+ if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
+ msg->ibm_dstnid != lnet_nid_to_nid4(&ni->ni_nid) ||
+ msg->ibm_srcstamp != conn->ibc_incarnation ||
+ msg->ibm_dststamp != net->ibn_incarnation) {
+ CERROR("Stale rx from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ err = -ESTALE;
+ goto failed;
+ }
- /* set time last known alive */
- kiblnd_peer_alive(conn->ibc_peer);
+ /* set time last known alive */
+ kiblnd_peer_alive(conn->ibc_peer);
- /* racing with connection establishment/teardown! */
+ /* racing with connection establishment/teardown! */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
return;
}
write_unlock_irqrestore(g_lock, flags);
- }
- kiblnd_handle_rx(rx);
- return;
+ }
+ kiblnd_handle_rx(rx);
+ return;
- failed:
- CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
- kiblnd_close_conn(conn, err);
- ignore:
- kiblnd_drop_rx(rx); /* Don't re-post rx. */
+failed:
+ CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
+ kiblnd_close_conn(conn, err);
+ignore:
+ kiblnd_drop_rx(rx); /* Don't re-post rx. */
}
static int
* in trying to map the memory, because it'll just fail. So
* preemptively fail with an appropriate message
*/
- if ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED) &&
+ if (IS_FAST_REG_DEV(dev) &&
!(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT) &&
tx->tx_gaps) {
CERROR("Using FastReg with no GAPS support, but tx has gaps. "
return -EPROTONOSUPPORT;
}
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
/*
* FMR does not support gaps but the tx has gaps then
* we should make sure that the number of fragments we'll be sending
* the fragments in one FastReg or FMR fragment.
*/
if (
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
&& !tx->tx_gaps) ||
#endif
- (dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)) {
+ IS_FAST_REG_DEV(dev)) {
/* FMR requires zero based address */
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
#endif
kiblnd_unmap_tx(struct kib_tx *tx)
{
if (
-#ifdef HAVE_FMR_POOL_API
+#ifdef HAVE_OFED_FMR_POOL_API
tx->tx_fmr.fmr_pfmr ||
#endif
tx->tx_fmr.fmr_frd)
kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);
if (tx->tx_nfrags != 0) {
- kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
- tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
+ kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev, tx);
tx->tx_nfrags = 0;
}
}
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
static struct ib_mr *
kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
{
* memory regions. If that's not available either, then you're
* dead in the water and fail the operation.
*/
- if (tunables->lnd_map_on_demand &&
- (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED
-#ifdef HAVE_FMR_POOL_API
+ if (tunables->lnd_map_on_demand && (IS_FAST_REG_DEV(net->ibn_dev)
+#ifdef HAVE_OFED_FMR_POOL_API
|| net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED
#endif
))
{
struct kib_net *net = ni->ni_data;
struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *mr = NULL;
#endif
__u32 nob;
tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
tx->tx_nfrags = nfrags;
- rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
- tx->tx_nfrags, tx->tx_dmadir);
-
+ rd->rd_nfrags = kiblnd_dma_map_sg(hdev, tx);
for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len(
hdev->ibh_ibdev, &tx->tx_frags[i]);
nob += rd->rd_frags[i].rf_nob;
}
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
mr = kiblnd_find_rd_dma_mr(ni, rd);
if (mr != NULL) {
/* found pre-mapping MR */
{
struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
- int fragnob;
- int max_nkiov;
+ int fragnob;
+ int max_nkiov;
+ int sg_count = 0;
- CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
+ CDEBUG(D_NET, "niov %d offset %d nob %d gpu %d\n",
+ nkiov, offset, nob, tx->tx_gpu);
LASSERT(nob > 0);
LASSERT(nkiov > 0);
do {
LASSERT(nkiov > 0);
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
+ sg_count++;
+
fragnob = min((int)(kiov->bv_len - offset), nob);
/*
sg_set_page(sg, kiov->bv_page, fragnob,
kiov->bv_offset + offset);
sg = sg_next(sg);
- if (!sg) {
- CERROR("lacking enough sg entries to map tx\n");
- return -EFAULT;
- }
offset = 0;
kiov++;
nob -= fragnob;
} while (nob > 0);
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
+ return kiblnd_map_tx(ni, tx, rd, sg_count);
}
static int
struct kib_msg *msg = tx->tx_msg;
struct kib_peer_ni *peer_ni = conn->ibc_peer;
struct lnet_ni *ni = peer_ni->ibp_ni;
+ struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
int ver = conn->ibc_version;
int rc;
int done;
}
if (credit != 0 && conn->ibc_credits == 0) { /* no credits */
- CDEBUG(D_NET, "%s: no credits\n",
- libcfs_nid2str(peer_ni->ibp_nid));
+ CDEBUG(D_NET, "%s: no credits cm_id %p qp_num 0x%x\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ conn->ibc_cmid,
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0);
+ kiblnd_dump_conn_dbg(conn);
return -EAGAIN;
}
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
kiblnd_tx_done(tx);
spin_lock(&conn->ibc_lock);
- CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
- libcfs_nid2str(peer_ni->ibp_nid),
- conn->ibc_noops_posted);
- return 0;
+ CDEBUG(D_NET, "%s(%d): redundant or enough NOOP cm_id %p qp_num 0x%x\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ conn->ibc_noops_posted,
+ conn->ibc_cmid,
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0);
+ kiblnd_dump_conn_dbg(conn);
+ return 0;
}
+ CDEBUG(D_NET, "Transmit %x[%d] nob %u cm_id %p qp_num 0x%x\n",
+ msg->ibm_type, credit,
+ msg->ibm_nob,
+ conn->ibc_cmid,
+ conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0);
+ kiblnd_dump_conn_dbg(conn);
+
kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits,
peer_ni->ibp_nid, conn->ibc_incarnation);
/* I'm still holding ibc_lock! */
if (conn->ibc_state != IBLND_CONN_ESTABLISHED) {
+ CDEBUG(D_NET, "connection to %s is not established\n",
+ conn->ibc_peer? libcfs_nid2str(conn->ibc_peer->ibp_nid): "NULL");
rc = -ECONNABORTED;
} else if (tx->tx_pool->tpo_pool.po_failed ||
conn->ibc_hdev != tx->tx_pool->tpo_hdev) {
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
struct ib_send_wr *wr = &tx->tx_wrq[0].wr;
- if (frd != NULL) {
- if (!frd->frd_valid) {
- wr = &frd->frd_inv_wr.wr;
- wr->next = &frd->frd_fastreg_wr.wr;
- } else {
- wr = &frd->frd_fastreg_wr.wr;
- }
+ if (frd != NULL && !frd->frd_posted) {
+ wr = &frd->frd_inv_wr.wr;
+ wr->next = &frd->frd_fastreg_wr.wr;
frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
}
if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus))
rc = -EINVAL;
else
-#ifdef HAVE_IB_POST_SEND_RECV_CONST
+#ifdef HAVE_OFED_IB_POST_SEND_RECV_CONST
rc = ib_post_send(conn->ibc_cmid->qp, wr,
(const struct ib_send_wr **)&bad);
#else
rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
#endif
+ if (frd && !frd->frd_posted) {
+ /* The local invalidate becomes invalid (has been
+ * successfully used) if the post succeeds or the
+ * failing wr was not the invalidate. */
+ frd->frd_valid =
+ !(rc == 0 || (bad != &frd->frd_inv_wr.wr));
+ }
}
conn->ibc_last_send = ktime_get();
- if (rc == 0)
- return 0;
+ if (rc == 0) {
+ if (frd != NULL)
+ frd->frd_posted = true;
+ return 0;
+ }
/* NB credits are transferred in the actual
* message, which can only be the last work item */
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- struct kib_tx, tx_list);
+ (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd,
+ struct kib_tx, tx_list)) != NULL) {
list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue_nocred,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT (!IBLND_OOB_CAPABLE(ver));
credit = 1;
- tx = list_entry(conn->ibc_tx_noops.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_noops,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue,
+ struct kib_tx, tx_list);
} else
break;
tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
tx->tx_waiting = 0; /* don't wait for peer_ni */
tx->tx_status = -EIO;
+#ifdef O2IBLND_CONN_STATE_DEBUG
+ kiblnd_dump_conn_dbg(conn);
+#endif
}
idle = (tx->tx_sending == 0) && /* This is the final callback */
kiblnd_tx_done(tx);
}
+
static void
-kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type,
- int body_nob)
+kiblnd_init_tx_sge(struct kib_tx *tx, u64 addr, unsigned int len)
{
+ struct ib_sge *sge = &tx->tx_sge[tx->tx_nsge];
struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
- struct ib_sge *sge = &tx->tx_msgsge;
- struct ib_rdma_wr *wrq;
- int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
-#ifdef HAVE_IB_GET_DMA_MR
+#ifdef HAVE_OFED_IB_GET_DMA_MR
struct ib_mr *mr = hdev->ibh_mrs;
#endif
+ *sge = (struct ib_sge) {
+#ifdef HAVE_OFED_IB_GET_DMA_MR
+ .lkey = mr->lkey,
+#else
+ .lkey = hdev->ibh_pd->local_dma_lkey,
+#endif
+ .addr = addr,
+ .length = len,
+ };
+
+ tx->tx_nsge++;
+}
+
+static struct ib_rdma_wr *
+kiblnd_init_tx_msg_payload(struct lnet_ni *ni, struct kib_tx *tx, int type,
+ int body_nob, int payload)
+{
+ struct ib_rdma_wr *wrq;
+ int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
+
LASSERT(tx->tx_nwrq >= 0);
LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
LASSERT(nob <= IBLND_MSG_SIZE);
-#ifdef HAVE_IB_GET_DMA_MR
- LASSERT(mr != NULL);
-#endif
- kiblnd_init_msg(tx->tx_msg, type, body_nob);
-
-#ifdef HAVE_IB_GET_DMA_MR
- sge->lkey = mr->lkey;
-#else
- sge->lkey = hdev->ibh_pd->local_dma_lkey;
-#endif
- sge->addr = tx->tx_msgaddr;
- sge->length = nob;
+ kiblnd_init_msg(tx->tx_msg, type, body_nob + payload);
wrq = &tx->tx_wrq[tx->tx_nwrq];
- memset(wrq, 0, sizeof(*wrq));
- wrq->wr.next = NULL;
- wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
- wrq->wr.sg_list = sge;
- wrq->wr.num_sge = 1;
- wrq->wr.opcode = IB_WR_SEND;
- wrq->wr.send_flags = IB_SEND_SIGNALED;
+ *wrq = (struct ib_rdma_wr) {
+ .wr = {
+ .wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+ .num_sge = 1,
+ .sg_list = &tx->tx_sge[tx->tx_nsge],
+ .opcode = IB_WR_SEND,
+ .send_flags = IB_SEND_SIGNALED,
+ },
+ };
+
+ kiblnd_init_tx_sge(tx, tx->tx_msgaddr, nob);
tx->tx_nwrq++;
+ return wrq;
}
static int
int prev = dstidx;
if (srcidx >= srcrd->rd_nfrags) {
- CERROR("Src buffer exhausted: %d frags\n", srcidx);
+ CERROR("Src buffer exhausted: %d frags %px\n",
+ srcidx, tx);
rc = -EPROTO;
break;
}
wrq->wr.opcode = IB_WR_RDMA_WRITE;
wrq->wr.send_flags = 0;
-#ifdef HAVE_IB_RDMA_WR
+#ifdef HAVE_OFED_IB_RDMA_WR
wrq->remote_addr = kiblnd_rd_frag_addr(dstrd,
dstidx);
wrq->rkey = kiblnd_rd_frag_key(dstrd,
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) {
+ CDEBUG(D_NET, "connection with %s is disconnected\n",
+ conn->ibc_peer? libcfs_nid2str(conn->ibc_peer->ibp_nid): "NULL");
+
tx->tx_status = -ECONNABORTED;
tx->tx_waiting = 0;
if (tx->tx_conn != NULL) {
int rc;
int i;
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ s64 timeout_ns;
/* If I get here, I've committed to send, so I complete the tx with
* failure on any problems
return;
}
+ timeout_ns = kiblnd_timeout() * NSEC_PER_SEC;
read_unlock(g_lock);
/* Re-try with a write lock */
write_lock(g_lock);
if (list_empty(&peer_ni->ibp_conns)) {
/* found a peer_ni, but it's still connecting... */
LASSERT(kiblnd_peer_connecting(peer_ni));
- if (tx != NULL)
+ if (tx != NULL) {
+ tx->tx_deadline = ktime_add_ns(ktime_get(),
+ timeout_ns);
list_add_tail(&tx->tx_list,
&peer_ni->ibp_tx_queue);
+ }
write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer_ni);
if (list_empty(&peer2->ibp_conns)) {
/* found a peer_ni, but it's still connecting... */
LASSERT(kiblnd_peer_connecting(peer2));
- if (tx != NULL)
+ if (tx != NULL) {
+ tx->tx_deadline = ktime_add_ns(ktime_get(),
+ timeout_ns);
list_add_tail(&tx->tx_list,
&peer2->ibp_tx_queue);
+ }
write_unlock_irqrestore(g_lock, flags);
} else {
conn = kiblnd_get_conn_locked(peer2);
/* always called with a ref on ni, which prevents ni being shutdown */
LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0);
- if (tx != NULL)
+ if (tx != NULL) {
+ tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);
list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
+ }
kiblnd_peer_addref(peer_ni);
hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nid);
int
kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg)
{
+ struct kib_dev *dev = ((struct kib_net *)ni->ni_data)->ibn_dev;
struct lnet_hdr *hdr = &lntmsg->msg_hdr;
- int type = lntmsg->msg_type;
- struct lnet_process_id target = lntmsg->msg_target;
- int target_is_router = lntmsg->msg_target_is_router;
- int routing = lntmsg->msg_routing;
- unsigned int payload_niov = lntmsg->msg_niov;
- struct bio_vec *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
+ int type = lntmsg->msg_type;
+ struct lnet_processid *target = &lntmsg->msg_target;
+ int target_is_router = lntmsg->msg_target_is_router;
+ int routing = lntmsg->msg_routing;
+ unsigned int payload_niov = lntmsg->msg_niov;
+ struct bio_vec *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ struct lnet_libmd *msg_md = lntmsg->msg_md;
+ bool gpu;
struct kib_msg *ibmsg;
struct kib_rdma_desc *rd;
struct kib_tx *tx;
- int nob;
- int rc;
+ int nob;
+ int rc;
- /* NB 'private' is different depending on what we're sending.... */
+ /* NB 'private' is different depending on what we're sending.... */
- CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
- payload_nob, payload_niov, libcfs_id2str(target));
+ CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n",
+ payload_nob, payload_niov, libcfs_idstr(target));
- LASSERT (payload_nob == 0 || payload_niov > 0);
- LASSERT (payload_niov <= LNET_MAX_IOV);
+ LASSERT(payload_nob == 0 || payload_niov > 0);
/* Thread context */
- LASSERT (!in_interrupt());
+ LASSERT(!in_interrupt());
+
+ tx = kiblnd_get_idle_tx(ni, lnet_nid_to_nid4(&target->nid));
+ if (tx == NULL) {
+ CERROR("Can't allocate %s txd for %s\n",
+ lnet_msgtyp2str(type),
+ libcfs_nidstr(&target->nid));
+ return -ENOMEM;
+ }
+ ibmsg = tx->tx_msg;
+ gpu = lnet_md_is_gpu(msg_md);
switch (type) {
default:
LBUG();
return (-EIO);
- case LNET_MSG_ACK:
- LASSERT (payload_nob == 0);
- break;
-
- case LNET_MSG_GET:
- if (routing || target_is_router)
- break; /* send IMMEDIATE */
+ case LNET_MSG_ACK:
+ LASSERT(payload_nob == 0);
+ break;
- /* is the REPLY message too small for RDMA? */
- nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
+ case LNET_MSG_GET:
+ if (routing || target_is_router)
+ break; /* send IMMEDIATE */
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate txd for GET to %s\n",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
+ /* is the REPLY message too small for RDMA? */
+ nob = offsetof(struct kib_msg,
+ ibm_u.immediate.ibim_payload[msg_md->md_length]);
+ if (nob <= IBLND_MSG_SIZE && !gpu)
+ break; /* send IMMEDIATE */
- ibmsg = tx->tx_msg;
rd = &ibmsg->ibm_u.get.ibgm_rd;
+ tx->tx_gpu = gpu;
rc = kiblnd_setup_rd_kiov(ni, tx, rd,
- lntmsg->msg_md->md_niov,
- lntmsg->msg_md->md_kiov,
- 0, lntmsg->msg_md->md_length);
+ msg_md->md_niov,
+ msg_md->md_kiov,
+ 0, msg_md->md_length);
if (rc != 0) {
CERROR("Can't setup GET sink for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
+ libcfs_nidstr(&target->nid), rc);
tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
kiblnd_tx_done(tx);
return -EIO;
nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
- ibmsg->ibm_u.get.ibgm_hdr = *hdr;
+ lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.get.ibgm_hdr);
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob);
- tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
+ tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg);
if (tx->tx_lntmsg[1] == NULL) {
CERROR("Can't create reply for GET -> %s\n",
- libcfs_nid2str(target.nid));
+ libcfs_nidstr(&target->nid));
kiblnd_tx_done(tx);
return -EIO;
}
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */
- tx->tx_waiting = 1; /* waiting for GET_DONE */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
+ /* finalise lntmsg[0,1] on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+ tx->tx_waiting = 1; /* waiting for GET_DONE */
+ kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
+ return 0;
- case LNET_MSG_REPLY:
- case LNET_MSG_PUT:
- /* Is the payload small enough not to need RDMA? */
- nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
+ case LNET_MSG_REPLY:
+ case LNET_MSG_PUT:
+ /* Is the payload small enough not to need RDMA? */
+ nob = offsetof(struct kib_msg,
+ ibm_u.immediate.ibim_payload[payload_nob]);
+ if (nob <= IBLND_MSG_SIZE && !gpu)
+ break; /* send IMMEDIATE */
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate %s txd for %s\n",
- type == LNET_MSG_PUT ? "PUT" : "REPLY",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
+ tx->tx_gpu = gpu;
rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
payload_niov, payload_kiov,
payload_offset, payload_nob);
if (rc != 0) {
CERROR("Can't setup PUT src for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
+ libcfs_nidstr(&target->nid), rc);
kiblnd_tx_done(tx);
return -EIO;
}
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
- ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
+ lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.putreq.ibprm_hdr);
+ ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
sizeof(struct kib_putreq_msg));
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
- }
+ /* finalise lntmsg[0,1] on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+ tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
+ kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
+ return 0;
+ }
/* send IMMEDIATE */
LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
<= IBLND_MSG_SIZE);
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR ("Can't send %d to %s: tx descs exhausted\n",
- type, libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
+ ibmsg = tx->tx_msg;
+ lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.immediate.ibim_hdr);
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
+ if (IS_FAST_REG_DEV(dev) && payload_nob) {
+ struct ib_rdma_wr *wrq;
+ int i;
- lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(struct kib_msg,
- ibm_u.immediate.ibim_payload),
- payload_niov, payload_kiov,
- payload_offset, payload_nob);
+ nob = offsetof(struct kib_immediate_msg, ibim_payload[0]);
+ wrq = kiblnd_init_tx_msg_payload(ni, tx, IBLND_MSG_IMMEDIATE,
+ nob, payload_nob);
- nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
+ rd = tx->tx_rd;
+ rc = kiblnd_setup_rd_kiov(ni, tx, rd,
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+ if (rc != 0) {
+ CERROR("Can't setup IMMEDIATE src for %s: %d\n",
+ libcfs_nidstr(&target->nid), rc);
+ kiblnd_tx_done(tx);
+ return -EIO;
+ }
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
+ /* lets generate a SGE chain */
+ for (i = 0; i < rd->rd_nfrags; i++) {
+ kiblnd_init_tx_sge(tx, rd->rd_frags[i].rf_addr,
+ rd->rd_frags[i].rf_nob);
+ wrq->wr.num_sge++;
+ }
+ } else {
+ lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
+ offsetof(struct kib_msg,
+ ibm_u.immediate.ibim_payload),
+ payload_niov, payload_kiov,
+ payload_offset, payload_nob);
+
+ nob = offsetof(struct kib_immediate_msg,
+ ibim_payload[payload_nob]);
+
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
+ }
+
+ /* finalise lntmsg on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+
+ kiblnd_launch_tx(ni, tx, lnet_nid_to_nid4(&target->nid));
+ return 0;
}
static void
kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
{
- struct lnet_process_id target = lntmsg->msg_target;
+ struct lnet_processid *target = &lntmsg->msg_target;
unsigned int niov = lntmsg->msg_niov;
struct bio_vec *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
+ struct lnet_libmd *msg_md = lntmsg->msg_md;
struct kib_tx *tx;
int rc;
tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
- CERROR("Can't get tx for REPLY to %s\n",
- libcfs_nid2str(target.nid));
- goto failed_0;
- }
+ if (tx == NULL) {
+ CERROR("Can't get tx for REPLY to %s\n",
+ libcfs_nidstr(&target->nid));
+ goto failed_0;
+ }
- if (nob == 0)
- rc = 0;
- else
- rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
- niov, kiov, offset, nob);
+ tx->tx_gpu = lnet_md_is_gpu(msg_md);
- if (rc != 0) {
- CERROR("Can't setup GET src for %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- goto failed_1;
- }
+ if (nob == 0)
+ rc = 0;
+ else
+ rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
+ niov, kiov, offset, nob);
- rc = kiblnd_init_rdma(rx->rx_conn, tx,
- IBLND_MSG_GET_DONE, nob,
- &rx->rx_msg->ibm_u.get.ibgm_rd,
- rx->rx_msg->ibm_u.get.ibgm_cookie);
- if (rc < 0) {
- CERROR("Can't setup rdma for GET from %s: %d\n",
- libcfs_nid2str(target.nid), rc);
- goto failed_1;
- }
+ if (rc != 0) {
+ CERROR("Can't setup GET src for %s: %d\n",
+ libcfs_nidstr(&target->nid), rc);
+ goto failed_1;
+ }
+
+ rc = kiblnd_init_rdma(rx->rx_conn, tx,
+ IBLND_MSG_GET_DONE, nob,
+ &rx->rx_msg->ibm_u.get.ibgm_rd,
+ rx->rx_msg->ibm_u.get.ibgm_cookie);
+ if (rc < 0) {
+ CERROR("Can't setup rdma for GET from %s: %d\n",
+ libcfs_nidstr(&target->nid), rc);
+ goto failed_1;
+ }
if (nob == 0) {
/* No RDMA: local completion may happen now! */
tx->tx_lntmsg[0] = lntmsg;
}
- kiblnd_queue_tx(tx, rx->rx_conn);
- return;
+ kiblnd_queue_tx(tx, rx->rx_conn);
+ return;
failed_1:
lnet_finalize(lntmsg, -EIO);
}
+/* Map this NI's IB device to an LNet device priority.
+ *
+ * \param ni      LNet network interface backed by a kib_net
+ * \param dev_idx index of the device to compare against
+ *
+ * \return value from lnet_get_dev_prio() for the NI's underlying
+ *         dma_device; presumably a relative NUMA/device distance —
+ *         confirm against lnet_get_dev_prio() in lnet core.
+ */
+unsigned int
+kiblnd_get_dev_prio(struct lnet_ni *ni, unsigned int dev_idx)
+{
+	struct kib_net *net = ni->ni_data;
+	struct device *dev = NULL;
+
+	/* ni_data can be NULL (e.g. before setup / after shutdown);
+	 * fall through with dev == NULL and let lnet_get_dev_prio()
+	 * return its default in that case. */
+	if (net)
+		dev = net->ibn_dev->ibd_hdev->ibh_ibdev->dma_device;
+
+	return lnet_get_dev_prio(dev, dev_idx);
+
+}
+
int
kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov, struct bio_vec *kiov,
switch (rxmsg->ibm_type) {
default:
LBUG();
-
- case IBLND_MSG_IMMEDIATE:
+ /* fallthrough */
+ case IBLND_MSG_IMMEDIATE:
nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
- if (nob > rx->rx_nob) {
- CERROR ("Immediate message from %s too big: %d(%d)\n",
- libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
- nob, rx->rx_nob);
- rc = -EPROTO;
- break;
- }
+ if (nob > rx->rx_nob) {
+ CERROR("Immediate message from %s too big: %d(%d)\n",
+ libcfs_nidstr(&lntmsg->msg_hdr.src_nid),
+ nob, rx->rx_nob);
+ rc = -EPROTO;
+ break;
+ }
lnet_copy_flat2kiov(niov, kiov, offset,
IBLND_MSG_SIZE, rxmsg,
case IBLND_MSG_PUT_REQ: {
struct kib_msg *txmsg;
struct kib_rdma_desc *rd;
- ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
+ struct lnet_libmd *msg_md = lntmsg->msg_md;
+ ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
if (mlen == 0) {
lnet_finalize(lntmsg, 0);
kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
}
tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
- if (tx == NULL) {
- CERROR("Can't allocate tx for %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- /* Not replying will break the connection */
- rc = -ENOMEM;
- break;
- }
+ if (tx == NULL) {
+ CERROR("Can't allocate tx for %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ /* Not replying will break the connection */
+ rc = -ENOMEM;
+ break;
+ }
+
+ tx->tx_gpu = lnet_md_is_gpu(msg_md);
txmsg = tx->tx_msg;
rd = &txmsg->ibm_u.putack.ibpam_rd;
return rc;
}
-int
-kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- atomic_inc(&kiblnd_data.kib_nthreads);
- return 0;
-}
-
static void
kiblnd_thread_fini (void)
{
static void
kiblnd_peer_notify(struct kib_peer_ni *peer_ni)
{
- int error = 0;
+ int error = 0;
time64_t last_alive = 0;
unsigned long flags;
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (error != 0)
- lnet_notify(peer_ni->ibp_ni,
- peer_ni->ibp_nid, false, false, last_alive);
+ if (error != 0) {
+ struct lnet_nid nid;
+
+ lnet_nid4_to_nid(peer_ni->ibp_nid, &nid);
+ lnet_notify(peer_ni->ibp_ni, &nid,
+ false, false, last_alive);
+ }
}
void
list_empty(&conn->ibc_tx_queue) &&
list_empty(&conn->ibc_tx_queue_rsrvd) &&
list_empty(&conn->ibc_tx_queue_nocred) &&
- list_empty(&conn->ibc_active_txs)) {
- CDEBUG(D_NET, "closing conn to %s\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- } else {
- CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n",
- libcfs_nid2str(peer_ni->ibp_nid), error,
- list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
- list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)",
- list_empty(&conn->ibc_tx_queue_rsrvd) ?
+ list_empty(&conn->ibc_active_txs))
+ CDEBUG(D_NET, "closing conn %p to %s\n",
+ conn,
+ libcfs_nid2str(peer_ni->ibp_nid));
+ else
+ CNETERR("Closing conn %p to %s: error %d%s%s%s%s%s\n",
+ conn,
+ libcfs_nid2str(peer_ni->ibp_nid), error,
+ list_empty(&conn->ibc_tx_queue) ? "" : "(sending)",
+ list_empty(&conn->ibc_tx_noops) ?
+ "" : "(sending_noops)",
+ list_empty(&conn->ibc_tx_queue_rsrvd) ?
"" : "(sending_rsrvd)",
- list_empty(&conn->ibc_tx_queue_nocred) ?
- "" : "(sending_nocred)",
- list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
- }
+ list_empty(&conn->ibc_tx_queue_nocred) ?
+ "" : "(sending_nocred)",
+ list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev;
if (peer_ni->ibp_next_conn == conn)
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!list_empty(&conn->ibc_early_rxs)) {
- rx = list_entry(conn->ibc_early_rxs.next,
- struct kib_rx, rx_list);
+ while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs,
+ struct kib_rx,
+ rx_list)) != NULL) {
list_del(&rx->rx_list);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* Complete all tx descs not waiting for sends to complete.
* NB we should be safe from RDMA now that the QP has changed state */
+ CDEBUG(D_NET, "abort connection with %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid));
+
kiblnd_abort_txs(conn, &conn->ibc_tx_noops);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
/* connection established */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- /* reset retry count */
- peer_ni->ibp_retries = 0;
-
conn->ibc_last_send = ktime_get();
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
kiblnd_peer_alive(peer_ni);
* scheduled. We won't be using round robin on this first batch.
*/
spin_lock(&conn->ibc_lock);
- while (!list_empty(&txs)) {
- tx = list_entry(txs.next, struct kib_tx, tx_list);
+ while ((tx = list_first_entry_or_null(&txs, struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
kiblnd_queue_tx_locked(tx, conn);
{
int rc;
-#ifdef HAVE_RDMA_REJECT_4ARGS
+#ifdef HAVE_OFED_RDMA_REJECT_4ARGS
rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED);
#else
rc = rdma_reject(cmid, rej, sizeof(*rej));
struct kib_conn *conn;
struct lnet_ni *ni = NULL;
struct kib_net *net = NULL;
+ struct lnet_nid destnid;
lnet_nid_t nid;
struct rdma_conn_param cp;
struct kib_rej rej;
}
nid = reqmsg->ibm_srcnid;
- ni = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
+ lnet_nid4_to_nid(reqmsg->ibm_dstnid, &destnid);
+ ni = lnet_nid_to_ni_addref(&destnid);
if (ni != NULL) {
net = (struct kib_net *)ni->ni_data;
rej.ibr_incarnation = net->ibn_incarnation;
+ } else {
+ if (ibdev->ibd_nnets == 0) {
+ rej.ibr_why = IBLND_REJECT_EARLY;
+ CNETERR("Can't accept conn from %s (%s:%d:%pI4h): net for nid %s not added yet\n",
+ libcfs_nid2str(nid),
+ ibdev->ibd_ifname, ibdev->ibd_nnets,
+ &ibdev->ibd_ifip,
+ libcfs_nid2str(reqmsg->ibm_dstnid));
+ goto failed;
+ }
+ list_for_each_entry(net, &ibdev->ibd_nets, ibn_list) {
+ if ((net->ibn_dev == ibdev) &&
+ (net->ibn_ni != NULL) &&
+ (net->ibn_ni->ni_state != LNET_NI_STATE_ACTIVE)) {
+ rej.ibr_why = IBLND_REJECT_EARLY;
+ CNETERR("Can't accept conn from %s on %s (%s:%d:%pI4h): nid %s not ready\n",
+ libcfs_nid2str(nid),
+ libcfs_nidstr(&net->ibn_ni->ni_nid),
+ ibdev->ibd_ifname, ibdev->ibd_nnets,
+ &ibdev->ibd_ifip,
+ libcfs_nid2str(reqmsg->ibm_dstnid));
+ goto failed;
+ }
+ }
}
- if (ni == NULL || /* no matching net */
- ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
- net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n", libcfs_nid2str(nid),
- ni ? libcfs_nid2str(ni->ni_nid) : "NA",
+ if (ni == NULL || /* no matching net */
+ !nid_same(&ni->ni_nid, &destnid) || /* right NET, wrong NID! */
+ net->ibn_dev != ibdev) { /* wrong device */
+ CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n",
+ libcfs_nid2str(nid),
+ ni ? libcfs_nidstr(&ni->ni_nid) : "NA",
ibdev->ibd_ifname, ibdev->ibd_nnets,
&ibdev->ibd_ifip,
libcfs_nid2str(reqmsg->ibm_dstnid));
* the lower NID connection win so we can move forward.
*/
if (peer2->ibp_connecting != 0 &&
- nid < ni->ni_nid && peer2->ibp_races <
- MAX_CONN_RACES_BEFORE_ABORT) {
+ nid < lnet_nid_to_nid4(&ni->ni_nid) &&
+ peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) {
peer2->ibp_races++;
write_unlock_irqrestore(g_lock, flags);
cp.retry_count = *kiblnd_tunables.kib_retry_count;
cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count;
- CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid));
+ CDEBUG(D_NET, "Accept %s conn %p\n", libcfs_nid2str(nid), conn);
rc = rdma_accept(cmid, &cp);
if (rc != 0) {
- CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc);
+ CNETERR("Can't accept %s: %d cm_id %p\n", libcfs_nid2str(nid), rc, cmid);
rej.ibr_version = version;
rej.ibr_why = IBLND_REJECT_FATAL;
goto out;
}
- if (peer_ni->ibp_retries > *kiblnd_tunables.kib_retry_count) {
- reason = "retry count exceeded due to no listener";
- goto out;
- }
-
switch (why) {
default:
reason = "Unknown";
break;
case IBLND_REJECT_RDMA_FRAGS: {
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
-
if (!cp) {
reason = "can't negotiate max frags";
goto out;
}
- tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
-#ifdef HAVE_IB_GET_DMA_MR
- /*
- * This check only makes sense if the kernel supports global
- * memory registration. Otherwise, map_on_demand will never == 0
- */
- if (!tunables->lnd_map_on_demand) {
- reason = "map_on_demand must be enabled";
- goto out;
- }
-#endif
+
if (conn->ibc_max_frags <= frag_num) {
reason = "unsupported max frags";
goto out;
case IBLND_REJECT_CONN_UNCOMPAT:
reason = "version negotiation";
break;
-
- case IBLND_REJECT_INVALID_SRV_ID:
- reason = "invalid service id";
- break;
}
conn->ibc_reconnect = 1;
case IB_CM_REJ_INVALID_SERVICE_ID:
status = -EHOSTUNREACH;
- peer_ni->ibp_retries++;
- kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_INVALID_SRV_ID, NULL);
CNETERR("%s rejected: no listener at %d\n",
libcfs_nid2str(peer_ni->ibp_nid),
*kiblnd_tunables.kib_service);
libcfs_nid2str(peer_ni->ibp_nid));
break;
+ case IBLND_REJECT_EARLY:
+ CNETERR("%s rejected: tried too early\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ break;
+
default:
CERROR("%s rejected: o2iblnd reason %d\n",
libcfs_nid2str(peer_ni->ibp_nid),
}
break;
}
- /* fall through */
+ fallthrough;
default:
CNETERR("%s rejected: reason %d, size %d\n",
libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
}
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
+ if (msg->ibm_dstnid == lnet_nid_to_nid4(&ni->ni_nid) &&
msg->ibm_dststamp == net->ibn_incarnation)
rc = 0;
else
LASSERT(conn->ibc_cmid == cmid);
rc = rdma_connect_locked(cmid, &cp);
if (rc != 0) {
- CERROR("Can't connect to %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), rc);
+ CNETERR("Can't connect to %s: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), rc, cmid);
kiblnd_connreq_done(conn, rc);
kiblnd_conn_decref(conn);
- }
+ } else {
+ CDEBUG(D_NET, "Connected to %s: cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), cmid);
+ }
return 0;
}
+/* set the IP ToS ("Type of Service") used by the RoCE QoS */
+static void
+kiblnd_set_tos(struct rdma_cm_id *cmid)
+{
+ struct kib_peer_ni *peer_ni = cmid->context;
+ struct lnet_ioctl_config_o2iblnd_tunables *t;
+
+ t = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+ /* lnd_tos < 0 means the tunable is unset; keep the cm_id default */
+ if (t->lnd_tos < 0)
+ return;
+
+ rdma_set_service_type(cmid, t->lnd_tos);
+}
+
int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
rc = kiblnd_passive_connect(cmid,
(void *)KIBLND_CONN_PARAM(event),
KIBLND_CONN_PARAM_LEN(event));
- CDEBUG(D_NET, "connreq: %d\n", rc);
+ CDEBUG(D_NET, "connreq: %d cm_id %p\n", rc, cmid);
return rc;
case RDMA_CM_EVENT_ADDR_ERROR:
peer_ni = cmid->context;
- CNETERR("%s: ADDR ERROR %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ CNETERR("%s: ADDR ERROR %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), event->status, cmid);
kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer_ni);
return -EHOSTUNREACH; /* rc != 0 destroys cmid */
case RDMA_CM_EVENT_ADDR_RESOLVED:
peer_ni = cmid->context;
- CDEBUG(D_NET,"%s Addr resolved: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ CDEBUG(D_NET, "%s Addr resolved: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), event->status, cmid);
if (event->status != 0) {
- CNETERR("Can't resolve address for %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ CNETERR("Can't resolve address for %s: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ event->status, cmid);
rc = event->status;
} else {
+ kiblnd_set_tos(cmid);
rc = rdma_resolve_route(
cmid, kiblnd_timeout() * 1000);
if (rc == 0) {
}
/* Can't initiate route resolution */
- CERROR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), rc);
+ CNETERR("Can't resolve route for %s: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), rc, cmid);
}
kiblnd_peer_connect_failed(peer_ni, 1, rc);
kiblnd_peer_decref(peer_ni);
case RDMA_CM_EVENT_ROUTE_ERROR:
peer_ni = cmid->context;
- CNETERR("%s: ROUTE ERROR %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ CNETERR("%s: ROUTE ERROR %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), event->status, cmid);
kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
kiblnd_peer_decref(peer_ni);
return -EHOSTUNREACH; /* rc != 0 destroys cmid */
if (event->status == 0)
return kiblnd_active_connect(cmid);
- CNETERR("Can't resolve route for %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), event->status);
+ CNETERR("Can't resolve route for %s: %d cm_id %p\n",
+ libcfs_nid2str(peer_ni->ibp_nid), event->status, cmid);
kiblnd_peer_connect_failed(peer_ni, 1, event->status);
kiblnd_peer_decref(peer_ni);
return event->status; /* rc != 0 destroys cmid */
case RDMA_CM_EVENT_UNREACHABLE:
conn = cmid->context;
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
- conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CNETERR("%s: UNREACHABLE %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
- kiblnd_connreq_done(conn, -ENETDOWN);
- kiblnd_conn_decref(conn);
+ CNETERR("%s: UNREACHABLE %d cm_id %p conn %p ibc_state: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status, cmid, conn, conn->ibc_state);
+ LASSERT(conn->ibc_state != IBLND_CONN_INIT);
+ if (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
+ conn->ibc_state == IBLND_CONN_PASSIVE_WAIT) {
+ kiblnd_connreq_done(conn, -ENETDOWN);
+ kiblnd_conn_decref(conn);
+ }
return 0;
case RDMA_CM_EVENT_CONNECT_ERROR:
conn = cmid->context;
- LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
- conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
- CNETERR("%s: CONNECT ERROR %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status);
- kiblnd_connreq_done(conn, -ENOTCONN);
- kiblnd_conn_decref(conn);
- return 0;
+ CNETERR("%s: CONNECT ERROR %d cm_id %p conn %p state: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status, cmid, conn, conn->ibc_state);
+ if (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
+ conn->ibc_state == IBLND_CONN_PASSIVE_WAIT) {
+ kiblnd_connreq_done(conn, -ENOTCONN);
+ kiblnd_conn_decref(conn);
+ }
+ return 0;
case RDMA_CM_EVENT_REJECTED:
conn = cmid->context;
LBUG();
case IBLND_CONN_PASSIVE_WAIT:
- CERROR ("%s: REJECTED %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid),
- event->status);
+ CERROR("%s: REJECTED %d cm_id %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ event->status, cmid);
kiblnd_connreq_done(conn, -ECONNRESET);
break;
LBUG();
case IBLND_CONN_PASSIVE_WAIT:
- CDEBUG(D_NET, "ESTABLISHED (passive): %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CDEBUG(D_NET, "ESTABLISHED (passive): %s cm_id %p conn %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), cmid, conn);
kiblnd_connreq_done(conn, 0);
break;
case IBLND_CONN_ACTIVE_CONNECT:
- CDEBUG(D_NET, "ESTABLISHED(active): %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CDEBUG(D_NET, "ESTABLISHED(active): %s cm_id %p conn %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), cmid, conn);
kiblnd_check_connreply(conn,
(void *)KIBLND_CONN_PARAM(event),
KIBLND_CONN_PARAM_LEN(event));
case RDMA_CM_EVENT_DISCONNECTED:
conn = cmid->context;
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
- CERROR("%s DISCONNECTED\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ CERROR("%s DISCONNECTED cm_id %p conn %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), cmid, conn);
kiblnd_connreq_done(conn, -ECONNRESET);
} else {
kiblnd_close_conn(conn, 0);
kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
{
struct kib_tx *tx;
- struct list_head *ttmp;
-
- list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, struct kib_tx, tx_list);
+ list_for_each_entry(tx, txs, tx_list) {
if (txs != &conn->ibc_active_txs) {
LASSERT(tx->tx_queued);
} else {
struct kib_peer_ni *peer_ni;
struct kib_conn *conn;
struct kib_tx *tx, *tx_tmp;
- struct list_head *ctmp;
unsigned long flags;
/* NB. We expect to have a look at all the peers and not find any
}
}
- list_for_each(ctmp, &peer_ni->ibp_conns) {
+ list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) {
int timedout;
int sendnoop;
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
spin_lock(&conn->ibc_lock);
conn->ibc_credits,
conn->ibc_outstanding_credits,
conn->ibc_reserved_credits);
+#ifdef O2IBLND_CONN_STATE_DEBUG
+ kiblnd_dump_conn_dbg(conn);
+#endif
list_add(&conn->ibc_connd_list, &closes);
} else {
list_add(&conn->ibc_connd_list, &checksends);
* connection. We can only be sure RDMA activity
* has ceased once the QP has been modified.
*/
- while (!list_empty(&closes)) {
- conn = list_entry(closes.next,
- struct kib_conn, ibc_connd_list);
+ while ((conn = list_first_entry_or_null(&closes,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
kiblnd_close_conn(conn, -ETIMEDOUT);
kiblnd_conn_decref(conn);
* NOOP, but there were no non-blocking tx descs
* free to do it last time...
*/
- while (!list_empty(&checksends)) {
- conn = list_entry(checksends.next,
- struct kib_conn, ibc_connd_list);
+ while ((conn = list_first_entry_or_null(&checksends,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
spin_lock(&conn->ibc_lock);
LASSERT (!in_interrupt());
LASSERT (current == kiblnd_data.kib_connd);
LASSERT (conn->ibc_state == IBLND_CONN_CLOSING);
-
+#ifdef O2IBLND_CONN_STATE_DEBUG
+ kiblnd_dump_conn_dbg(conn);
+#endif
rdma_disconnect(conn->ibc_cmid);
kiblnd_finalise_conn(conn);
dropped_lock = false;
- if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies,
+ struct kib_conn, ibc_list);
+ if (conn) {
struct kib_peer_ni *peer_ni = NULL;
- conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
if (conn->ibc_reconnect) {
peer_ni = conn->ibc_peer;
&kiblnd_data.kib_reconn_wait);
}
- if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns,
+ struct kib_conn, ibc_list);
+ if (conn) {
int wait;
- conn = list_entry(kiblnd_data.kib_connd_conns.next,
- struct kib_conn, ibc_list);
+
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
&kiblnd_data.kib_reconn_list);
}
- if (list_empty(&kiblnd_data.kib_reconn_list))
+ conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list,
+ struct kib_conn, ibc_list);
+ if (!conn)
break;
- conn = list_entry(kiblnd_data.kib_reconn_list.next,
- struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
spin_lock_irqsave(lock, flags);
}
- if (!list_empty(&kiblnd_data.kib_connd_waits)) {
- conn = list_entry(kiblnd_data.kib_connd_waits.next,
- struct kib_conn, ibc_list);
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits,
+ struct kib_conn, ibc_list);
+ if (conn) {
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
case IB_EVENT_PORT_ERR:
case IB_EVENT_DEVICE_FATAL:
CERROR("Fatal device error for NI %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
return;
case IB_EVENT_PORT_ACTIVE:
CERROR("Port reactivated for NI %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
return;
(conn->ibc_nrx > 0 ||
conn->ibc_nsends_posted > 0)) {
kiblnd_conn_addref(conn); /* +1 ref for sched_conns */
+ kiblnd_dump_conn_dbg(conn);
conn->ibc_scheduled = 1;
list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns);
did_something = false;
- if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next,
- struct kib_conn, ibc_sched_list);
+ conn = list_first_entry_or_null(&sched->ibs_conns,
+ struct kib_conn,
+ ibc_sched_list);
+ if (conn) {
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
list_del(&conn->ibc_sched_list);
rc = ib_req_notify_cq(conn->ibc_cq,
IB_CQ_NEXT_COMP);
if (rc < 0) {
- CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+ CWARN("%s: ib_req_notify_cq failed: %d, closing connection %p\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid),
+ rc, conn);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
spin_lock_irqsave(&sched->ibs_lock,
}
if (rc < 0) {
- CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
+ CWARN("%s: ib_poll_cq failed: %d, closing connection %p\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
- rc);
+ rc, conn);
kiblnd_close_conn(conn, -EIO);
kiblnd_conn_decref(conn);
spin_lock_irqsave(&sched->ibs_lock, flags);