X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd_cb.c;h=658d0adde546e5fbb9e9da7c9fc455e6165f75c9;hb=HEAD;hp=c6ad2b484b4227cd1365a1a88f27c193310e63ce;hpb=babf0232273467b7199ec9a7c36047b1968913df;p=fs%2Flustre-release.git diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c index c6ad2b4..c310eba 100644 --- a/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -27,7 +27,6 @@ */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. * * lnet/klnds/o2iblnd/o2iblnd_cb.c * @@ -41,8 +40,11 @@ static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni); static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active, int error); -static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, - int type, int body_nob); +static struct ib_rdma_wr * +kiblnd_init_tx_msg_payload(struct lnet_ni *ni, struct kib_tx *tx, + int type, int body_nob, int payload_nob); +#define kiblnd_init_tx_msg(ni, tx, type, body) \ + kiblnd_init_tx_msg_payload(ni, tx, type, body, 0) static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, int resid, struct kib_rdma_desc *dstrd, u64 dstcookie); static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn); @@ -51,7 +53,7 @@ static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn); static void kiblnd_unmap_tx(struct kib_tx *tx); static void kiblnd_check_sends_locked(struct kib_conn *conn); -void +static void kiblnd_tx_done(struct kib_tx *tx) { struct lnet_msg *lntmsg[2]; @@ -100,9 +102,9 @@ kiblnd_txlist_done(struct list_head *txlist, int status, { struct kib_tx *tx; - while (!list_empty(txlist)) { - tx = list_entry(txlist->next, struct kib_tx, tx_list); - + while ((tx = list_first_entry_or_null(txlist, + struct kib_tx, + tx_list)) != NULL) { list_del(&tx->tx_list); /* complete now */ tx->tx_waiting = 0; @@ -114,14 +116,14 @@ kiblnd_txlist_done(struct list_head *txlist, int status, } static struct kib_tx * -kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target) +kiblnd_get_idle_tx(struct lnet_ni *ni, struct lnet_nid *target) { struct kib_net *net = ni->ni_data; struct list_head *node; struct kib_tx *tx; struct kib_tx_poolset *tps; - tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)]; + tps = net->ibn_tx_ps[lnet_nid2cpt(target, ni)]; node = kiblnd_pool_alloc_node(&tps->tps_poolset); if (node == NULL) return NULL; @@ -137,6 +139,7 @@ kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target) LASSERT (tx->tx_lntmsg[1] == NULL); LASSERT (tx->tx_nfrags == 0); + tx->tx_gpu = 0; tx->tx_gaps = false; tx->tx_hstatus = LNET_MSG_STATUS_OK; @@ -164,7 +167,7 @@ kiblnd_post_rx(struct kib_rx *rx, int credit) struct kib_conn *conn = rx->rx_conn; struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data; struct ib_recv_wr *bad_wrq = NULL; -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR struct ib_mr *mr = conn->ibc_hdev->ibh_mrs; #endif int rc; @@ -174,7 +177,7 @@ kiblnd_post_rx(struct kib_rx *rx, int credit) LASSERT (credit == IBLND_POSTRX_NO_CREDIT || credit == IBLND_POSTRX_PEER_CREDIT || credit == IBLND_POSTRX_RSRVD_CREDIT); -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR LASSERT(mr != NULL); rx->rx_sge.lkey = mr->lkey; @@ -203,7 +206,7 @@ kiblnd_post_rx(struct kib_rx *rx, int credit) * own this rx (and rx::rx_conn) anymore, LU-5678. 
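 * (hence the extra conn reference taken just below, before the receive is posted)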
*/ kiblnd_conn_addref(conn); -#ifdef HAVE_IB_POST_SEND_RECV_CONST +#ifdef HAVE_OFED_IB_POST_SEND_RECV_CONST rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, (const struct ib_recv_wr **)&bad_wrq); #else @@ -211,7 +214,7 @@ kiblnd_post_rx(struct kib_rx *rx, int credit) #endif if (unlikely(rc != 0)) { CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); + libcfs_nidstr(&conn->ibc_peer->ibp_nid), rc, bad_wrq); rx->rx_nob = 0; } @@ -243,11 +246,9 @@ out: static struct kib_tx * kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie) { - struct list_head *tmp; - - list_for_each(tmp, &conn->ibc_active_txs) { - struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list); + struct kib_tx *tx; + list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) { LASSERT(!tx->tx_queued); LASSERT(tx->tx_sending != 0 || tx->tx_waiting); @@ -279,13 +280,20 @@ kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, u64 cook spin_unlock(&conn->ibc_lock); CWARN("Unmatched completion type %x cookie %#llx from %s\n", - txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + txtype, cookie, libcfs_nidstr(&conn->ibc_peer->ibp_nid)); kiblnd_close_conn(conn, -EPROTO); return; } if (tx->tx_status == 0) { /* success so far */ if (status < 0) { /* failed? */ + if (status == -ECONNABORTED) { + CDEBUG(D_NET, + "bad status for connection to %s with completion type %x\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), + txtype); + } + tx->tx_status = status; tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR; } else if (txtype == IBLND_MSG_GET_REQ) { @@ -309,11 +317,11 @@ static void kiblnd_send_completion(struct kib_conn *conn, int type, int status, u64 cookie) { struct lnet_ni *ni = conn->ibc_peer->ibp_ni; - struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + struct kib_tx *tx = kiblnd_get_idle_tx(ni, &conn->ibc_peer->ibp_nid); if (tx == NULL) { CERROR("Can't get tx for completion %x for %s\n", - type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + type, libcfs_nidstr(&conn->ibc_peer->ibp_nid)); return; } @@ -328,19 +336,24 @@ static void kiblnd_handle_rx(struct kib_rx *rx) { struct kib_msg *msg = rx->rx_msg; - struct kib_conn *conn = rx->rx_conn; + struct kib_conn *conn = rx->rx_conn; struct lnet_ni *ni = conn->ibc_peer->ibp_ni; - int credits = msg->ibm_credits; + int credits = msg->ibm_credits; struct kib_tx *tx; - int rc = 0; - int rc2; - int post_credit; + int rc = 0; + int rc2; + int post_credit; + struct lnet_hdr hdr; + struct lnet_nid srcnid; LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); - CDEBUG (D_NET, "Received %x[%d] from %s\n", - msg->ibm_type, credits, - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + CDEBUG(D_NET, "Received %x[%d] nob %u cm_id %p qp_num 0x%x\n", + msg->ibm_type, credits, + msg->ibm_nob, + conn->ibc_cmid, + conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0); + kiblnd_dump_conn_dbg(conn); if (credits != 0) { /* Have I received credits that will let me send? 
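 * (returned credits are sanity-checked against ibc_queue_depth before being applied)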
*/ @@ -352,7 +365,7 @@ kiblnd_handle_rx(struct kib_rx *rx) spin_unlock(&conn->ibc_lock); CERROR("Bad credits from %s: %d + %d > %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), + libcfs_nidstr(&conn->ibc_peer->ibp_nid), rc2, credits, conn->ibc_queue_depth); @@ -375,7 +388,7 @@ kiblnd_handle_rx(struct kib_rx *rx) switch (msg->ibm_type) { default: CERROR("Bad IBLND message type %x from %s\n", - msg->ibm_type, libcfs_nid2str(conn->ibc_peer->ibp_nid)); + msg->ibm_type, libcfs_nidstr(&conn->ibc_peer->ibp_nid)); post_credit = IBLND_POSTRX_NO_CREDIT; rc = -EPROTO; break; @@ -392,25 +405,27 @@ kiblnd_handle_rx(struct kib_rx *rx) post_credit = IBLND_POSTRX_PEER_CREDIT; break; - case IBLND_MSG_IMMEDIATE: - post_credit = IBLND_POSTRX_DONT_POST; - rc = lnet_parse(ni, &msg->ibm_u.immediate.ibim_hdr, - msg->ibm_srcnid, rx, 0); - if (rc < 0) /* repost on error */ - post_credit = IBLND_POSTRX_PEER_CREDIT; - break; + case IBLND_MSG_IMMEDIATE: + post_credit = IBLND_POSTRX_DONT_POST; + lnet_hdr_from_nid4(&hdr, &msg->ibm_u.immediate.ibim_hdr); + lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid); + rc = lnet_parse(ni, &hdr, &srcnid, rx, 0); + if (rc < 0) /* repost on error */ + post_credit = IBLND_POSTRX_PEER_CREDIT; + break; - case IBLND_MSG_PUT_REQ: - post_credit = IBLND_POSTRX_DONT_POST; - rc = lnet_parse(ni, &msg->ibm_u.putreq.ibprm_hdr, - msg->ibm_srcnid, rx, 1); - if (rc < 0) /* repost on error */ - post_credit = IBLND_POSTRX_PEER_CREDIT; - break; + case IBLND_MSG_PUT_REQ: + post_credit = IBLND_POSTRX_DONT_POST; + lnet_hdr_from_nid4(&hdr, &msg->ibm_u.putreq.ibprm_hdr); + lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid); + rc = lnet_parse(ni, &hdr, &srcnid, rx, 1); + if (rc < 0) /* repost on error */ + post_credit = IBLND_POSTRX_PEER_CREDIT; + break; case IBLND_MSG_PUT_NAK: CWARN ("PUT_NACK from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + libcfs_nidstr(&conn->ibc_peer->ibp_nid)); post_credit = IBLND_POSTRX_RSRVD_CREDIT; kiblnd_handle_completion(conn, IBLND_MSG_PUT_REQ, msg->ibm_u.completion.ibcm_status, @@ -429,7 +444,7 @@ kiblnd_handle_rx(struct kib_rx *rx) if (tx == NULL) { CERROR("Unmatched PUT_ACK from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + libcfs_nidstr(&conn->ibc_peer->ibp_nid)); rc = -EPROTO; break; } @@ -447,7 +462,7 @@ kiblnd_handle_rx(struct kib_rx *rx) msg->ibm_u.putack.ibpam_dst_cookie); if (rc2 < 0) CERROR("Can't setup rdma for PUT to %s: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2); + libcfs_nidstr(&conn->ibc_peer->ibp_nid), rc2); spin_lock(&conn->ibc_lock); tx->tx_waiting = 0; /* clear waiting and queue atomically */ @@ -462,13 +477,14 @@ kiblnd_handle_rx(struct kib_rx *rx) msg->ibm_u.completion.ibcm_cookie); break; - case IBLND_MSG_GET_REQ: - post_credit = IBLND_POSTRX_DONT_POST; - rc = lnet_parse(ni, &msg->ibm_u.get.ibgm_hdr, - msg->ibm_srcnid, rx, 1); - if (rc < 0) /* repost on error */ - post_credit = IBLND_POSTRX_PEER_CREDIT; - break; + case IBLND_MSG_GET_REQ: + post_credit = IBLND_POSTRX_DONT_POST; + lnet_hdr_from_nid4(&hdr, &msg->ibm_u.get.ibgm_hdr); + lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid); + rc = lnet_parse(ni, &hdr, &srcnid, rx, 1); + if (rc < 0) /* repost on error */ + post_credit = IBLND_POSTRX_PEER_CREDIT; + break; case IBLND_MSG_GET_DONE: post_credit = IBLND_POSTRX_RSRVD_CREDIT; @@ -481,8 +497,8 @@ kiblnd_handle_rx(struct kib_rx *rx) if (rc < 0) /* protocol error */ kiblnd_close_conn(conn, rc); - if (post_credit != IBLND_POSTRX_DONT_POST) - kiblnd_post_rx(rx, post_credit); + if (post_credit != IBLND_POSTRX_DONT_POST) + kiblnd_post_rx(rx, post_credit); 
} static void @@ -492,48 +508,51 @@ kiblnd_rx_complete(struct kib_rx *rx, int status, int nob) struct kib_conn *conn = rx->rx_conn; struct lnet_ni *ni = conn->ibc_peer->ibp_ni; struct kib_net *net = ni->ni_data; + struct lnet_nid srcnid, destnid; int rc; int err = -EIO; - LASSERT (net != NULL); - LASSERT (rx->rx_nob < 0); /* was posted */ - rx->rx_nob = 0; /* isn't now */ + LASSERT(net); + LASSERT(rx->rx_nob < 0); /* was posted */ + rx->rx_nob = 0; /* isn't now */ - if (conn->ibc_state > IBLND_CONN_ESTABLISHED) - goto ignore; + if (conn->ibc_state > IBLND_CONN_ESTABLISHED) + goto ignore; - if (status != IB_WC_SUCCESS) { - CNETERR("Rx from %s failed: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), status); - goto failed; - } + if (status != IB_WC_SUCCESS) { + CNETERR("Rx from %s failed: %d\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), status); + goto failed; + } - LASSERT (nob >= 0); - rx->rx_nob = nob; + LASSERT(nob >= 0); + rx->rx_nob = nob; - rc = kiblnd_unpack_msg(msg, rx->rx_nob); - if (rc != 0) { - CERROR ("Error %d unpacking rx from %s\n", - rc, libcfs_nid2str(conn->ibc_peer->ibp_nid)); - goto failed; - } + rc = kiblnd_unpack_msg(msg, rx->rx_nob); + if (rc != 0) { + CERROR("Error %d unpacking rx from %s\n", + rc, libcfs_nidstr(&conn->ibc_peer->ibp_nid)); + goto failed; + } - if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid || - msg->ibm_dstnid != ni->ni_nid || - msg->ibm_srcstamp != conn->ibc_incarnation || - msg->ibm_dststamp != net->ibn_incarnation) { - CERROR ("Stale rx from %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - err = -ESTALE; - goto failed; - } + lnet_nid4_to_nid(msg->ibm_srcnid, &srcnid); + lnet_nid4_to_nid(msg->ibm_dstnid, &destnid); + if (!nid_same(&srcnid, &conn->ibc_peer->ibp_nid) || + !nid_same(&destnid, &ni->ni_nid) || + msg->ibm_srcstamp != conn->ibc_incarnation || + msg->ibm_dststamp != net->ibn_incarnation) { + CERROR("Stale rx from %s\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid)); + err = -ESTALE; + goto failed; + } - /* set time last known alive */ - kiblnd_peer_alive(conn->ibc_peer); + /* set time last known alive */ + kiblnd_peer_alive(conn->ibc_peer); - /* racing with connection establishment/teardown! */ + /* racing with connection establishment/teardown! */ - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { rwlock_t *g_lock = &kiblnd_data.kib_global_lock; unsigned long flags; @@ -545,15 +564,15 @@ kiblnd_rx_complete(struct kib_rx *rx, int status, int nob) return; } write_unlock_irqrestore(g_lock, flags); - } - kiblnd_handle_rx(rx); - return; + } + kiblnd_handle_rx(rx); + return; - failed: - CDEBUG(D_NET, "rx %p conn %p\n", rx, conn); - kiblnd_close_conn(conn, err); - ignore: - kiblnd_drop_rx(rx); /* Don't re-post rx. */ +failed: + CDEBUG(D_NET, "rx %p conn %p\n", rx, conn); + kiblnd_close_conn(conn, err); +ignore: + kiblnd_drop_rx(rx); /* Don't re-post rx. */ } static int @@ -580,7 +599,7 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, * in trying to map the memory, because it'll just fail. So * preemptively fail with an appropriate message */ - if ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED) && + if (IS_FAST_REG_DEV(dev) && !(dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT) && tx->tx_gaps) { CERROR("Using FastReg with no GAPS support, but tx has gaps. 
" @@ -588,6 +607,7 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, return -EPROTONOSUPPORT; } +#ifdef HAVE_OFED_FMR_POOL_API /* * FMR does not support gaps but the tx has gaps then * we should make sure that the number of fragments we'll be sending @@ -606,6 +626,7 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, return -EFBIG; } } +#endif fps = net->ibn_fmr_ps[cpt]; rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->tx_fmr); @@ -624,11 +645,17 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, * for FastReg or FMR with no gaps we can accumulate all * the fragments in one FastReg or FMR fragment. */ - if (((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) && !tx->tx_gaps) || - (dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)) { + if ( +#ifdef HAVE_OFED_FMR_POOL_API + ((dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) + && !tx->tx_gaps) || +#endif + IS_FAST_REG_DEV(dev)) { /* FMR requires zero based address */ +#ifdef HAVE_OFED_FMR_POOL_API if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED) rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; +#endif rd->rd_frags[0].rf_nob = nob; rd->rd_nfrags = 1; } else { @@ -649,17 +676,20 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, static void kiblnd_unmap_tx(struct kib_tx *tx) { - if (tx->tx_fmr.fmr_pfmr || tx->tx_fmr.fmr_frd) + if ( +#ifdef HAVE_OFED_FMR_POOL_API + tx->tx_fmr.fmr_pfmr || +#endif + tx->tx_fmr.fmr_frd) kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status); if (tx->tx_nfrags != 0) { - kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, - tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); + kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev, tx); tx->tx_nfrags = 0; } } -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR static struct ib_mr * kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd) { @@ -675,9 +705,11 @@ kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd) * memory regions. If that's not available either, then you're * dead in the water and fail the operation. */ - if (tunables->lnd_map_on_demand && - (net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED || - net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)) + if (tunables->lnd_map_on_demand && (IS_FAST_REG_DEV(net->ibn_dev) +#ifdef HAVE_OFED_FMR_POOL_API + || net->ibn_dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED +#endif + )) return NULL; /* @@ -694,7 +726,7 @@ static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx, { struct kib_net *net = ni->ni_data; struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev; -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR struct ib_mr *mr = NULL; #endif __u32 nob; @@ -705,9 +737,7 @@ static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx, tx->tx_dmadir = (rd != tx->tx_rd) ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; tx->tx_nfrags = nfrags; - rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags, - tx->tx_nfrags, tx->tx_dmadir); - + rd->rd_nfrags = kiblnd_dma_map_sg(hdev, tx); for (i = 0, nob = 0; i < rd->rd_nfrags; i++) { rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len( hdev->ibh_ibdev, &tx->tx_frags[i]); @@ -716,7 +746,7 @@ static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx, nob += rd->rd_frags[i].rf_nob; } -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR mr = kiblnd_find_rd_dma_mr(ni, rd); if (mr != NULL) { /* found pre-mapping MR */ @@ -737,10 +767,12 @@ static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, { struct kib_net *net = ni->ni_data; struct scatterlist *sg; - int fragnob; - int max_nkiov; + int fragnob; + int max_nkiov; + int sg_count = 0; - CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob); + CDEBUG(D_NET, "niov %d offset %d nob %d gpu %d\n", + nkiov, offset, nob, tx->tx_gpu); LASSERT(nob > 0); LASSERT(nkiov > 0); @@ -759,6 +791,12 @@ static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, do { LASSERT(nkiov > 0); + if (!sg) { + CERROR("lacking enough sg entries to map tx\n"); + return -EFAULT; + } + sg_count++; + fragnob = min((int)(kiov->bv_len - offset), nob); /* @@ -778,10 +816,6 @@ static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, sg_set_page(sg, kiov->bv_page, fragnob, kiov->bv_offset + offset); sg = sg_next(sg); - if (!sg) { - CERROR("lacking enough sg entries to map tx\n"); - return -EFAULT; - } offset = 0; kiov++; @@ -789,7 +823,7 @@ static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx, nob -= fragnob; } while (nob > 0); - return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); + return kiblnd_map_tx(ni, tx, rd, sg_count); } static int @@ -799,6 +833,7 @@ __must_hold(&conn->ibc_lock) struct kib_msg *msg = tx->tx_msg; struct kib_peer_ni *peer_ni = conn->ibc_peer; struct lnet_ni *ni = peer_ni->ibp_ni; + struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd; int ver = conn->ibc_version; int rc; int done; @@ -818,13 +853,16 @@ __must_hold(&conn->ibc_lock) kiblnd_concurrent_sends(ver, ni)) { /* tx completions outstanding... */ CDEBUG(D_NET, "%s: posted enough\n", - libcfs_nid2str(peer_ni->ibp_nid)); + libcfs_nidstr(&peer_ni->ibp_nid)); return -EAGAIN; } if (credit != 0 && conn->ibc_credits == 0) { /* no credits */ - CDEBUG(D_NET, "%s: no credits\n", - libcfs_nid2str(peer_ni->ibp_nid)); + CDEBUG(D_NET, "%s: no credits cm_id %p qp_num 0x%x\n", + libcfs_nidstr(&peer_ni->ibp_nid), + conn->ibc_cmid, + conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0); + kiblnd_dump_conn_dbg(conn); return -EAGAIN; } @@ -832,7 +870,7 @@ __must_hold(&conn->ibc_lock) conn->ibc_credits == 1 && /* last credit reserved */ msg->ibm_type != IBLND_MSG_NOOP) { /* for NOOP */ CDEBUG(D_NET, "%s: not using last credit\n", - libcfs_nid2str(peer_ni->ibp_nid)); + libcfs_nidstr(&peer_ni->ibp_nid)); return -EAGAIN; } @@ -851,14 +889,24 @@ __must_hold(&conn->ibc_lock) tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR; kiblnd_tx_done(tx); spin_lock(&conn->ibc_lock); - CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n", - libcfs_nid2str(peer_ni->ibp_nid), - conn->ibc_noops_posted); - return 0; + CDEBUG(D_NET, "%s(%d): redundant or enough NOOP cm_id %p qp_num 0x%x\n", + libcfs_nidstr(&peer_ni->ibp_nid), + conn->ibc_noops_posted, + conn->ibc_cmid, + conn->ibc_cmid->qp ? 
conn->ibc_cmid->qp->qp_num : 0); + kiblnd_dump_conn_dbg(conn); + return 0; } + CDEBUG(D_NET, "Transmit %x[%d] nob %u cm_id %p qp_num 0x%x\n", + msg->ibm_type, credit, + msg->ibm_nob, + conn->ibc_cmid, + conn->ibc_cmid->qp ? conn->ibc_cmid->qp->qp_num : 0); + kiblnd_dump_conn_dbg(conn); + kiblnd_pack_msg(peer_ni->ibp_ni, msg, ver, conn->ibc_outstanding_credits, - peer_ni->ibp_nid, conn->ibc_incarnation); + &peer_ni->ibp_nid, conn->ibc_incarnation); conn->ibc_credits -= credit; conn->ibc_outstanding_credits = 0; @@ -877,47 +925,54 @@ __must_hold(&conn->ibc_lock) /* I'm still holding ibc_lock! */ if (conn->ibc_state != IBLND_CONN_ESTABLISHED) { + CDEBUG(D_NET, "connection to %s is not established\n", + conn->ibc_peer? libcfs_nidstr(&conn->ibc_peer->ibp_nid): "NULL"); rc = -ECONNABORTED; } else if (tx->tx_pool->tpo_pool.po_failed || conn->ibc_hdev != tx->tx_pool->tpo_hdev) { /* close_conn will launch failover */ rc = -ENETDOWN; } else { - struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd; struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr; struct ib_send_wr *wr = &tx->tx_wrq[0].wr; - if (frd != NULL) { - if (!frd->frd_valid) { - wr = &frd->frd_inv_wr.wr; - wr->next = &frd->frd_fastreg_wr.wr; - } else { - wr = &frd->frd_fastreg_wr.wr; - } + if (frd != NULL && !frd->frd_posted) { + wr = &frd->frd_inv_wr.wr; + wr->next = &frd->frd_fastreg_wr.wr; frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr; } LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX), "bad wr_id %#llx, opc %d, flags %d, peer_ni: %s\n", bad->wr_id, bad->opcode, bad->send_flags, - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + libcfs_nidstr(&conn->ibc_peer->ibp_nid)); bad = NULL; if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus)) rc = -EINVAL; else -#ifdef HAVE_IB_POST_SEND_RECV_CONST +#ifdef HAVE_OFED_IB_POST_SEND_RECV_CONST rc = ib_post_send(conn->ibc_cmid->qp, wr, (const struct ib_send_wr **)&bad); #else rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad); #endif + if (frd && !frd->frd_posted) { + /* The local invalidate becomes invalid (has been + * successfully used) if the post succeeds or the + * failing wr was not the invalidate. 
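 * (i.e. frd_valid stays set only when the failing wr was the invalidate itself)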
*/ + frd->frd_valid = + !(rc == 0 || (bad != &frd->frd_inv_wr.wr)); + } } conn->ibc_last_send = ktime_get(); - if (rc == 0) - return 0; + if (rc == 0) { + if (frd != NULL) + frd->frd_posted = true; + return 0; + } /* NB credits are transferred in the actual * message, which can only be the last work item */ @@ -939,10 +994,10 @@ __must_hold(&conn->ibc_lock) if (conn->ibc_state == IBLND_CONN_ESTABLISHED) CERROR("Error %d posting transmit to %s\n", - rc, libcfs_nid2str(peer_ni->ibp_nid)); + rc, libcfs_nidstr(&peer_ni->ibp_nid)); else CDEBUG(D_NET, "Error %d posting transmit to %s\n", - rc, libcfs_nid2str(peer_ni->ibp_nid)); + rc, libcfs_nidstr(&peer_ni->ibp_nid)); kiblnd_close_conn(conn, rc); @@ -964,7 +1019,7 @@ kiblnd_check_sends_locked(struct kib_conn *conn) /* Don't send anything until after the connection is established */ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { CDEBUG(D_NET, "%s too soon\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + libcfs_nidstr(&conn->ibc_peer->ibp_nid)); return; } @@ -975,9 +1030,8 @@ kiblnd_check_sends_locked(struct kib_conn *conn) LASSERT (conn->ibc_reserved_credits >= 0); while (conn->ibc_reserved_credits > 0 && - !list_empty(&conn->ibc_tx_queue_rsrvd)) { - tx = list_entry(conn->ibc_tx_queue_rsrvd.next, - struct kib_tx, tx_list); + (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd, + struct kib_tx, tx_list)) != NULL) { list_move_tail(&tx->tx_list, &conn->ibc_tx_queue); conn->ibc_reserved_credits--; } @@ -985,7 +1039,7 @@ kiblnd_check_sends_locked(struct kib_conn *conn) if (kiblnd_need_noop(conn)) { spin_unlock(&conn->ibc_lock); - tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + tx = kiblnd_get_idle_tx(ni, &conn->ibc_peer->ibp_nid); if (tx != NULL) kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); @@ -999,17 +1053,17 @@ kiblnd_check_sends_locked(struct kib_conn *conn) if (!list_empty(&conn->ibc_tx_queue_nocred)) { credit = 0; - tx = list_entry(conn->ibc_tx_queue_nocred.next, - struct kib_tx, tx_list); + tx = list_first_entry(&conn->ibc_tx_queue_nocred, + struct kib_tx, tx_list); } else if (!list_empty(&conn->ibc_tx_noops)) { LASSERT (!IBLND_OOB_CAPABLE(ver)); credit = 1; - tx = list_entry(conn->ibc_tx_noops.next, - struct kib_tx, tx_list); + tx = list_first_entry(&conn->ibc_tx_noops, + struct kib_tx, tx_list); } else if (!list_empty(&conn->ibc_tx_queue)) { credit = 1; - tx = list_entry(conn->ibc_tx_queue.next, - struct kib_tx, tx_list); + tx = list_first_entry(&conn->ibc_tx_queue, + struct kib_tx, tx_list); } else break; @@ -1035,7 +1089,7 @@ kiblnd_tx_complete(struct kib_tx *tx, int status) if (conn->ibc_state == IBLND_CONN_ESTABLISHED) CNETERR("Tx -> %s cookie %#llx" " sending %d waiting %d: failed %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), + libcfs_nidstr(&conn->ibc_peer->ibp_nid), tx->tx_cookie, tx->tx_sending, tx->tx_waiting, status); @@ -1058,6 +1112,9 @@ kiblnd_tx_complete(struct kib_tx *tx, int status) tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED; tx->tx_waiting = 0; /* don't wait for peer_ni */ tx->tx_status = -EIO; +#ifdef O2IBLND_CONN_STATE_DEBUG + kiblnd_dump_conn_dbg(conn); +#endif } idle = (tx->tx_sending == 0) && /* This is the final callback */ @@ -1073,46 +1130,58 @@ kiblnd_tx_complete(struct kib_tx *tx, int status) kiblnd_tx_done(tx); } + static void -kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type, - int body_nob) +kiblnd_init_tx_sge(struct kib_tx *tx, u64 addr, unsigned int len) { + struct ib_sge *sge = &tx->tx_sge[tx->tx_nsge]; struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev; - struct ib_sge 
*sge = &tx->tx_msgsge; - struct ib_rdma_wr *wrq; - int nob = offsetof(struct kib_msg, ibm_u) + body_nob; -#ifdef HAVE_IB_GET_DMA_MR +#ifdef HAVE_OFED_IB_GET_DMA_MR struct ib_mr *mr = hdev->ibh_mrs; #endif + *sge = (struct ib_sge) { +#ifdef HAVE_OFED_IB_GET_DMA_MR + .lkey = mr->lkey, +#else + .lkey = hdev->ibh_pd->local_dma_lkey, +#endif + .addr = addr, + .length = len, + }; + + tx->tx_nsge++; +} + +static struct ib_rdma_wr * +kiblnd_init_tx_msg_payload(struct lnet_ni *ni, struct kib_tx *tx, int type, + int body_nob, int payload) +{ + struct ib_rdma_wr *wrq; + int nob = offsetof(struct kib_msg, ibm_u) + body_nob; + LASSERT(tx->tx_nwrq >= 0); LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); LASSERT(nob <= IBLND_MSG_SIZE); -#ifdef HAVE_IB_GET_DMA_MR - LASSERT(mr != NULL); -#endif - kiblnd_init_msg(tx->tx_msg, type, body_nob); - -#ifdef HAVE_IB_GET_DMA_MR - sge->lkey = mr->lkey; -#else - sge->lkey = hdev->ibh_pd->local_dma_lkey; -#endif - sge->addr = tx->tx_msgaddr; - sge->length = nob; + kiblnd_init_msg(tx->tx_msg, type, body_nob + payload); wrq = &tx->tx_wrq[tx->tx_nwrq]; - memset(wrq, 0, sizeof(*wrq)); - wrq->wr.next = NULL; - wrq->wr.wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); - wrq->wr.sg_list = sge; - wrq->wr.num_sge = 1; - wrq->wr.opcode = IB_WR_SEND; - wrq->wr.send_flags = IB_SEND_SIGNALED; + *wrq = (struct ib_rdma_wr) { + .wr = { + .wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX), + .num_sge = 1, + .sg_list = &tx->tx_sge[tx->tx_nsge], + .opcode = IB_WR_SEND, + .send_flags = IB_SEND_SIGNALED, + }, + }; + + kiblnd_init_tx_sge(tx, tx->tx_msgaddr, nob); tx->tx_nwrq++; + return wrq; } static int @@ -1138,7 +1207,8 @@ kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, int prev = dstidx; if (srcidx >= srcrd->rd_nfrags) { - CERROR("Src buffer exhausted: %d frags\n", srcidx); + CERROR("Src buffer exhausted: %d frags %px\n", + srcidx, tx); rc = -EPROTO; break; } @@ -1152,7 +1222,7 @@ kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, if (tx->tx_nwrq >= conn->ibc_max_frags) { CERROR("RDMA has too many fragments for peer_ni %s (%d), " "src idx/frags: %d/%d dst idx/frags: %d/%d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), + libcfs_nidstr(&conn->ibc_peer->ibp_nid), conn->ibc_max_frags, srcidx, srcrd->rd_nfrags, dstidx, dstrd->rd_nfrags); @@ -1178,7 +1248,7 @@ kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, wrq->wr.opcode = IB_WR_RDMA_WRITE; wrq->wr.send_flags = 0; -#ifdef HAVE_IB_RDMA_WR +#ifdef HAVE_OFED_IB_RDMA_WR wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx); wrq->rkey = kiblnd_rd_frag_key(dstrd, @@ -1225,6 +1295,9 @@ kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn) LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) { + CDEBUG(D_NET, "connection with %s is disconnected\n", + conn->ibc_peer? 
libcfs_nidstr(&conn->ibc_peer->ibp_nid): "NULL"); + tx->tx_status = -ECONNABORTED; tx->tx_waiting = 0; if (tx->tx_conn != NULL) { @@ -1295,15 +1368,13 @@ kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn) static int kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid, - struct sockaddr_in *srcaddr, - struct sockaddr_in *dstaddr, + struct sockaddr *srcaddr, + struct sockaddr *dstaddr, int timeout_ms) { unsigned short port; int rc; - LASSERT(capable(CAP_NET_BIND_SERVICE)); - /* allow the port to be reused */ rc = rdma_set_reuseaddr(cmid, 1); if (rc != 0) { @@ -1313,11 +1384,29 @@ kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid, /* look for a free privileged port */ for (port = PROT_SOCK-1; port > 0; port--) { - srcaddr->sin_port = htons(port); - rc = rdma_resolve_addr(cmid, - (struct sockaddr *)srcaddr, - (struct sockaddr *)dstaddr, - timeout_ms); + rc = 0; + + switch (srcaddr->sa_family) { + case AF_INET: { + struct sockaddr_in *sa = (void *)srcaddr; + + sa->sin_port = htons(port); + break; + } + case AF_INET6: { + struct sockaddr_in6 *sa = (void *)srcaddr; + + sa->sin6_port = htons(port); + break; + } + default: + rc = -EOPNOTSUPP; + break; + } + if (rc < 0) + return rc; + + rc = rdma_resolve_addr(cmid, srcaddr, dstaddr, timeout_ms); if (rc == 0) { CDEBUG(D_NET, "bound to port %hu\n", port); return 0; @@ -1329,14 +1418,15 @@ kiblnd_resolve_addr_cap(struct rdma_cm_id *cmid, } } - CERROR("Failed to bind to a free privileged port\n"); - return rc; + CERROR("cannot bind to a free privileged port: rc = %d\n", rc); + + return rc; } static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, - struct sockaddr_in *srcaddr, - struct sockaddr_in *dstaddr, + struct sockaddr *srcaddr, + struct sockaddr *dstaddr, int timeout_ms) { const struct cred *old_creds = NULL; @@ -1344,7 +1434,7 @@ kiblnd_resolve_addr(struct rdma_cm_id *cmid, int rc; if (!capable(CAP_NET_BIND_SERVICE)) { - new_creds = prepare_creds(); + new_creds = prepare_kernel_cred(NULL); if (!new_creds) return -ENOMEM; @@ -1364,10 +1454,9 @@ static void kiblnd_connect_peer(struct kib_peer_ni *peer_ni) { struct rdma_cm_id *cmid; - struct kib_dev *dev; struct kib_net *net = peer_ni->ibp_ni->ni_data; - struct sockaddr_in srcaddr; - struct sockaddr_in dstaddr; + struct sockaddr srcaddr; + struct sockaddr dstaddr; int rc; LASSERT (net != NULL); @@ -1379,20 +1468,40 @@ kiblnd_connect_peer(struct kib_peer_ni *peer_ni) if (IS_ERR(cmid)) { CERROR("Can't create CMID for %s: %ld\n", - libcfs_nid2str(peer_ni->ibp_nid), PTR_ERR(cmid)); + libcfs_nidstr(&peer_ni->ibp_nid), PTR_ERR(cmid)); rc = PTR_ERR(cmid); goto failed; } - dev = net->ibn_dev; - memset(&srcaddr, 0, sizeof(srcaddr)); - srcaddr.sin_family = AF_INET; - srcaddr.sin_addr.s_addr = htonl(dev->ibd_ifip); + memset(&srcaddr, 0, sizeof(srcaddr)); + if (nid_is_nid4(&net->ibn_ni->ni_nid)) { + struct sockaddr_in *sa = (void *)&srcaddr; - memset(&dstaddr, 0, sizeof(dstaddr)); - dstaddr.sin_family = AF_INET; - dstaddr.sin_port = htons(*kiblnd_tunables.kib_service); - dstaddr.sin_addr.s_addr = htonl(LNET_NIDADDR(peer_ni->ibp_nid)); + sa->sin_family = AF_INET; + sa->sin_addr.s_addr = net->ibn_ni->ni_nid.nid_addr[0]; + } else { + struct sockaddr_in6 *sa = (void *)&srcaddr; + + sa->sin6_family = AF_INET6; + memcpy(&sa->sin6_addr, &net->ibn_ni->ni_nid.nid_addr, + NID_ADDR_BYTES(&net->ibn_ni->ni_nid)); + } + + memset(&dstaddr, 0, sizeof(dstaddr)); + if (nid_is_nid4(&peer_ni->ibp_nid)) { + struct sockaddr_in *sa = (void *)&dstaddr; + + sa->sin_family = AF_INET; + sa->sin_port = 
htons(*kiblnd_tunables.kib_service); + sa->sin_addr.s_addr = peer_ni->ibp_nid.nid_addr[0]; + } else { + struct sockaddr_in6 *sa = (void *)&dstaddr; + + sa->sin6_family = AF_INET6; + sa->sin6_port = htons(*kiblnd_tunables.kib_service); + memcpy(&sa->sin6_addr, &peer_ni->ibp_nid.nid_addr, + NID_ADDR_BYTES(&peer_ni->ibp_nid)); + } kiblnd_peer_addref(peer_ni); /* cmid's ref */ @@ -1400,15 +1509,13 @@ kiblnd_connect_peer(struct kib_peer_ni *peer_ni) rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr, kiblnd_timeout() * 1000); } else { - rc = rdma_resolve_addr(cmid, - (struct sockaddr *)&srcaddr, - (struct sockaddr *)&dstaddr, + rc = rdma_resolve_addr(cmid, &srcaddr, &dstaddr, kiblnd_timeout() * 1000); } if (rc != 0) { /* Can't initiate address resolution: */ CERROR("Can't resolve addr for %s: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), rc); + libcfs_nidstr(&peer_ni->ibp_nid), rc); goto failed2; } @@ -1467,60 +1574,67 @@ kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni) write_unlock_irqrestore(glock, flags); CWARN("Abort reconnection of %s: %s\n", - libcfs_nid2str(peer_ni->ibp_nid), reason); + libcfs_nidstr(&peer_ni->ibp_nid), reason); kiblnd_txlist_done(&txs, -ECONNABORTED, LNET_MSG_STATUS_LOCAL_ABORTED); return false; } void -kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid) +kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, struct lnet_nid *nid) { struct kib_peer_ni *peer_ni; struct kib_peer_ni *peer2; struct kib_conn *conn; rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - unsigned long flags; - int rc; - int i; + unsigned long flags; + int rc; + int i; struct lnet_ioctl_config_o2iblnd_tunables *tunables; + s64 timeout_ns; - /* If I get here, I've committed to send, so I complete the tx with - * failure on any problems */ + /* If I get here, I've committed to send, so I complete the tx with + * failure on any problems + */ - LASSERT (tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ - LASSERT (tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ + LASSERT(!tx || !tx->tx_conn); /* only set when assigned a conn */ + LASSERT(!tx || tx->tx_nwrq > 0); /* work items have been set up */ - /* First time, just use a read lock since I expect to find my peer_ni - * connected */ + /* First time, just use a read lock since I expect to find my peer_ni + * connected + */ read_lock_irqsave(g_lock, flags); - peer_ni = kiblnd_find_peer_locked(ni, nid); + peer_ni = kiblnd_find_peer_locked(ni, nid); if (peer_ni != NULL && !list_empty(&peer_ni->ibp_conns)) { - /* Found a peer_ni with an established connection */ - conn = kiblnd_get_conn_locked(peer_ni); - kiblnd_conn_addref(conn); /* 1 ref for me... */ + /* Found a peer_ni with an established connection */ + conn = kiblnd_get_conn_locked(peer_ni); + kiblnd_conn_addref(conn); /* 1 ref for me... */ read_unlock_irqrestore(g_lock, flags); - if (tx != NULL) - kiblnd_queue_tx(tx, conn); - kiblnd_conn_decref(conn); /* ...to here */ - return; - } + if (tx != NULL) + kiblnd_queue_tx(tx, conn); + kiblnd_conn_decref(conn); /* ...to here */ + return; + } + timeout_ns = kiblnd_timeout() * NSEC_PER_SEC; read_unlock(g_lock); /* Re-try with a write lock */ write_lock(g_lock); - peer_ni = kiblnd_find_peer_locked(ni, nid); - if (peer_ni != NULL) { + peer_ni = kiblnd_find_peer_locked(ni, nid); + if (peer_ni != NULL) { if (list_empty(&peer_ni->ibp_conns)) { - /* found a peer_ni, but it's still connecting... */ + /* found a peer_ni, but it's still connecting... 
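 * (the tx is parked on ibp_tx_queue, with a deadline, until the connection completes)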
*/ LASSERT(kiblnd_peer_connecting(peer_ni)); - if (tx != NULL) + if (tx != NULL) { + tx->tx_deadline = ktime_add_ns(ktime_get(), + timeout_ns); list_add_tail(&tx->tx_list, - &peer_ni->ibp_tx_queue); + &peer_ni->ibp_tx_queue); + } write_unlock_irqrestore(g_lock, flags); } else { conn = kiblnd_get_conn_locked(peer_ni); @@ -1528,19 +1642,19 @@ kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid) write_unlock_irqrestore(g_lock, flags); - if (tx != NULL) - kiblnd_queue_tx(tx, conn); - kiblnd_conn_decref(conn); /* ...to here */ - } - return; - } + if (tx != NULL) + kiblnd_queue_tx(tx, conn); + kiblnd_conn_decref(conn); /* ...to here */ + } + return; + } write_unlock_irqrestore(g_lock, flags); /* Allocate a peer_ni ready to add to the peer_ni table and retry */ rc = kiblnd_create_peer(ni, &peer_ni, nid); if (rc != 0) { - CERROR("Can't create peer_ni %s\n", libcfs_nid2str(nid)); + CERROR("Can't create peer_ni %s\n", libcfs_nidstr(nid)); if (tx != NULL) { tx->tx_status = -EHOSTUNREACH; tx->tx_waiting = 0; @@ -1552,14 +1666,17 @@ kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid) write_lock_irqsave(g_lock, flags); - peer2 = kiblnd_find_peer_locked(ni, nid); - if (peer2 != NULL) { + peer2 = kiblnd_find_peer_locked(ni, nid); + if (peer2 != NULL) { if (list_empty(&peer2->ibp_conns)) { - /* found a peer_ni, but it's still connecting... */ + /* found a peer_ni, but it's still connecting... */ LASSERT(kiblnd_peer_connecting(peer2)); - if (tx != NULL) + if (tx != NULL) { + tx->tx_deadline = ktime_add_ns(ktime_get(), + timeout_ns); list_add_tail(&tx->tx_list, - &peer2->ibp_tx_queue); + &peer2->ibp_tx_queue); + } write_unlock_irqrestore(g_lock, flags); } else { conn = kiblnd_get_conn_locked(peer2); @@ -1567,14 +1684,14 @@ kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid) write_unlock_irqrestore(g_lock, flags); - if (tx != NULL) - kiblnd_queue_tx(tx, conn); - kiblnd_conn_decref(conn); /* ...to here */ - } + if (tx != NULL) + kiblnd_queue_tx(tx, conn); + kiblnd_conn_decref(conn); /* ...to here */ + } - kiblnd_peer_decref(peer_ni); - return; - } + kiblnd_peer_decref(peer_ni); + return; + } /* Brand new peer_ni */ LASSERT(peer_ni->ibp_connecting == 0); @@ -1584,82 +1701,90 @@ kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid) /* always called with a ref on ni, which prevents ni being shutdown */ LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0); - if (tx != NULL) + if (tx != NULL) { + tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns); list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue); + } - kiblnd_peer_addref(peer_ni); - list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid)); + kiblnd_peer_addref(peer_ni); + hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, nidhash(nid)); write_unlock_irqrestore(g_lock, flags); for (i = 0; i < tunables->lnd_conns_per_peer; i++) kiblnd_connect_peer(peer_ni); - kiblnd_peer_decref(peer_ni); + kiblnd_peer_decref(peer_ni); } int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) { + struct kib_dev *dev = ((struct kib_net *)ni->ni_data)->ibn_dev; struct lnet_hdr *hdr = &lntmsg->msg_hdr; - int type = lntmsg->msg_type; - struct lnet_process_id target = lntmsg->msg_target; - int target_is_router = lntmsg->msg_target_is_router; - int routing = lntmsg->msg_routing; - unsigned int payload_niov = lntmsg->msg_niov; - struct bio_vec *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; + 
int type = lntmsg->msg_type; + struct lnet_processid *target = &lntmsg->msg_target; + int target_is_router = lntmsg->msg_target_is_router; + int routing = lntmsg->msg_routing; + unsigned int payload_niov = lntmsg->msg_niov; + struct bio_vec *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + struct lnet_libmd *msg_md = lntmsg->msg_md; + bool gpu; struct kib_msg *ibmsg; struct kib_rdma_desc *rd; struct kib_tx *tx; - int nob; - int rc; + int nob; + int rc; - /* NB 'private' is different depending on what we're sending.... */ + /* NB 'private' is different depending on what we're sending.... */ - CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n", - payload_nob, payload_niov, libcfs_id2str(target)); + CDEBUG(D_NET, "sending %d bytes in %d frags to %s\n", + payload_nob, payload_niov, libcfs_idstr(target)); - LASSERT (payload_nob == 0 || payload_niov > 0); - LASSERT (payload_niov <= LNET_MAX_IOV); + LASSERT(payload_nob == 0 || payload_niov > 0); /* Thread context */ - LASSERT (!in_interrupt()); + LASSERT(!in_interrupt()); + + tx = kiblnd_get_idle_tx(ni, &target->nid); + if (tx == NULL) { + CERROR("Can't allocate %s txd for %s\n", + lnet_msgtyp2str(type), + libcfs_nidstr(&target->nid)); + return -ENOMEM; + } + ibmsg = tx->tx_msg; + gpu = lnet_md_is_gpu(msg_md); switch (type) { default: LBUG(); return (-EIO); - case LNET_MSG_ACK: - LASSERT (payload_nob == 0); - break; + case LNET_MSG_ACK: + LASSERT(payload_nob == 0); + break; - case LNET_MSG_GET: - if (routing || target_is_router) - break; /* send IMMEDIATE */ + case LNET_MSG_GET: + if (routing || target_is_router) + break; /* send IMMEDIATE */ - /* is the REPLY message too small for RDMA? */ - nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]); - if (nob <= IBLND_MSG_SIZE) - break; /* send IMMEDIATE */ + /* is the REPLY message too small for RDMA? 
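 * (replies that fit in IBLND_MSG_SIZE are sent inline as IMMEDIATE, unless the MD is GPU memory)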
*/ + nob = offsetof(struct kib_msg, + ibm_u.immediate.ibim_payload[msg_md->md_length]); + if (nob <= IBLND_MSG_SIZE && !gpu) + break; /* send IMMEDIATE */ - tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { - CERROR("Can't allocate txd for GET to %s\n", - libcfs_nid2str(target.nid)); - return -ENOMEM; - } - - ibmsg = tx->tx_msg; rd = &ibmsg->ibm_u.get.ibgm_rd; + tx->tx_gpu = gpu; rc = kiblnd_setup_rd_kiov(ni, tx, rd, - lntmsg->msg_md->md_niov, - lntmsg->msg_md->md_kiov, - 0, lntmsg->msg_md->md_length); + msg_md->md_niov, + msg_md->md_kiov, + 0, msg_md->md_length); if (rc != 0) { CERROR("Can't setup GET sink for %s: %d\n", - libcfs_nid2str(target.nid), rc); + libcfs_nidstr(&target->nid), rc); tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR; kiblnd_tx_done(tx); return -EIO; @@ -1667,127 +1792,150 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]); ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie; - ibmsg->ibm_u.get.ibgm_hdr = *hdr; + lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.get.ibgm_hdr); - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob); + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob); - tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg); + tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg); if (tx->tx_lntmsg[1] == NULL) { CERROR("Can't create reply for GET -> %s\n", - libcfs_nid2str(target.nid)); + libcfs_nidstr(&target->nid)); kiblnd_tx_done(tx); return -EIO; } - tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */ - tx->tx_waiting = 1; /* waiting for GET_DONE */ - kiblnd_launch_tx(ni, tx, target.nid); - return 0; + /* finalise lntmsg[0,1] on completion */ + tx->tx_lntmsg[0] = lntmsg; + tx->tx_waiting = 1; /* waiting for GET_DONE */ + kiblnd_launch_tx(ni, tx, &target->nid); + return 0; - case LNET_MSG_REPLY: - case LNET_MSG_PUT: - /* Is the payload small enough not to need RDMA? */ - nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]); - if (nob <= IBLND_MSG_SIZE) - break; /* send IMMEDIATE */ + case LNET_MSG_REPLY: + case LNET_MSG_PUT: + /* Is the payload small enough not to need RDMA? */ + nob = offsetof(struct kib_msg, + ibm_u.immediate.ibim_payload[payload_nob]); + if (nob <= IBLND_MSG_SIZE && !gpu) + break; /* send IMMEDIATE */ - tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { - CERROR("Can't allocate %s txd for %s\n", - type == LNET_MSG_PUT ? 
"PUT" : "REPLY", - libcfs_nid2str(target.nid)); - return -ENOMEM; - } + tx->tx_gpu = gpu; rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, payload_niov, payload_kiov, payload_offset, payload_nob); if (rc != 0) { CERROR("Can't setup PUT src for %s: %d\n", - libcfs_nid2str(target.nid), rc); + libcfs_nidstr(&target->nid), rc); kiblnd_tx_done(tx); return -EIO; } - ibmsg = tx->tx_msg; - ibmsg->ibm_u.putreq.ibprm_hdr = *hdr; - ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie; + lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.putreq.ibprm_hdr); + ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie; kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(struct kib_putreq_msg)); - tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ - tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */ - kiblnd_launch_tx(ni, tx, target.nid); - return 0; - } + /* finalise lntmsg[0,1] on completion */ + tx->tx_lntmsg[0] = lntmsg; + tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */ + kiblnd_launch_tx(ni, tx, &target->nid); + return 0; + } /* send IMMEDIATE */ LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]) <= IBLND_MSG_SIZE); - tx = kiblnd_get_idle_tx(ni, target.nid); - if (tx == NULL) { - CERROR ("Can't send %d to %s: tx descs exhausted\n", - type, libcfs_nid2str(target.nid)); - return -ENOMEM; - } + ibmsg = tx->tx_msg; + lnet_hdr_to_nid4(hdr, &ibmsg->ibm_u.immediate.ibim_hdr); - ibmsg = tx->tx_msg; - ibmsg->ibm_u.immediate.ibim_hdr = *hdr; + if (IS_FAST_REG_DEV(dev) && payload_nob) { + struct ib_rdma_wr *wrq; + int i; - lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg, - offsetof(struct kib_msg, - ibm_u.immediate.ibim_payload), - payload_niov, payload_kiov, - payload_offset, payload_nob); + nob = offsetof(struct kib_immediate_msg, ibim_payload[0]); + wrq = kiblnd_init_tx_msg_payload(ni, tx, IBLND_MSG_IMMEDIATE, + nob, payload_nob); - nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]); - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); + rd = tx->tx_rd; + rc = kiblnd_setup_rd_kiov(ni, tx, rd, + payload_niov, payload_kiov, + payload_offset, payload_nob); + if (rc != 0) { + CERROR("Can't setup IMMEDIATE src for %s: %d\n", + libcfs_nidstr(&target->nid), rc); + kiblnd_tx_done(tx); + return -EIO; + } - tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ - kiblnd_launch_tx(ni, tx, target.nid); - return 0; + /* lets generate a SGE chain */ + for (i = 0; i < rd->rd_nfrags; i++) { + kiblnd_init_tx_sge(tx, rd->rd_frags[i].rf_addr, + rd->rd_frags[i].rf_nob); + wrq->wr.num_sge++; + } + } else { + lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg, + offsetof(struct kib_msg, + ibm_u.immediate.ibim_payload), + payload_niov, payload_kiov, + payload_offset, payload_nob); + + nob = offsetof(struct kib_immediate_msg, + ibim_payload[payload_nob]); + + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); + } + + /* finalise lntmsg on completion */ + tx->tx_lntmsg[0] = lntmsg; + + kiblnd_launch_tx(ni, tx, &target->nid); + return 0; } static void kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg) { - struct lnet_process_id target = lntmsg->msg_target; + struct lnet_processid *target = &lntmsg->msg_target; unsigned int niov = lntmsg->msg_niov; struct bio_vec *kiov = lntmsg->msg_kiov; unsigned int offset = lntmsg->msg_offset; unsigned int nob = lntmsg->msg_len; + struct lnet_libmd *msg_md = lntmsg->msg_md; struct kib_tx *tx; int rc; - tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); - if (tx == NULL) { - CERROR("Can't get tx for REPLY to %s\n", - 
libcfs_nid2str(target.nid)); - goto failed_0; - } + tx = kiblnd_get_idle_tx(ni, &rx->rx_conn->ibc_peer->ibp_nid); + if (tx == NULL) { + CERROR("Can't get tx for REPLY to %s\n", + libcfs_nidstr(&target->nid)); + goto failed_0; + } - if (nob == 0) - rc = 0; - else - rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, - niov, kiov, offset, nob); + tx->tx_gpu = lnet_md_is_gpu(msg_md); - if (rc != 0) { - CERROR("Can't setup GET src for %s: %d\n", - libcfs_nid2str(target.nid), rc); - goto failed_1; - } + if (nob == 0) + rc = 0; + else + rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, + niov, kiov, offset, nob); - rc = kiblnd_init_rdma(rx->rx_conn, tx, - IBLND_MSG_GET_DONE, nob, - &rx->rx_msg->ibm_u.get.ibgm_rd, - rx->rx_msg->ibm_u.get.ibgm_cookie); - if (rc < 0) { - CERROR("Can't setup rdma for GET from %s: %d\n", - libcfs_nid2str(target.nid), rc); - goto failed_1; - } + if (rc != 0) { + CERROR("Can't setup GET src for %s: %d\n", + libcfs_nidstr(&target->nid), rc); + goto failed_1; + } + + rc = kiblnd_init_rdma(rx->rx_conn, tx, + IBLND_MSG_GET_DONE, nob, + &rx->rx_msg->ibm_u.get.ibgm_rd, + rx->rx_msg->ibm_u.get.ibgm_cookie); + if (rc < 0) { + CERROR("Can't setup rdma for GET from %s: %d\n", + libcfs_nidstr(&target->nid), rc); + goto failed_1; + } if (nob == 0) { /* No RDMA: local completion may happen now! */ @@ -1798,8 +1946,8 @@ kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg) tx->tx_lntmsg[0] = lntmsg; } - kiblnd_queue_tx(tx, rx->rx_conn); - return; + kiblnd_queue_tx(tx, rx->rx_conn); + return; failed_1: @@ -1809,6 +1957,19 @@ failed_0: lnet_finalize(lntmsg, -EIO); } +unsigned int +kiblnd_get_dev_prio(struct lnet_ni *ni, unsigned int dev_idx) +{ + struct kib_net *net = ni->ni_data; + struct device *dev = NULL; + + if (net) + dev = net->ibn_dev->ibd_hdev->ibh_ibdev->dma_device; + + return lnet_get_dev_prio(dev, dev_idx); + +} + int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, int delayed, unsigned int niov, struct bio_vec *kiov, @@ -1829,16 +1990,16 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, switch (rxmsg->ibm_type) { default: LBUG(); - - case IBLND_MSG_IMMEDIATE: + /* fallthrough */ + case IBLND_MSG_IMMEDIATE: nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]); - if (nob > rx->rx_nob) { - CERROR ("Immediate message from %s too big: %d(%d)\n", - libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid), - nob, rx->rx_nob); - rc = -EPROTO; - break; - } + if (nob > rx->rx_nob) { + CERROR("Immediate message from %s too big: %d(%d)\n", + libcfs_nidstr(&lntmsg->msg_hdr.src_nid), + nob, rx->rx_nob); + rc = -EPROTO; + break; + } lnet_copy_flat2kiov(niov, kiov, offset, IBLND_MSG_SIZE, rxmsg, @@ -1851,8 +2012,9 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, case IBLND_MSG_PUT_REQ: { struct kib_msg *txmsg; struct kib_rdma_desc *rd; - ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; + struct lnet_libmd *msg_md = lntmsg->msg_md; + ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; if (mlen == 0) { lnet_finalize(lntmsg, 0); kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, @@ -1860,14 +2022,16 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, break; } - tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); - if (tx == NULL) { - CERROR("Can't allocate tx for %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); - /* Not replying will break the connection */ - rc = -ENOMEM; - break; - } + tx = kiblnd_get_idle_tx(ni, &conn->ibc_peer->ibp_nid); + if (tx == NULL) { + 
CERROR("Can't allocate tx for %s\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid)); + /* Not replying will break the connection */ + rc = -ENOMEM; + break; + } + + tx->tx_gpu = lnet_md_is_gpu(msg_md); txmsg = tx->tx_msg; rd = &txmsg->ibm_u.putack.ibpam_rd; @@ -1875,7 +2039,7 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, niov, kiov, offset, mlen); if (rc != 0) { CERROR("Can't setup PUT sink for %s: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); + libcfs_nidstr(&conn->ibc_peer->ibp_nid), rc); tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR; kiblnd_tx_done(tx); /* tell peer_ni it's over */ @@ -1916,18 +2080,6 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, return rc; } -int -kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) -{ - struct task_struct *task = kthread_run(fn, arg, name); - - if (IS_ERR(task)) - return PTR_ERR(task); - - atomic_inc(&kiblnd_data.kib_nthreads); - return 0; -} - static void kiblnd_thread_fini (void) { @@ -1945,7 +2097,7 @@ kiblnd_peer_alive(struct kib_peer_ni *peer_ni) static void kiblnd_peer_notify(struct kib_peer_ni *peer_ni) { - int error = 0; + int error = 0; time64_t last_alive = 0; unsigned long flags; @@ -1961,8 +2113,8 @@ kiblnd_peer_notify(struct kib_peer_ni *peer_ni) read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); if (error != 0) - lnet_notify(peer_ni->ibp_ni, - peer_ni->ibp_nid, false, false, last_alive); + lnet_notify(peer_ni->ibp_ni, &peer_ni->ibp_nid, + false, false, last_alive); } void @@ -1991,20 +2143,21 @@ kiblnd_close_conn_locked(struct kib_conn *conn, int error) list_empty(&conn->ibc_tx_queue) && list_empty(&conn->ibc_tx_queue_rsrvd) && list_empty(&conn->ibc_tx_queue_nocred) && - list_empty(&conn->ibc_active_txs)) { - CDEBUG(D_NET, "closing conn to %s\n", - libcfs_nid2str(peer_ni->ibp_nid)); - } else { - CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", - libcfs_nid2str(peer_ni->ibp_nid), error, - list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", - list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", - list_empty(&conn->ibc_tx_queue_rsrvd) ? + list_empty(&conn->ibc_active_txs)) + CDEBUG(D_NET, "closing conn %p to %s\n", + conn, libcfs_nidstr(&peer_ni->ibp_nid)); + else + CNETERR("Closing conn %p to %s: error %d%s%s%s%s%s\n", + conn, + libcfs_nidstr(&peer_ni->ibp_nid), error, + list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", + list_empty(&conn->ibc_tx_noops) ? + "" : "(sending_noops)", + list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", - list_empty(&conn->ibc_tx_queue_nocred) ? - "" : "(sending_nocred)", - list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); - } + list_empty(&conn->ibc_tx_queue_nocred) ? + "" : "(sending_nocred)", + list_empty(&conn->ibc_active_txs) ? 
"" : "(waiting)"); dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev; if (peer_ni->ibp_next_conn == conn) @@ -2060,9 +2213,9 @@ kiblnd_handle_early_rxs(struct kib_conn *conn) LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - while (!list_empty(&conn->ibc_early_rxs)) { - rx = list_entry(conn->ibc_early_rxs.next, - struct kib_rx, rx_list); + while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs, + struct kib_rx, + rx_list)) != NULL) { list_del(&rx->rx_list); write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); @@ -2137,10 +2290,10 @@ kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs) kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK); } -static int +static bool kiblnd_tx_may_discard(struct kib_conn *conn) { - int rc = 0; + bool rc = false; struct kib_tx *nxt; struct kib_tx *tx; @@ -2153,7 +2306,7 @@ kiblnd_tx_may_discard(struct kib_conn *conn) if (tx->tx_sending == 0) { kiblnd_conn_decref(tx->tx_conn); tx->tx_conn = NULL; - rc = 1; + rc = true; } } } @@ -2178,6 +2331,9 @@ kiblnd_finalise_conn(struct kib_conn *conn) /* Complete all tx descs not waiting for sends to complete. * NB we should be safe from RDMA now that the QP has changed state */ + CDEBUG(D_NET, "abort connection with %s\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid)); + kiblnd_abort_txs(conn, &conn->ibc_tx_noops); kiblnd_abort_txs(conn, &conn->ibc_tx_queue); kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd); @@ -2192,10 +2348,11 @@ kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active, int error) { LIST_HEAD(zombies); - unsigned long flags; + unsigned long flags; + enum lnet_msg_hstatus hstatus; - LASSERT (error != 0); - LASSERT (!in_interrupt()); + LASSERT(error != 0); + LASSERT(!in_interrupt()); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); @@ -2236,14 +2393,22 @@ kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active, return; CNETERR("Deleting messages for %s: connection failed\n", - libcfs_nid2str(peer_ni->ibp_nid)); + libcfs_nidstr(&peer_ni->ibp_nid)); - if (error == -EHOSTUNREACH || error == -ETIMEDOUT) - kiblnd_txlist_done(&zombies, error, - LNET_MSG_STATUS_NETWORK_TIMEOUT); - else - kiblnd_txlist_done(&zombies, error, - LNET_MSG_STATUS_LOCAL_DROPPED); + switch (error) { + case -EHOSTUNREACH: + case -ETIMEDOUT: + hstatus = LNET_MSG_STATUS_NETWORK_TIMEOUT; + break; + case -ECONNREFUSED: + hstatus = LNET_MSG_STATUS_REMOTE_DROPPED; + break; + default: + hstatus = LNET_MSG_STATUS_LOCAL_DROPPED; + break; + } + + kiblnd_txlist_done(&zombies, error, hstatus); } static void @@ -2258,7 +2423,7 @@ kiblnd_connreq_done(struct kib_conn *conn, int status) active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); CDEBUG(D_NET,"%s: active(%d), version(%x), status(%d)\n", - libcfs_nid2str(peer_ni->ibp_nid), active, + libcfs_nidstr(&peer_ni->ibp_nid), active, conn->ibc_version, status); LASSERT (!in_interrupt()); @@ -2280,9 +2445,6 @@ kiblnd_connreq_done(struct kib_conn *conn, int status) /* connection established */ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - /* reset retry count */ - peer_ni->ibp_retries = 0; - conn->ibc_last_send = ktime_get(); kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED); kiblnd_peer_alive(peer_ni); @@ -2340,8 +2502,8 @@ kiblnd_connreq_done(struct kib_conn *conn, int status) * scheduled. We won't be using round robin on this first batch. 
*/ spin_lock(&conn->ibc_lock); - while (!list_empty(&txs)) { - tx = list_entry(txs.next, struct kib_tx, tx_list); + while ((tx = list_first_entry_or_null(&txs, struct kib_tx, + tx_list)) != NULL) { list_del(&tx->tx_list); kiblnd_queue_tx_locked(tx, conn); @@ -2359,7 +2521,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej) { int rc; -#ifdef HAVE_RDMA_REJECT_4ARGS +#ifdef HAVE_OFED_RDMA_REJECT_4ARGS rc = rdma_reject(cmid, rej, sizeof(*rej), IB_CM_REJ_CONSUMER_DEFINED); #else rc = rdma_reject(cmid, rej, sizeof(*rej)); @@ -2372,7 +2534,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej) static int kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) { - rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; struct kib_msg *reqmsg = priv; struct kib_msg *ackmsg; struct kib_dev *ibdev; @@ -2381,30 +2543,47 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) struct kib_conn *conn; struct lnet_ni *ni = NULL; struct kib_net *net = NULL; - lnet_nid_t nid; - struct rdma_conn_param cp; + struct lnet_nid destnid; + struct lnet_nid srcnid; + struct rdma_conn_param cp; struct kib_rej rej; - int version = IBLND_MSG_VERSION; - unsigned long flags; - int rc; - struct sockaddr_in *peer_addr; - LASSERT (!in_interrupt()); + int version = IBLND_MSG_VERSION; + unsigned long flags; + int port = PROT_SOCK, rc; + LASSERT(!in_interrupt()); /* cmid inherits 'context' from the corresponding listener id */ ibdev = cmid->context; LASSERT(ibdev); - memset(&rej, 0, sizeof(rej)); - rej.ibr_magic = IBLND_MSG_MAGIC; - rej.ibr_why = IBLND_REJECT_FATAL; - rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE; - - peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr); - if (*kiblnd_tunables.kib_require_priv_port && - ntohs(peer_addr->sin_port) >= PROT_SOCK) { - __u32 ip = ntohl(peer_addr->sin_addr.s_addr); - CERROR("peer_ni's port (%pI4h:%hu) is not privileged\n", - &ip, ntohs(peer_addr->sin_port)); + memset(&rej, 0, sizeof(rej)); + rej.ibr_magic = IBLND_MSG_MAGIC; + rej.ibr_why = IBLND_REJECT_FATAL; + rej.ibr_cp.ibcp_max_msg_size = IBLND_MSG_SIZE; + + if (*kiblnd_tunables.kib_require_priv_port) { + switch (cmid->route.addr.dst_addr.ss_family) { + case AF_INET6: { + struct sockaddr_in6 *sa; + + sa = (struct sockaddr_in6 *)&(cmid->route.addr.dst_addr); + port = ntohs(sa->sin6_port); + break; + } + case AF_INET: { + struct sockaddr_in *sa; + + sa = (struct sockaddr_in *)&(cmid->route.addr.dst_addr); + port = ntohs(sa->sin_port); + break; + } + default: + break; + } + } + if (port >= PROT_SOCK) { + CERROR("peer_ni's port (%pISc:%hu) is not privileged\n", + &cmid->route.addr.dst_addr, port); goto failed; } @@ -2437,28 +2616,52 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - nid = reqmsg->ibm_srcnid; - ni = lnet_nid2ni_addref(reqmsg->ibm_dstnid); + lnet_nid4_to_nid(reqmsg->ibm_srcnid, &srcnid); + lnet_nid4_to_nid(reqmsg->ibm_dstnid, &destnid); + ni = lnet_nid_to_ni_addref(&destnid); if (ni != NULL) { net = (struct kib_net *)ni->ni_data; rej.ibr_incarnation = net->ibn_incarnation; + } else { + if (ibdev->ibd_nnets == 0) { + rej.ibr_why = IBLND_REJECT_EARLY; + CNETERR("Can't accept conn from %s (%s:%d:%pISc): net for nid %s not added yet\n", + libcfs_nidstr(&srcnid), + ibdev->ibd_ifname, ibdev->ibd_nnets, + &ibdev->ibd_addr, + libcfs_nidstr(&destnid)); + goto failed; + } + list_for_each_entry(net, &ibdev->ibd_nets, ibn_list) { + if ((net->ibn_dev == ibdev) && + 
(net->ibn_ni != NULL) && + (net->ibn_ni->ni_state != LNET_NI_STATE_ACTIVE)) { + rej.ibr_why = IBLND_REJECT_EARLY; + CNETERR("Can't accept conn from %s on %s (%s:%d:%pISc): nid %s not ready\n", + libcfs_nidstr(&srcnid), + libcfs_nidstr(&net->ibn_ni->ni_nid), + ibdev->ibd_ifname, ibdev->ibd_nnets, + &ibdev->ibd_addr, + libcfs_nidstr(&destnid)); + goto failed; + } + } } - if (ni == NULL || /* no matching net */ - ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */ - net->ibn_dev != ibdev) { /* wrong device */ - CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): " - "bad dst nid %s\n", libcfs_nid2str(nid), - ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid), + if (ni == NULL || /* no matching net */ + !nid_same(&ni->ni_nid, &destnid) || /* right NET, wrong NID! */ + net->ibn_dev != ibdev) { /* wrong device */ + CERROR("Can't accept conn from %s on %s (%s:%d:%pISc): bad dst nid %s\n", + libcfs_nidstr(&srcnid), + ni ? libcfs_nidstr(&ni->ni_nid) : "NA", ibdev->ibd_ifname, ibdev->ibd_nnets, - &ibdev->ibd_ifip, - libcfs_nid2str(reqmsg->ibm_dstnid)); - + &ibdev->ibd_addr, + libcfs_nidstr(&destnid)); goto failed; } - /* check time stamp as soon as possible */ + /* check time stamp as soon as possible */ if (reqmsg->ibm_dststamp != 0 && reqmsg->ibm_dststamp != net->ibn_incarnation) { CWARN("Stale connection request\n"); @@ -2471,15 +2674,14 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) if (reqmsg->ibm_type != IBLND_MSG_CONNREQ) { CERROR("Unexpected connreq msg type: %x from %s\n", - reqmsg->ibm_type, libcfs_nid2str(nid)); + reqmsg->ibm_type, libcfs_nidstr(&srcnid)); goto failed; } if (reqmsg->ibm_u.connparams.ibcp_queue_depth > kiblnd_msg_queue_size(version, ni)) { - CERROR("Can't accept conn from %s, queue depth too large: " - " %d (<=%d wanted)\n", - libcfs_nid2str(nid), + CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n", + libcfs_nidstr(&srcnid), reqmsg->ibm_u.connparams.ibcp_queue_depth, kiblnd_msg_queue_size(version, ni)); @@ -2491,9 +2693,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) if (reqmsg->ibm_u.connparams.ibcp_max_frags > IBLND_MAX_RDMA_FRAGS) { - CWARN("Can't accept conn from %s (version %x): " - "max_frags %d too large (%d wanted)\n", - libcfs_nid2str(nid), version, + CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n", + libcfs_nidstr(&srcnid), version, reqmsg->ibm_u.connparams.ibcp_max_frags, IBLND_MAX_RDMA_FRAGS); @@ -2504,10 +2705,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } else if (reqmsg->ibm_u.connparams.ibcp_max_frags < IBLND_MAX_RDMA_FRAGS && net->ibn_fmr_ps == NULL) { - CWARN("Can't accept conn from %s (version %x): " - "max_frags %d incompatible without FMR pool " - "(%d wanted)\n", - libcfs_nid2str(nid), version, + CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n", + libcfs_nidstr(&srcnid), version, reqmsg->ibm_u.connparams.ibcp_max_frags, IBLND_MAX_RDMA_FRAGS); @@ -2517,18 +2716,18 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { - CERROR("Can't accept %s: message size %d too big (%d max)\n", - libcfs_nid2str(nid), - reqmsg->ibm_u.connparams.ibcp_max_msg_size, - IBLND_MSG_SIZE); - goto failed; - } + if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { + CERROR("Can't accept %s: message size %d too big (%d max)\n", + 
libcfs_nidstr(&srcnid), + reqmsg->ibm_u.connparams.ibcp_max_msg_size, + IBLND_MSG_SIZE); + goto failed; + } /* assume 'nid' is a new peer_ni; create */ - rc = kiblnd_create_peer(ni, &peer_ni, nid); + rc = kiblnd_create_peer(ni, &peer_ni, &srcnid); if (rc != 0) { - CERROR("Can't create peer_ni for %s\n", libcfs_nid2str(nid)); + CERROR("Can't create peer_ni for %s\n", libcfs_nidstr(&srcnid)); rej.ibr_why = IBLND_REJECT_NO_RESOURCES; goto failed; } @@ -2539,16 +2738,16 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) write_lock_irqsave(g_lock, flags); - peer2 = kiblnd_find_peer_locked(ni, nid); - if (peer2 != NULL) { - if (peer2->ibp_version == 0) { - peer2->ibp_version = version; - peer2->ibp_incarnation = reqmsg->ibm_srcstamp; - } + peer2 = kiblnd_find_peer_locked(ni, &srcnid); + if (peer2 != NULL) { + if (peer2->ibp_version == 0) { + peer2->ibp_version = version; + peer2->ibp_incarnation = reqmsg->ibm_srcstamp; + } - /* not the guy I've talked with */ - if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp || - peer2->ibp_version != version) { + /* not the guy I've talked with */ + if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp || + peer2->ibp_version != version) { kiblnd_close_peer_conns_locked(peer2, -ESTALE); if (kiblnd_peer_active(peer2)) { @@ -2558,13 +2757,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) write_unlock_irqrestore(g_lock, flags); CWARN("Conn stale %s version %x/%x incarnation %llu/%llu\n", - libcfs_nid2str(nid), peer2->ibp_version, version, + libcfs_nidstr(&srcnid), peer2->ibp_version, version, peer2->ibp_incarnation, reqmsg->ibm_srcstamp); - kiblnd_peer_decref(peer_ni); - rej.ibr_why = IBLND_REJECT_CONN_STALE; - goto failed; - } + kiblnd_peer_decref(peer_ni); + rej.ibr_why = IBLND_REJECT_CONN_STALE; + goto failed; + } /* Tie-break connection race in favour of the higher NID. * If we keep running into a race condition multiple times, @@ -2574,13 +2773,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) * the lower NID connection win so we can move forward. 
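
Note that the tie-break just below compares nidhash() values rather than raw NIDs: with large-NID support a struct lnet_nid is no longer a directly orderable 64-bit quantity, so "higher NID wins" effectively becomes "higher hash wins". That still resolves the race because both peers compute the same hash over the same pair of NIDs, and the ibp_races counter caps how many times the rule is applied before the lower NID is simply allowed to win. A sketch of the rule (hypothetical helper, assuming nidhash() is deterministic on both sides):

static bool
kiblnd_reject_incoming_on_race(struct lnet_nid *remote_nid,
			       struct lnet_nid *local_nid)
{
	/* exactly one side sees remote < local, so exactly one side
	 * rejects the incoming connreq with IBLND_REJECT_CONN_RACE
	 * and lets its own active connect proceed */
	return nidhash(remote_nid) < nidhash(local_nid);
}
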
*/ if (peer2->ibp_connecting != 0 && - nid < ni->ni_nid && peer2->ibp_races < - MAX_CONN_RACES_BEFORE_ABORT) { + nidhash(&srcnid) < nidhash(&ni->ni_nid) && + peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) { peer2->ibp_races++; write_unlock_irqrestore(g_lock, flags); CDEBUG(D_NET, "Conn race %s\n", - libcfs_nid2str(peer2->ibp_nid)); + libcfs_nidstr(&peer2->ibp_nid)); kiblnd_peer_decref(peer_ni); rej.ibr_why = IBLND_REJECT_CONN_RACE; @@ -2588,7 +2787,7 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) } if (peer2->ibp_races >= MAX_CONN_RACES_BEFORE_ABORT) CNETERR("Conn race %s: unresolved after %d attempts, letting lower NID win\n", - libcfs_nid2str(peer2->ibp_nid), + libcfs_nidstr(&peer2->ibp_nid), MAX_CONN_RACES_BEFORE_ABORT); /* * passive connection is allowed even this peer_ni is waiting for @@ -2606,78 +2805,82 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) peer2->ibp_queue_depth = peer_ni->ibp_queue_depth; write_unlock_irqrestore(g_lock, flags); - kiblnd_peer_decref(peer_ni); - peer_ni = peer2; - } else { - /* Brand new peer_ni */ - LASSERT (peer_ni->ibp_accepting == 0); - LASSERT (peer_ni->ibp_version == 0 && - peer_ni->ibp_incarnation == 0); + kiblnd_peer_decref(peer_ni); + peer_ni = peer2; + } else { + /* Brand new peer_ni */ + LASSERT(peer_ni->ibp_accepting == 0); + LASSERT(peer_ni->ibp_version == 0 && + peer_ni->ibp_incarnation == 0); - peer_ni->ibp_accepting = 1; - peer_ni->ibp_version = version; - peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp; + peer_ni->ibp_accepting = 1; + peer_ni->ibp_version = version; + peer_ni->ibp_incarnation = reqmsg->ibm_srcstamp; - /* I have a ref on ni that prevents it being shutdown */ - LASSERT (net->ibn_shutdown == 0); + /* I have a ref on ni that prevents it being shutdown */ + LASSERT(net->ibn_shutdown == 0); - kiblnd_peer_addref(peer_ni); - list_add_tail(&peer_ni->ibp_list, kiblnd_nid2peerlist(nid)); + kiblnd_peer_addref(peer_ni); + hash_add(kiblnd_data.kib_peers, &peer_ni->ibp_list, + nidhash(&srcnid)); write_unlock_irqrestore(g_lock, flags); - } + } - conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, version); - if (conn == NULL) { - kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM); - kiblnd_peer_decref(peer_ni); - rej.ibr_why = IBLND_REJECT_NO_RESOURCES; - goto failed; - } + conn = kiblnd_create_conn(peer_ni, cmid, IBLND_CONN_PASSIVE_WAIT, + version); + if (!conn) { + kiblnd_peer_connect_failed(peer_ni, 0, -ENOMEM); + kiblnd_peer_decref(peer_ni); + rej.ibr_why = IBLND_REJECT_NO_RESOURCES; + goto failed; + } - /* conn now "owns" cmid, so I return success from here on to ensure the - * CM callback doesn't destroy cmid. */ + /* conn now "owns" cmid, so I return success from here on to ensure the + * CM callback doesn't destroy cmid. 
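
With this patch the peer table also moves from the old kiblnd_nid2peerlist() array of list heads to a kernel hashtable: peers are inserted with hash_add() keyed on nidhash(), as above, and the table is sized with HASH_SIZE() elsewhere in the file. A sketch of the matching lookup, assuming kib_peers is a DECLARE_HASHTABLE() and ibp_list is a struct hlist_node; kiblnd_find_peer_locked() is expected to be the real equivalent, run under kib_global_lock:

#include <linux/hashtable.h>

static struct kib_peer_ni *
peer_lookup_sketch(struct lnet_nid *nid)
{
	struct kib_peer_ni *peer_ni;

	/* walk only the bucket that nidhash() selects */
	hash_for_each_possible(kiblnd_data.kib_peers, peer_ni,
			       ibp_list, nidhash(nid)) {
		if (nid_same(&peer_ni->ibp_nid, nid))
			return peer_ni;
	}
	return NULL;
}
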
+ */ conn->ibc_incarnation = reqmsg->ibm_srcstamp; conn->ibc_credits = conn->ibc_queue_depth; conn->ibc_reserved_credits = conn->ibc_queue_depth; LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn)); - ackmsg = &conn->ibc_connvars->cv_msg; - memset(ackmsg, 0, sizeof(*ackmsg)); + ackmsg = &conn->ibc_connvars->cv_msg; + memset(ackmsg, 0, sizeof(*ackmsg)); - kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK, - sizeof(ackmsg->ibm_u.connparams)); + kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK, + sizeof(ackmsg->ibm_u.connparams)); ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags; ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; - kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp); + kiblnd_pack_msg(ni, ackmsg, version, 0, &srcnid, reqmsg->ibm_srcstamp); - memset(&cp, 0, sizeof(cp)); - cp.private_data = ackmsg; - cp.private_data_len = ackmsg->ibm_nob; - cp.responder_resources = 0; /* No atomic ops or RDMA reads */ - cp.initiator_depth = 0; - cp.flow_control = 1; - cp.retry_count = *kiblnd_tunables.kib_retry_count; - cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; + memset(&cp, 0, sizeof(cp)); + cp.private_data = ackmsg; + cp.private_data_len = ackmsg->ibm_nob; + cp.responder_resources = 0; /* No atomic ops or RDMA reads */ + cp.initiator_depth = 0; + cp.flow_control = 1; + cp.retry_count = *kiblnd_tunables.kib_retry_count; + cp.rnr_retry_count = *kiblnd_tunables.kib_rnr_retry_count; - CDEBUG(D_NET, "Accept %s\n", libcfs_nid2str(nid)); + CDEBUG(D_NET, "Accept %s conn %p\n", libcfs_nidstr(&srcnid), conn); - rc = rdma_accept(cmid, &cp); - if (rc != 0) { - CERROR("Can't accept %s: %d\n", libcfs_nid2str(nid), rc); - rej.ibr_version = version; - rej.ibr_why = IBLND_REJECT_FATAL; + rc = rdma_accept(cmid, &cp); + if (rc != 0) { + CNETERR("Can't accept %s: %d cm_id %p\n", + libcfs_nidstr(&srcnid), rc, cmid); + rej.ibr_version = version; + rej.ibr_why = IBLND_REJECT_FATAL; - kiblnd_reject(cmid, &rej); - kiblnd_connreq_done(conn, rc); - kiblnd_conn_decref(conn); - } + kiblnd_reject(cmid, &rej); + kiblnd_connreq_done(conn, rc); + kiblnd_conn_decref(conn); + } - lnet_ni_decref(ni); - return 0; + lnet_ni_decref(ni); + return 0; failed: if (ni != NULL) { @@ -2731,34 +2934,17 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version, goto out; } - if (peer_ni->ibp_retries > *kiblnd_tunables.kib_retry_count) { - reason = "retry count exceeded due to no listener"; - goto out; - } - switch (why) { default: reason = "Unknown"; break; case IBLND_REJECT_RDMA_FRAGS: { - struct lnet_ioctl_config_o2iblnd_tunables *tunables; - if (!cp) { reason = "can't negotiate max frags"; goto out; } - tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; -#ifdef HAVE_IB_GET_DMA_MR - /* - * This check only makes sense if the kernel supports global - * memory registration. 
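
When a connect is rejected with IBLND_REJECT_RDMA_FRAGS (or MSG_QUEUE_SIZE), kiblnd_check_reconnect() is expected only ever to shrink the local parameter to the peer's advertised value before retrying, never to grow it; the "unsupported max frags" check below gives up when the peer's limit is not actually smaller than ours. A minimal model of that negotiate-down rule (hypothetical helper, for illustration only):

#include <linux/types.h>

static bool
negotiate_down(__u16 *ours, __u16 theirs)
{
	if (theirs >= *ours)
		return false;	/* peer's limit is no smaller: nothing to shrink */
	*ours = theirs;		/* adopt the peer's smaller limit and reconnect */
	return true;
}

Because the value can only shrink, repeated rejects converge on a mutually acceptable setting instead of ping-ponging.
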
Otherwise, map_on_demand will never == 0 - */ - if (!tunables->lnd_map_on_demand) { - reason = "map_on_demand must be enabled"; - goto out; - } -#endif + if (conn->ibc_max_frags <= frag_num) { reason = "unsupported max frags"; goto out; @@ -2793,10 +2979,6 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version, case IBLND_REJECT_CONN_UNCOMPAT: reason = "version negotiation"; break; - - case IBLND_REJECT_INVALID_SRV_ID: - reason = "invalid service id"; - break; } conn->ibc_reconnect = 1; @@ -2808,7 +2990,7 @@ kiblnd_check_reconnect(struct kib_conn *conn, int version, write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); CNETERR("%s: %s (%s), %x, %x, msg_size: %d, queue_depth: %d/%d, max_frags: %d/%d\n", - libcfs_nid2str(peer_ni->ibp_nid), + libcfs_nidstr(&peer_ni->ibp_nid), reconnect ? "reconnect" : "don't reconnect", reason, IBLND_MSG_VERSION, version, msg_size, conn->ibc_queue_depth, queue_dep, @@ -2823,6 +3005,7 @@ static void kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob) { struct kib_peer_ni *peer_ni = conn->ibc_peer; + int status = -ECONNREFUSED; LASSERT (!in_interrupt()); LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); @@ -2834,117 +3017,123 @@ kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob) break; case IB_CM_REJ_INVALID_SERVICE_ID: - peer_ni->ibp_retries++; - kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0, - IBLND_REJECT_INVALID_SRV_ID, NULL); + status = -EHOSTUNREACH; CNETERR("%s rejected: no listener at %d\n", - libcfs_nid2str(peer_ni->ibp_nid), + libcfs_nidstr(&peer_ni->ibp_nid), *kiblnd_tunables.kib_service); break; - case IB_CM_REJ_CONSUMER_DEFINED: + case IB_CM_REJ_CONSUMER_DEFINED: if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) { struct kib_rej *rej = priv; struct kib_connparams *cp = NULL; - int flip = 0; - __u64 incarnation = -1; - - /* NB. default incarnation is -1 because: - * a) V1 will ignore dst incarnation in connreq. - * b) V2 will provide incarnation while rejecting me, - * -1 will be overwrote. - * - * if I try to connect to a V1 peer_ni with V2 protocol, - * it rejected me then upgrade to V2, I have no idea - * about the upgrading and try to reconnect with V1, - * in this case upgraded V2 can find out I'm trying to - * talk to the old guy and reject me(incarnation is -1). - */ - - if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) || - rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) { - __swab32s(&rej->ibr_magic); - __swab16s(&rej->ibr_version); - flip = 1; - } + bool flip = false; + __u64 incarnation = -1; + + /* NB. default incarnation is -1 because: + * a) V1 will ignore dst incarnation in connreq. + * b) V2 will provide incarnation while rejecting me, + * -1 will be overwrote. + * + * if I try to connect to a V1 peer_ni with V2 protocol, + * it rejected me then upgrade to V2, I have no idea + * about the upgrading and try to reconnect with V1, + * in this case upgraded V2 can find out I'm trying to + * talk to the old guy and reject me(incarnation is -1). + */ + + if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) || + rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) { + __swab32s(&rej->ibr_magic); + __swab16s(&rej->ibr_version); + flip = true; + } if (priv_nob >= sizeof(struct kib_rej) && - rej->ibr_version > IBLND_MSG_VERSION_1) { - /* priv_nob is always 148 in current version - * of OFED, so we still need to check version. 
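
The reject private data above is raw bytes from the wire, so the code first detects a byte-swapped peer by checking whether the magic matches in reversed form, then swabs each multi-byte field before trusting it. A self-contained sketch of the idiom (the incarnation and connparams fields are only swabbed once priv_nob proves they are actually present):

#include <linux/swab.h>

static void
fixup_rej_endianness(struct kib_rej *rej)
{
	/* the magic only matches byte-swapped if the peer has the
	 * opposite endianness; fix the header fields up in place */
	if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
	    rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
		__swab32s(&rej->ibr_magic);
		__swab16s(&rej->ibr_version);
	}
}
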
- * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */ - cp = &rej->ibr_cp; - - if (flip) { - __swab64s(&rej->ibr_incarnation); - __swab16s(&cp->ibcp_queue_depth); - __swab16s(&cp->ibcp_max_frags); - __swab32s(&cp->ibcp_max_msg_size); - } - - incarnation = rej->ibr_incarnation; - } - - if (rej->ibr_magic != IBLND_MSG_MAGIC && - rej->ibr_magic != LNET_PROTO_MAGIC) { - CERROR("%s rejected: consumer defined fatal error\n", - libcfs_nid2str(peer_ni->ibp_nid)); - break; - } - - if (rej->ibr_version != IBLND_MSG_VERSION && - rej->ibr_version != IBLND_MSG_VERSION_1) { - CERROR("%s rejected: o2iblnd version %x error\n", - libcfs_nid2str(peer_ni->ibp_nid), - rej->ibr_version); - break; - } - - if (rej->ibr_why == IBLND_REJECT_FATAL && - rej->ibr_version == IBLND_MSG_VERSION_1) { - CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n", - libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version); - - if (conn->ibc_version != IBLND_MSG_VERSION_1) - rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT; - } - - switch (rej->ibr_why) { - case IBLND_REJECT_CONN_RACE: - case IBLND_REJECT_CONN_STALE: - case IBLND_REJECT_CONN_UNCOMPAT: + rej->ibr_version > IBLND_MSG_VERSION_1) { + /* priv_nob is always 148 in current version + * of OFED, so we still need to check version. + * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) + */ + cp = &rej->ibr_cp; + + if (flip) { + __swab64s(&rej->ibr_incarnation); + __swab16s(&cp->ibcp_queue_depth); + __swab16s(&cp->ibcp_max_frags); + __swab32s(&cp->ibcp_max_msg_size); + } + + incarnation = rej->ibr_incarnation; + } + + if (rej->ibr_magic != IBLND_MSG_MAGIC && + rej->ibr_magic != LNET_PROTO_MAGIC) { + CERROR("%s rejected: consumer defined fatal error\n", + libcfs_nidstr(&peer_ni->ibp_nid)); + break; + } + + if (rej->ibr_version != IBLND_MSG_VERSION && + rej->ibr_version != IBLND_MSG_VERSION_1) { + CERROR("%s rejected: o2iblnd version %x error\n", + libcfs_nidstr(&peer_ni->ibp_nid), + rej->ibr_version); + break; + } + + if (rej->ibr_why == IBLND_REJECT_FATAL && + rej->ibr_version == IBLND_MSG_VERSION_1) { + CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n", + libcfs_nidstr(&peer_ni->ibp_nid), + rej->ibr_version); + + if (conn->ibc_version != IBLND_MSG_VERSION_1) + rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT; + } + + switch (rej->ibr_why) { + case IBLND_REJECT_CONN_RACE: + case IBLND_REJECT_CONN_STALE: + case IBLND_REJECT_CONN_UNCOMPAT: case IBLND_REJECT_MSG_QUEUE_SIZE: case IBLND_REJECT_RDMA_FRAGS: kiblnd_check_reconnect(conn, rej->ibr_version, - incarnation, rej->ibr_why, cp); - break; - - case IBLND_REJECT_NO_RESOURCES: - CERROR("%s rejected: o2iblnd no resources\n", - libcfs_nid2str(peer_ni->ibp_nid)); - break; - - case IBLND_REJECT_FATAL: - CERROR("%s rejected: o2iblnd fatal error\n", - libcfs_nid2str(peer_ni->ibp_nid)); - break; - - default: - CERROR("%s rejected: o2iblnd reason %d\n", - libcfs_nid2str(peer_ni->ibp_nid), - rej->ibr_why); - break; - } - break; - } - /* fall through */ - default: - CNETERR("%s rejected: reason %d, size %d\n", - libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob); - break; - } + incarnation, + rej->ibr_why, cp); + break; + + case IBLND_REJECT_NO_RESOURCES: + CERROR("%s rejected: o2iblnd no resources\n", + libcfs_nidstr(&peer_ni->ibp_nid)); + break; + + case IBLND_REJECT_FATAL: + CERROR("%s rejected: o2iblnd fatal error\n", + libcfs_nidstr(&peer_ni->ibp_nid)); + break; + + case IBLND_REJECT_EARLY: + CNETERR("%s rejected: tried too early\n", + libcfs_nidstr(&peer_ni->ibp_nid)); + break; - kiblnd_connreq_done(conn, -ECONNREFUSED); + default: + CERROR("%s rejected: 
o2iblnd reason %d\n", + libcfs_nidstr(&peer_ni->ibp_nid), + rej->ibr_why); + break; + } + break; + } + fallthrough; + default: + CNETERR("%s rejected: reason %d, size %d\n", + libcfs_nidstr(&peer_ni->ibp_nid), reason, priv_nob); + break; + } + + kiblnd_connreq_done(conn, status); } static void @@ -2962,13 +3151,13 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) if (rc != 0) { CERROR("Can't unpack connack from %s: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), rc); + libcfs_nidstr(&peer_ni->ibp_nid), rc); goto failed; } if (msg->ibm_type != IBLND_MSG_CONNACK) { CERROR("Unexpected message %d from %s\n", - msg->ibm_type, libcfs_nid2str(peer_ni->ibp_nid)); + msg->ibm_type, libcfs_nidstr(&peer_ni->ibp_nid)); rc = -EPROTO; goto failed; } @@ -2976,7 +3165,7 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) if (ver != msg->ibm_version) { CERROR("%s replied version %x is different with " "requested version %x\n", - libcfs_nid2str(peer_ni->ibp_nid), msg->ibm_version, ver); + libcfs_nidstr(&peer_ni->ibp_nid), msg->ibm_version, ver); rc = -EPROTO; goto failed; } @@ -2984,7 +3173,7 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) if (msg->ibm_u.connparams.ibcp_queue_depth > conn->ibc_queue_depth) { CERROR("%s has incompatible queue depth %d (<=%d wanted)\n", - libcfs_nid2str(peer_ni->ibp_nid), + libcfs_nidstr(&peer_ni->ibp_nid), msg->ibm_u.connparams.ibcp_queue_depth, conn->ibc_queue_depth); rc = -EPROTO; @@ -2994,7 +3183,7 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) if (msg->ibm_u.connparams.ibcp_max_frags > conn->ibc_max_frags) { CERROR("%s has incompatible max_frags %d (<=%d wanted)\n", - libcfs_nid2str(peer_ni->ibp_nid), + libcfs_nidstr(&peer_ni->ibp_nid), msg->ibm_u.connparams.ibcp_max_frags, conn->ibc_max_frags); rc = -EPROTO; @@ -3003,7 +3192,7 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { CERROR("%s max message size %d too big (%d max)\n", - libcfs_nid2str(peer_ni->ibp_nid), + libcfs_nidstr(&peer_ni->ibp_nid), msg->ibm_u.connparams.ibcp_max_msg_size, IBLND_MSG_SIZE); rc = -EPROTO; @@ -3011,7 +3200,7 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) } read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (msg->ibm_dstnid == ni->ni_nid && + if (msg->ibm_dstnid == lnet_nid_to_nid4(&ni->ni_nid) && msg->ibm_dststamp == net->ibn_incarnation) rc = 0; else @@ -3021,7 +3210,7 @@ kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob) if (rc != 0) { CERROR("Bad connection reply from %s, rc = %d, " "version: %x max_frags: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), rc, + libcfs_nidstr(&peer_ni->ibp_nid), rc, msg->ibm_version, msg->ibm_u.connparams.ibcp_max_frags); goto failed; } @@ -3089,7 +3278,7 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; kiblnd_pack_msg(peer_ni->ibp_ni, msg, version, - 0, peer_ni->ibp_nid, incarnation); + 0, &peer_ni->ibp_nid, incarnation); memset(&cp, 0, sizeof(cp)); cp.private_data = msg; @@ -3102,18 +3291,34 @@ kiblnd_active_connect(struct rdma_cm_id *cmid) LASSERT(cmid->context == (void *)conn); LASSERT(conn->ibc_cmid == cmid); - - rc = rdma_connect(cmid, &cp); + rc = rdma_connect_locked(cmid, &cp); if (rc != 0) { - CERROR("Can't connect to %s: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), rc); + CNETERR("Can't connect to %s: %d cm_id %p\n", + libcfs_nidstr(&peer_ni->ibp_nid), rc, 
cmid); kiblnd_connreq_done(conn, rc); kiblnd_conn_decref(conn); - } + } else { + CDEBUG(D_NET, "Connected to %s: cm_id %p\n", + libcfs_nidstr(&peer_ni->ibp_nid), cmid); + } return 0; } +/* set the IP ToS ("Type of Service") used by the RoCE QoS */ +static void +kiblnd_set_tos(struct rdma_cm_id *cmid) +{ + struct kib_peer_ni *peer_ni = cmid->context; + struct lnet_ioctl_config_o2iblnd_tunables *t; + + t = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib; + if (t->lnd_tos < 0) + return; + + rdma_set_service_type(cmid, t->lnd_tos); +} + int kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) { @@ -3132,13 +3337,13 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) rc = kiblnd_passive_connect(cmid, (void *)KIBLND_CONN_PARAM(event), KIBLND_CONN_PARAM_LEN(event)); - CDEBUG(D_NET, "connreq: %d\n", rc); + CDEBUG(D_NET, "connreq: %d cm_id %p\n", rc, cmid); return rc; case RDMA_CM_EVENT_ADDR_ERROR: peer_ni = cmid->context; - CNETERR("%s: ADDR ERROR %d\n", - libcfs_nid2str(peer_ni->ibp_nid), event->status); + CNETERR("%s: ADDR ERROR %d cm_id %p\n", + libcfs_nidstr(&peer_ni->ibp_nid), event->status, cmid); kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH); kiblnd_peer_decref(peer_ni); return -EHOSTUNREACH; /* rc != 0 destroys cmid */ @@ -3146,32 +3351,34 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case RDMA_CM_EVENT_ADDR_RESOLVED: peer_ni = cmid->context; - CDEBUG(D_NET,"%s Addr resolved: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), event->status); + CDEBUG(D_NET, "%s Addr resolved: %d cm_id %p\n", + libcfs_nidstr(&peer_ni->ibp_nid), event->status, cmid); if (event->status != 0) { - CNETERR("Can't resolve address for %s: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), event->status); + CNETERR("Can't resolve address for %s: %d cm_id %p\n", + libcfs_nidstr(&peer_ni->ibp_nid), + event->status, cmid); rc = event->status; } else { + kiblnd_set_tos(cmid); rc = rdma_resolve_route( cmid, kiblnd_timeout() * 1000); if (rc == 0) { struct kib_net *net = peer_ni->ibp_ni->ni_data; struct kib_dev *dev = net->ibn_dev; - CDEBUG(D_NET, "%s: connection bound to "\ - "%s:%pI4h:%s\n", - libcfs_nid2str(peer_ni->ibp_nid), - dev->ibd_ifname, - &dev->ibd_ifip, cmid->device->name); + CDEBUG(D_NET, + "%s: connection bound to %s:%pISc:%s\n", + libcfs_nidstr(&peer_ni->ibp_nid), + dev->ibd_ifname, &dev->ibd_addr, + cmid->device->name); return 0; } /* Can't initiate route resolution */ - CERROR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), rc); + CNETERR("Can't resolve route for %s: %d cm_id %p\n", + libcfs_nidstr(&peer_ni->ibp_nid), rc, cmid); } kiblnd_peer_connect_failed(peer_ni, 1, rc); kiblnd_peer_decref(peer_ni); @@ -3179,8 +3386,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case RDMA_CM_EVENT_ROUTE_ERROR: peer_ni = cmid->context; - CNETERR("%s: ROUTE ERROR %d\n", - libcfs_nid2str(peer_ni->ibp_nid), event->status); + CNETERR("%s: ROUTE ERROR %d cm_id %p\n", + libcfs_nidstr(&peer_ni->ibp_nid), event->status, cmid); kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH); kiblnd_peer_decref(peer_ni); return -EHOSTUNREACH; /* rc != 0 destroys cmid */ @@ -3188,36 +3395,41 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case RDMA_CM_EVENT_ROUTE_RESOLVED: peer_ni = cmid->context; CDEBUG(D_NET,"%s Route resolved: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), event->status); + libcfs_nidstr(&peer_ni->ibp_nid), event->status); if (event->status == 0) return 
kiblnd_active_connect(cmid); - CNETERR("Can't resolve route for %s: %d\n", - libcfs_nid2str(peer_ni->ibp_nid), event->status); + CNETERR("Can't resolve route for %s: %d cm_id %p\n", + libcfs_nidstr(&peer_ni->ibp_nid), event->status, cmid); kiblnd_peer_connect_failed(peer_ni, 1, event->status); kiblnd_peer_decref(peer_ni); return event->status; /* rc != 0 destroys cmid */ case RDMA_CM_EVENT_UNREACHABLE: conn = cmid->context; - LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || - conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); - CNETERR("%s: UNREACHABLE %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); - kiblnd_connreq_done(conn, -ENETDOWN); - kiblnd_conn_decref(conn); + CNETERR("%s: UNREACHABLE %d cm_id %p conn %p ibc_state: %d\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), + event->status, cmid, conn, conn->ibc_state); + LASSERT(conn->ibc_state != IBLND_CONN_INIT); + if (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || + conn->ibc_state == IBLND_CONN_PASSIVE_WAIT) { + kiblnd_connreq_done(conn, -ENETDOWN); + kiblnd_conn_decref(conn); + } return 0; case RDMA_CM_EVENT_CONNECT_ERROR: conn = cmid->context; - LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || - conn->ibc_state == IBLND_CONN_PASSIVE_WAIT); - CNETERR("%s: CONNECT ERROR %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->status); - kiblnd_connreq_done(conn, -ENOTCONN); - kiblnd_conn_decref(conn); - return 0; + CNETERR("%s: CONNECT ERROR %d cm_id %p conn %p state: %d\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), + event->status, cmid, conn, conn->ibc_state); + if (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT || + conn->ibc_state == IBLND_CONN_PASSIVE_WAIT) { + kiblnd_connreq_done(conn, -ENOTCONN); + kiblnd_conn_decref(conn); + } + return 0; case RDMA_CM_EVENT_REJECTED: conn = cmid->context; @@ -3226,9 +3438,9 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) LBUG(); case IBLND_CONN_PASSIVE_WAIT: - CERROR ("%s: REJECTED %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - event->status); + CERROR("%s: REJECTED %d cm_id %p\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), + event->status, cmid); kiblnd_connreq_done(conn, -ECONNRESET); break; @@ -3248,14 +3460,14 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) LBUG(); case IBLND_CONN_PASSIVE_WAIT: - CDEBUG(D_NET, "ESTABLISHED (passive): %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + CDEBUG(D_NET, "ESTABLISHED (passive): %s cm_id %p conn %p\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), cmid, conn); kiblnd_connreq_done(conn, 0); break; case IBLND_CONN_ACTIVE_CONNECT: - CDEBUG(D_NET, "ESTABLISHED(active): %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + CDEBUG(D_NET, "ESTABLISHED(active): %s cm_id %p conn %p\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), cmid, conn); kiblnd_check_connreply(conn, (void *)KIBLND_CONN_PARAM(event), KIBLND_CONN_PARAM_LEN(event)); @@ -3271,8 +3483,8 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) case RDMA_CM_EVENT_DISCONNECTED: conn = cmid->context; if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - CERROR("%s DISCONNECTED\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + CERROR("%s DISCONNECTED cm_id %p conn %p\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), cmid, conn); kiblnd_connreq_done(conn, -ECONNRESET); } else { kiblnd_close_conn(conn, 0); @@ -3299,11 +3511,8 @@ static int kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs) { struct kib_tx *tx; - struct list_head *ttmp; - - list_for_each(ttmp, txs) { - tx = 
list_entry(ttmp, struct kib_tx, tx_list); + list_for_each_entry(tx, txs, tx_list) { if (txs != &conn->ibc_active_txs) { LASSERT(tx->tx_queued); } else { @@ -3341,39 +3550,34 @@ kiblnd_check_conns (int idx) LIST_HEAD(closes); LIST_HEAD(checksends); LIST_HEAD(timedout_txs); - struct list_head *peers = &kiblnd_data.kib_peers[idx]; - struct list_head *ptmp; + struct hlist_head *peers = &kiblnd_data.kib_peers[idx]; struct kib_peer_ni *peer_ni; - struct kib_conn *conn; + struct kib_conn *conn; struct kib_tx *tx, *tx_tmp; - struct list_head *ctmp; - unsigned long flags; + unsigned long flags; /* NB. We expect to have a look at all the peers and not find any * RDMAs to time out, so we just use a shared lock while we - * take a look... */ + * take a look... + */ write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - list_for_each(ptmp, peers) { - peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list); - + hlist_for_each_entry(peer_ni, peers, ibp_list) { /* Check tx_deadline */ list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) { if (ktime_compare(ktime_get(), tx->tx_deadline) >= 0) { CWARN("Timed out tx for %s: %lld seconds\n", - libcfs_nid2str(peer_ni->ibp_nid), + libcfs_nidstr(&peer_ni->ibp_nid), ktime_ms_delta(ktime_get(), tx->tx_deadline) / MSEC_PER_SEC); list_move(&tx->tx_list, &timedout_txs); } } - list_for_each(ctmp, &peer_ni->ibp_conns) { + list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) { int timedout; int sendnoop; - conn = list_entry(ctmp, struct kib_conn, ibc_list); - LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED); spin_lock(&conn->ibc_lock); @@ -3386,13 +3590,16 @@ kiblnd_check_conns (int idx) } if (timedout) { - CERROR("Timed out RDMA with %s (%lld): " - "c: %u, oc: %u, rc: %u\n", - libcfs_nid2str(peer_ni->ibp_nid), - ktime_get_seconds() - peer_ni->ibp_last_alive, + CERROR("Timed out RDMA with %s (%lld): c: %u, oc: %u, rc: %u\n", + libcfs_nidstr(&peer_ni->ibp_nid), + ktime_get_seconds() + - peer_ni->ibp_last_alive, conn->ibc_credits, conn->ibc_outstanding_credits, conn->ibc_reserved_credits); +#ifdef O2IBLND_CONN_STATE_DEBUG + kiblnd_dump_conn_dbg(conn); +#endif list_add(&conn->ibc_connd_list, &closes); } else { list_add(&conn->ibc_connd_list, &checksends); @@ -3412,10 +3619,11 @@ kiblnd_check_conns (int idx) /* Handle timeout by closing the whole * connection. We can only be sure RDMA activity - * has ceased once the QP has been modified. */ - while (!list_empty(&closes)) { - conn = list_entry(closes.next, - struct kib_conn, ibc_connd_list); + * has ceased once the QP has been modified. + */ + while ((conn = list_first_entry_or_null(&closes, + struct kib_conn, + ibc_connd_list)) != NULL) { list_del(&conn->ibc_connd_list); kiblnd_close_conn(conn, -ETIMEDOUT); kiblnd_conn_decref(conn); @@ -3423,10 +3631,11 @@ kiblnd_check_conns (int idx) /* In case we have enough credits to return via a * NOOP, but there were no non-blocking tx descs - * free to do it last time... */ - while (!list_empty(&checksends)) { - conn = list_entry(checksends.next, - struct kib_conn, ibc_connd_list); + * free to do it last time... 
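
The closes/checksends loops around this point are the second half of a collect-then-act pattern: kiblnd_check_conns() takes a reference on each candidate connection and stashes it on a private list while holding kib_global_lock, then the real work (closing, or checking for a NOOP send) happens with the global lock dropped. A minimal skeleton of the idiom, with a hypothetical source-list name:

static void
sweep_conns_sketch(struct list_head *some_conn_list)
{
	LIST_HEAD(pending);
	struct kib_conn *conn;
	unsigned long flags;

	/* phase 1: under the global lock, take refs and collect */
	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	list_for_each_entry(conn, some_conn_list, ibc_list) {
		kiblnd_conn_addref(conn);	/* keep conn alive after unlock */
		list_add(&conn->ibc_connd_list, &pending);
	}
	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);

	/* phase 2: lock dropped, safe to do real work per conn */
	while ((conn = list_first_entry_or_null(&pending, struct kib_conn,
						ibc_connd_list)) != NULL) {
		list_del(&conn->ibc_connd_list);
		/* ... kiblnd_close_conn() or a send check goes here ... */
		kiblnd_conn_decref(conn);	/* drop the ref taken above */
	}
}
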
+ */ + while ((conn = list_first_entry_or_null(&checksends, + struct kib_conn, + ibc_connd_list)) != NULL) { list_del(&conn->ibc_connd_list); spin_lock(&conn->ibc_lock); @@ -3443,7 +3652,9 @@ kiblnd_disconnect_conn(struct kib_conn *conn) LASSERT (!in_interrupt()); LASSERT (current == kiblnd_data.kib_connd); LASSERT (conn->ibc_state == IBLND_CONN_CLOSING); - +#ifdef O2IBLND_CONN_STATE_DEBUG + kiblnd_dump_conn_dbg(conn); +#endif rdma_disconnect(conn->ibc_cmid); kiblnd_finalise_conn(conn); @@ -3464,17 +3675,17 @@ kiblnd_disconnect_conn(struct kib_conn *conn) int kiblnd_connd (void *arg) { - spinlock_t *lock= &kiblnd_data.kib_connd_lock; + spinlock_t *lock = &kiblnd_data.kib_connd_lock; wait_queue_entry_t wait; - unsigned long flags; + unsigned long flags; struct kib_conn *conn; - int timeout; - int i; - int dropped_lock; - int peer_index = 0; - unsigned long deadline = jiffies; + int timeout; + int i; + bool dropped_lock; + int peer_index = 0; + unsigned long deadline = jiffies; - init_waitqueue_entry(&wait, current); + init_wait(&wait); kiblnd_data.kib_connd = current; spin_lock_irqsave(lock, flags); @@ -3482,13 +3693,13 @@ kiblnd_connd (void *arg) while (!kiblnd_data.kib_shutdown) { int reconn = 0; - dropped_lock = 0; + dropped_lock = false; - if (!list_empty(&kiblnd_data.kib_connd_zombies)) { + conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies, + struct kib_conn, ibc_list); + if (conn) { struct kib_peer_ni *peer_ni = NULL; - conn = list_entry(kiblnd_data.kib_connd_zombies.next, - struct kib_conn, ibc_list); list_del(&conn->ibc_list); if (conn->ibc_reconnect) { peer_ni = conn->ibc_peer; @@ -3496,7 +3707,7 @@ kiblnd_connd (void *arg) } spin_unlock_irqrestore(lock, flags); - dropped_lock = 1; + dropped_lock = true; kiblnd_destroy_conn(conn); @@ -3515,14 +3726,15 @@ kiblnd_connd (void *arg) &kiblnd_data.kib_reconn_wait); } - if (!list_empty(&kiblnd_data.kib_connd_conns)) { + conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns, + struct kib_conn, ibc_list); + if (conn) { int wait; - conn = list_entry(kiblnd_data.kib_connd_conns.next, - struct kib_conn, ibc_list); + list_del(&conn->ibc_list); spin_unlock_irqrestore(lock, flags); - dropped_lock = 1; + dropped_lock = true; kiblnd_disconnect_conn(conn); wait = conn->ibc_waits; @@ -3534,7 +3746,7 @@ kiblnd_connd (void *arg) if (wait) list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_waits); - } + } while (reconn < KIB_RECONN_BREAK) { if (kiblnd_data.kib_reconn_sec != @@ -3544,15 +3756,15 @@ kiblnd_connd (void *arg) &kiblnd_data.kib_reconn_list); } - if (list_empty(&kiblnd_data.kib_reconn_list)) + conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list, + struct kib_conn, ibc_list); + if (!conn) break; - conn = list_entry(kiblnd_data.kib_reconn_list.next, - struct kib_conn, ibc_list); list_del(&conn->ibc_list); spin_unlock_irqrestore(lock, flags); - dropped_lock = 1; + dropped_lock = true; reconn += kiblnd_reconnect_peer(conn->ibc_peer); kiblnd_peer_decref(conn->ibc_peer); @@ -3561,9 +3773,9 @@ kiblnd_connd (void *arg) spin_lock_irqsave(lock, flags); } - if (!list_empty(&kiblnd_data.kib_connd_waits)) { - conn = list_entry(kiblnd_data.kib_connd_waits.next, - struct kib_conn, ibc_list); + conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits, + struct kib_conn, ibc_list); + if (conn) { list_del(&conn->ibc_list); spin_unlock_irqrestore(lock, flags); @@ -3572,29 +3784,30 @@ kiblnd_connd (void *arg) kiblnd_conn_decref(conn); spin_lock_irqsave(lock, flags); - if (dropped_lock == 0) + if (!dropped_lock) 
list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_waits); } - /* careful with the jiffy wrap... */ - timeout = (int)(deadline - jiffies); - if (timeout <= 0) { - const int n = 4; - const int p = 1; - int chunk = kiblnd_data.kib_peer_hash_size; + /* careful with the jiffy wrap... */ + timeout = (int)(deadline - jiffies); + if (timeout <= 0) { + const int n = 4; + const int p = 1; + int chunk = HASH_SIZE(kiblnd_data.kib_peers); unsigned int lnd_timeout; spin_unlock_irqrestore(lock, flags); - dropped_lock = 1; + dropped_lock = true; - /* Time to check for RDMA timeouts on a few more - * peers: I do checks every 'p' seconds on a - * proportion of the peer_ni table and I need to check - * every connection 'n' times within a timeout - * interval, to ensure I detect a timeout on any - * connection within (n+1)/n times the timeout - * interval. */ + /* Time to check for RDMA timeouts on a few more + * peers: I do checks every 'p' seconds on a + * proportion of the peer_ni table and I need to check + * every connection 'n' times within a timeout + * interval, to ensure I detect a timeout on any + * connection within (n+1)/n times the timeout + * interval. + */ lnd_timeout = kiblnd_timeout(); if (lnd_timeout > n * p) @@ -3605,7 +3818,7 @@ kiblnd_connd (void *arg) for (i = 0; i < chunk; i++) { kiblnd_check_conns(peer_index); peer_index = (peer_index + 1) % - kiblnd_data.kib_peer_hash_size; + HASH_SIZE(kiblnd_data.kib_peers); } deadline += cfs_time_seconds(p); @@ -3640,7 +3853,7 @@ kiblnd_qp_event(struct ib_event *event, void *arg) switch (event->event) { case IB_EVENT_COMM_EST: CDEBUG(D_NET, "%s established\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid)); + libcfs_nidstr(&conn->ibc_peer->ibp_nid)); /* We received a packet but connection isn't established * probably handshake packet was lost, so free to * force make connection established */ @@ -3650,19 +3863,19 @@ kiblnd_qp_event(struct ib_event *event, void *arg) case IB_EVENT_PORT_ERR: case IB_EVENT_DEVICE_FATAL: CERROR("Fatal device error for NI %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid)); + libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid)); atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1); return; case IB_EVENT_PORT_ACTIVE: CERROR("Port reactivated for NI %s\n", - libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid)); + libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid)); atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0); return; default: CERROR("%s: Async QP event type %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event); + libcfs_nidstr(&conn->ibc_peer->ibp_nid), event->event); return; } } @@ -3724,6 +3937,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg) (conn->ibc_nrx > 0 || conn->ibc_nsends_posted > 0)) { kiblnd_conn_addref(conn); /* +1 ref for sched_conns */ + kiblnd_dump_conn_dbg(conn); conn->ibc_scheduled = 1; list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); @@ -3740,31 +3954,28 @@ kiblnd_cq_event(struct ib_event *event, void *arg) struct kib_conn *conn = arg; CERROR("%s: async CQ event type %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event); + libcfs_nidstr(&conn->ibc_peer->ibp_nid), event->event); } int kiblnd_scheduler(void *arg) { - long id = (long)arg; - struct kib_sched_info *sched; + long id = (long)arg; + struct kib_sched_info *sched; struct kib_conn *conn; - wait_queue_entry_t wait; - unsigned long flags; - struct ib_wc wc; - int did_something; - int rc; + wait_queue_entry_t wait; + unsigned long flags; + struct ib_wc wc; + bool did_something; + int rc; - 
init_waitqueue_entry(&wait, current); + init_wait(&wait); sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)]; rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt); if (rc != 0) { - CWARN("Unable to bind on CPU partition %d, please verify " - "whether all CPUs are healthy and reload modules if " - "necessary, otherwise your system might under risk of " - "low performance\n", sched->ibs_cpt); + CWARN("Unable to bind on CPU partition %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n", sched->ibs_cpt); } spin_lock_irqsave(&sched->ibs_lock, flags); @@ -3778,11 +3989,12 @@ kiblnd_scheduler(void *arg) spin_lock_irqsave(&sched->ibs_lock, flags); } - did_something = 0; + did_something = false; - if (!list_empty(&sched->ibs_conns)) { - conn = list_entry(sched->ibs_conns.next, - struct kib_conn, ibc_sched_list); + conn = list_first_entry_or_null(&sched->ibs_conns, + struct kib_conn, + ibc_sched_list); + if (conn) { /* take over kib_sched_conns' ref on conn... */ LASSERT(conn->ibc_scheduled); list_del(&conn->ibc_sched_list); @@ -3792,18 +4004,18 @@ kiblnd_scheduler(void *arg) wc.wr_id = IBLND_WID_INVAL; - rc = ib_poll_cq(conn->ibc_cq, 1, &wc); - if (rc == 0) { - rc = ib_req_notify_cq(conn->ibc_cq, - IB_CQ_NEXT_COMP); - if (rc < 0) { - CWARN("%s: ib_req_notify_cq failed: %d, " - "closing connection\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); - kiblnd_close_conn(conn, -EIO); - kiblnd_conn_decref(conn); + rc = ib_poll_cq(conn->ibc_cq, 1, &wc); + if (rc == 0) { + rc = ib_req_notify_cq(conn->ibc_cq, + IB_CQ_NEXT_COMP); + if (rc < 0) { + CWARN("%s: ib_req_notify_cq failed: %d, closing connection %p\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), + rc, conn); + kiblnd_close_conn(conn, -EIO); + kiblnd_conn_decref(conn); spin_lock_irqsave(&sched->ibs_lock, - flags); + flags); continue; } @@ -3818,16 +4030,15 @@ kiblnd_scheduler(void *arg) "please upgrade firmware and OFED or " "contact vendor.\n", rc, wc.opcode, wc.status, wc.vendor_err, - libcfs_nid2str(conn->ibc_peer->ibp_nid), + libcfs_nidstr(&conn->ibc_peer->ibp_nid), conn->ibc_state); rc = -EINVAL; } if (rc < 0) { - CWARN("%s: ib_poll_cq failed: %d, " - "closing connection\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc); + CWARN("%s: ib_poll_cq failed: %d, closing connection %p\n", + libcfs_nidstr(&conn->ibc_peer->ibp_nid), + rc, conn); kiblnd_close_conn(conn, -EIO); kiblnd_conn_decref(conn); spin_lock_irqsave(&sched->ibs_lock, flags); @@ -3843,7 +4054,7 @@ kiblnd_scheduler(void *arg) /* +1 ref for sched_conns */ kiblnd_conn_addref(conn); list_add_tail(&conn->ibc_sched_list, - &sched->ibs_conns); + &sched->ibs_conns); if (waitqueue_active(&sched->ibs_waitq)) wake_up(&sched->ibs_waitq); } else { @@ -3855,14 +4066,14 @@ kiblnd_scheduler(void *arg) kiblnd_complete(&wc); spin_lock_irqsave(&sched->ibs_lock, flags); - } + } - kiblnd_conn_decref(conn); /* ...drop my ref from above */ - did_something = 1; - } + kiblnd_conn_decref(conn); /* ..drop my ref from above */ + did_something = true; + } - if (did_something) - continue; + if (did_something) + continue; set_current_state(TASK_INTERRUPTIBLE); add_wait_queue_exclusive(&sched->ibs_waitq, &wait); @@ -3884,58 +4095,58 @@ kiblnd_scheduler(void *arg) int kiblnd_failover_thread(void *arg) { - rwlock_t *glock = &kiblnd_data.kib_global_lock; + rwlock_t *glock = &kiblnd_data.kib_global_lock; struct kib_dev *dev; struct net *ns = arg; wait_queue_entry_t wait; - unsigned long flags; - int rc; + unsigned long flags; + int 
rc; LASSERT(*kiblnd_tunables.kib_dev_failover != 0); - init_waitqueue_entry(&wait, current); + init_wait(&wait); write_lock_irqsave(glock, flags); - while (!kiblnd_data.kib_shutdown) { - int do_failover = 0; - int long_sleep; + while (!kiblnd_data.kib_shutdown) { + bool do_failover = false; + int long_sleep; list_for_each_entry(dev, &kiblnd_data.kib_failed_devs, - ibd_fail_list) { + ibd_fail_list) { if (ktime_get_seconds() < dev->ibd_next_failover) - continue; - do_failover = 1; - break; - } + continue; + do_failover = true; + break; + } - if (do_failover) { + if (do_failover) { list_del_init(&dev->ibd_fail_list); - dev->ibd_failover = 1; + dev->ibd_failover = 1; write_unlock_irqrestore(glock, flags); rc = kiblnd_dev_failover(dev, ns); write_lock_irqsave(glock, flags); - LASSERT (dev->ibd_failover); - dev->ibd_failover = 0; - if (rc >= 0) { /* Device is OK or failover succeed */ + LASSERT(dev->ibd_failover); + dev->ibd_failover = 0; + if (rc >= 0) { /* Device is OK or failover succeed */ dev->ibd_next_failover = ktime_get_seconds() + 3; - continue; - } + continue; + } - /* failed to failover, retry later */ + /* failed to failover, retry later */ dev->ibd_next_failover = ktime_get_seconds() + - min(dev->ibd_failed_failover, 10); - if (kiblnd_dev_can_failover(dev)) { + min(dev->ibd_failed_failover, 10); + if (kiblnd_dev_can_failover(dev)) { list_add_tail(&dev->ibd_fail_list, - &kiblnd_data.kib_failed_devs); - } + &kiblnd_data.kib_failed_devs); + } - continue; - } + continue; + } - /* long sleep if no more pending failover */ + /* long sleep if no more pending failover */ long_sleep = list_empty(&kiblnd_data.kib_failed_devs); set_current_state(TASK_INTERRUPTIBLE); @@ -3943,28 +4154,29 @@ kiblnd_failover_thread(void *arg) write_unlock_irqrestore(glock, flags); rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) : - cfs_time_seconds(1)); + cfs_time_seconds(1)); set_current_state(TASK_RUNNING); remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); write_lock_irqsave(glock, flags); - if (!long_sleep || rc != 0) - continue; + if (!long_sleep || rc != 0) + continue; - /* have a long sleep, routine check all active devices, - * we need checking like this because if there is not active - * connection on the dev and no SEND from local, we may listen - * on wrong HCA for ever while there is a bonding failover */ + /* have a long sleep, routine check all active devices, + * we need checking like this because if there is not active + * connection on the dev and no SEND from local, we may listen + * on wrong HCA for ever while there is a bonding failover + */ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { - if (kiblnd_dev_can_failover(dev)) { + if (kiblnd_dev_can_failover(dev)) { list_add_tail(&dev->ibd_fail_list, - &kiblnd_data.kib_failed_devs); - } - } - } + &kiblnd_data.kib_failed_devs); + } + } + } write_unlock_irqrestore(glock, flags); - kiblnd_thread_fini(); - return 0; + kiblnd_thread_fini(); + return 0; }
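
For reference, the sweep sizing in kiblnd_connd() above works out as follows: every connection must be checked n = 4 times per timeout interval, with one sweep every p = 1 seconds, so each sweep has to cover HASH_SIZE(kib_peers) * n * p / lnd_timeout buckets. A worked instance under assumed values (table size 512 and a 100 s LND timeout; both numbers are illustrative, not taken from this patch):

	const int n = 4;			/* checks per timeout interval */
	const int p = 1;			/* seconds between sweeps */
	int chunk = 512;			/* assumed HASH_SIZE(kib_peers) */
	unsigned int lnd_timeout = 100;		/* assumed kiblnd_timeout(), seconds */

	if (lnd_timeout > n * p)
		chunk = (chunk * n * p) / lnd_timeout;	/* 512 * 4 / 100 = 20 */
	if (chunk == 0)
		chunk = 1;

	/* 20 buckets per 1 s sweep covers the whole table every ~26 s,
	 * i.e. roughly four times per 100 s timeout, so a dead connection
	 * is detected within (n + 1) / n * timeout = 125 s at worst. */
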