diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 606396d..1dbd518 100644
--- a/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -562,43 +562,29 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
 }
 
 static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
 {
        kib_hca_dev_t           *hdev;
-       __u64                   *pages = tx->tx_pages;
        kib_fmr_poolset_t       *fps;
-       int                      npages;
-       int                      size;
        int                      cpt;
        int                      rc;
-       int                      i;
 
        LASSERT(tx->tx_pool != NULL);
        LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL);
 
-       hdev = tx->tx_pool->tpo_hdev;
-
-       for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
-               for (size = 0; size < rd->rd_frags[i].rf_nob;
-                    size += hdev->ibh_page_size) {
-                       pages[npages ++] = (rd->rd_frags[i].rf_addr &
-                                           hdev->ibh_page_mask) + size;
-               }
-       }
-
+       hdev = tx->tx_pool->tpo_hdev;
        cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
 
        fps = net->ibn_fmr_ps[cpt];
-       rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
-       if (rc != 0) {
-               CERROR ("Can't map %d pages: %d\n", npages, rc);
-               return rc;
-       }
+       rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
+       if (rc != 0) {
+               CERROR("Can't map %u pages: %d\n", nob, rc);
+               return rc;
+       }
 
        /* If rd is not tx_rd, it's going to get sent to a peer, who will need
         * the rkey */
-       rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
-                                        tx->fmr.fmr_pfmr->fmr->lkey;
+       rd->rd_key = tx->fmr.fmr_key;
        rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
        rd->rd_frags[0].rf_nob = nob;
        rd->rd_nfrags = 1;
@@ -613,10 +599,8 @@ kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
 
        LASSERT(net != NULL);
 
-       if (net->ibn_fmr_ps != NULL && tx->fmr.fmr_pfmr != NULL) {
+       if (net->ibn_fmr_ps != NULL)
                kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
-               tx->fmr.fmr_pfmr = NULL;
-       }
 
        if (tx->tx_nfrags != 0) {
                kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
@@ -628,11 +612,11 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
 static int
 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
 {
-       kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
        kib_net_t     *net = ni->ni_data;
+       kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
        struct ib_mr  *mr = NULL;
-       __u32 nob;
-       int i;
+       __u32          nob;
+       int            i;
 
        /* If rd is not tx_rd, it's going to get sent to a peer and I'm the
         * RDMA sink */
@@ -650,7 +634,7 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
                nob += rd->rd_frags[i].rf_nob;
        }
 
-       mr = kiblnd_find_rd_dma_mr(hdev, rd,
+       mr = kiblnd_find_rd_dma_mr(ni, rd,
                                   (tx->tx_conn != NULL) ?
                                   tx->tx_conn->ibc_max_frags : -1);
        if (mr != NULL) {
@@ -765,10 +749,10 @@ __must_hold(&conn->ibc_lock)
 {
        kib_msg_t  *msg = tx->tx_msg;
        kib_peer_t *peer = conn->ibc_peer;
+       struct lnet_ni *ni = peer->ibp_ni;
        int         ver = conn->ibc_version;
        int         rc;
        int         done;
-       struct ib_send_wr *bad_wrq;
 
        LASSERT(tx->tx_queued);
        /* We rely on this for QP sizing */
@@ -781,7 +765,8 @@ __must_hold(&conn->ibc_lock)
        LASSERT(conn->ibc_credits >= 0);
        LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
 
-       if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
+       if (conn->ibc_nsends_posted ==
+           kiblnd_concurrent_sends(ver, ni)) {
                /* tx completions outstanding... */
                CDEBUG(D_NET, "%s: posted enough\n",
                       libcfs_nid2str(peer->ibp_nid));
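The first hunk above moves the per-page flattening of the RDMA descriptor out of
kiblnd_fmr_map_tx() and behind kiblnd_fmr_pool_map(), which now takes the tx, the
descriptor and the byte count directly. For reference, the sketch below is a minimal
userspace model of the arithmetic the deleted double loop performed; the types
(struct frag, flatten_frags) are stand-ins, not the real kib_rdma_desc_t or
kib_hca_dev_t, and page_mask mirrors what ibh_page_mask is assumed to hold.

    #include <stdint.h>
    #include <stdio.h>

    struct frag { uint64_t addr; unsigned int nob; };

    /* one HCA-page-aligned address per page touched by each fragment, as the
     * deleted loop computed with ibh_page_size/ibh_page_mask */
    static int flatten_frags(const struct frag *frags, int nfrags,
                             uint64_t page_size, uint64_t *pages, int max)
    {
            uint64_t page_mask = ~(page_size - 1);
            int npages = 0;

            for (int i = 0; i < nfrags; i++)
                    for (uint64_t off = 0; off < frags[i].nob; off += page_size) {
                            if (npages == max)
                                    return -1;      /* page list overflow */
                            pages[npages++] = (frags[i].addr & page_mask) + off;
                    }
            return npages;
    }

    int main(void)
    {
            struct frag frags[] = { { 0x10010, 8192 }, { 0x30000, 4096 } };
            uint64_t pages[8];
            int n = flatten_frags(frags, 2, 4096, pages, 8);

            for (int i = 0; i < n; i++)
                    printf("page[%d] = 0x%jx\n", i, (uintmax_t)pages[i]);
            return 0;
    }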
@@ -848,9 +833,28 @@ __must_hold(&conn->ibc_lock)
                /* close_conn will launch failover */
                rc = -ENETDOWN;
        } else {
-               rc = ib_post_send(conn->ibc_cmid->qp,
-                                 tx->tx_wrq, &bad_wrq);
-       }
+               struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
+               struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+               struct ib_send_wr *wr  = &tx->tx_wrq[0].wr;
+
+               if (frd != NULL) {
+                       if (!frd->frd_valid) {
+                               wr = &frd->frd_inv_wr.wr;
+                               wr->next = &frd->frd_fastreg_wr.wr;
+                       } else {
+                               wr = &frd->frd_fastreg_wr.wr;
+                       }
+                       frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
+               }
+
+               LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+                        "bad wr_id "LPX64", opc %d, flags %d, peer: %s\n",
+                        bad->wr_id, bad->opcode, bad->send_flags,
+                        libcfs_nid2str(conn->ibc_peer->ibp_nid));
+
+               bad = NULL;
+               rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
+       }
 
        conn->ibc_last_send = jiffies;
 
@@ -908,7 +912,8 @@ kiblnd_check_sends (kib_conn_t *conn)
 
        spin_lock(&conn->ibc_lock);
 
-       LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
+       LASSERT(conn->ibc_nsends_posted <=
+               kiblnd_concurrent_sends(ver, ni));
        LASSERT (!IBLND_OOB_CAPABLE(ver) ||
                 conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
        LASSERT (conn->ibc_reserved_credits >= 0);
@@ -1018,11 +1023,11 @@ kiblnd_tx_complete (kib_tx_t *tx, int status)
 static void
 kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
 {
-       kib_hca_dev_t     *hdev = tx->tx_pool->tpo_hdev;
-       struct ib_sge     *sge = &tx->tx_sge[tx->tx_nwrq];
-       struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq];
-       int                nob = offsetof (kib_msg_t, ibm_u) + body_nob;
-       struct ib_mr      *mr = hdev->ibh_mrs;
+       kib_hca_dev_t     *hdev = tx->tx_pool->tpo_hdev;
+       struct ib_sge     *sge  = &tx->tx_sge[tx->tx_nwrq];
+       struct ib_rdma_wr *wrq;
+       int                nob  = offsetof(kib_msg_t, ibm_u) + body_nob;
+       struct ib_mr      *mr   = hdev->ibh_mrs;
 
        LASSERT(tx->tx_nwrq >= 0);
        LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
@@ -1035,16 +1040,17 @@ kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)
        sge->addr   = tx->tx_msgaddr;
        sge->length = nob;
 
-       memset(wrq, 0, sizeof(*wrq));
+       wrq = &tx->tx_wrq[tx->tx_nwrq];
+       memset(wrq, 0, sizeof(*wrq));
 
-       wrq->next       = NULL;
-       wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
-       wrq->sg_list    = sge;
-       wrq->num_sge    = 1;
-       wrq->opcode     = IB_WR_SEND;
-       wrq->send_flags = IB_SEND_SIGNALED;
+       wrq->wr.next       = NULL;
+       wrq->wr.wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_TX);
+       wrq->wr.sg_list    = sge;
+       wrq->wr.num_sge    = 1;
+       wrq->wr.opcode     = IB_WR_SEND;
+       wrq->wr.send_flags = IB_SEND_SIGNALED;
 
-       tx->tx_nwrq++;
+       tx->tx_nwrq++;
 }
 
 static int
@@ -1054,7 +1060,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
        kib_msg_t         *ibmsg = tx->tx_msg;
        kib_rdma_desc_t   *srcrd = tx->tx_rd;
        struct ib_sge     *sge = &tx->tx_sge[0];
-       struct ib_send_wr *wrq = &tx->tx_wrq[0];
+       struct ib_rdma_wr *wrq;
        int                rc  = resid;
        int                srcidx;
        int                dstidx;
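The wrq changes in these hunks all follow from one type change: tx_wrq becomes an
array of a wrapper type that embeds the generic struct ib_send_wr as its member .wr
(struct ib_rdma_wr where the kernel provides it, per the HAVE_IB_RDMA_WR guard in the
next hunk). Chains must then be threaded through the embedded member, which is why
&(wrq + 1)->wr and &tx->tx_wrq[0].wr appear throughout. A minimal model of the
pattern, with stand-in types rather than the real RDMA-stack ones:

    #include <stdio.h>

    struct send_wr { struct send_wr *next; };
    struct rdma_wr { struct send_wr wr; unsigned long remote_addr; unsigned int rkey; };

    int main(void)
    {
            struct rdma_wr wrq[3];

            /* thread the chain through the embedded member, as
             * kiblnd_init_rdma() now does with wrq->wr.next = &(wrq + 1)->wr */
            for (int i = 0; i < 2; i++)
                    wrq[i].wr.next = &wrq[i + 1].wr;
            wrq[2].wr.next = NULL;

            /* a consumer that only understands send_wr still walks the chain */
            for (struct send_wr *wr = &wrq[0].wr; wr != NULL; wr = wr->next)
                    printf("wr at %p\n", (void *)wr);
            return 0;
    }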
@@ -1101,15 +1107,20 @@
 
                wrq = &tx->tx_wrq[tx->tx_nwrq];
 
-               wrq->next       = wrq + 1;
-               wrq->wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
-               wrq->sg_list    = sge;
-               wrq->num_sge    = 1;
-               wrq->opcode     = IB_WR_RDMA_WRITE;
-               wrq->send_flags = 0;
-
-               wrq->wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
-               wrq->wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);
+               wrq->wr.next       = &(wrq + 1)->wr;
+               wrq->wr.wr_id      = kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA);
+               wrq->wr.sg_list    = sge;
+               wrq->wr.num_sge    = 1;
+               wrq->wr.opcode     = IB_WR_RDMA_WRITE;
+               wrq->wr.send_flags = 0;
+
+#ifdef HAVE_IB_RDMA_WR
+               wrq->remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
+               wrq->rkey        = kiblnd_rd_frag_key(dstrd, dstidx);
+#else
+               wrq->wr.wr.rdma.remote_addr = kiblnd_rd_frag_addr(dstrd, dstidx);
+               wrq->wr.wr.rdma.rkey        = kiblnd_rd_frag_key(dstrd, dstidx);
+#endif
 
                srcidx = kiblnd_rd_consume_frag(srcrd, srcidx, wrknob);
                dstidx = kiblnd_rd_consume_frag(dstrd, dstidx, wrknob);
@@ -1234,7 +1245,7 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid,
        return rc;
 }
 
-void
+static void
 kiblnd_connect_peer (kib_peer_t *peer)
 {
        struct rdma_cm_id *cmid;
@@ -1246,6 +1257,7 @@ kiblnd_connect_peer (kib_peer_t *peer)
 
        LASSERT (net != NULL);
        LASSERT (peer->ibp_connecting > 0);
+       LASSERT(!peer->ibp_reconnecting);
 
        cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP,
                                     IB_QPT_RC);
@@ -1290,13 +1302,65 @@ kiblnd_connect_peer (kib_peer_t *peer)
               libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
               &dev->ibd_ifip, cmid->device->name);
 
-       return;
+       return;
 
  failed2:
-       kiblnd_peer_decref(peer);               /* cmid's ref */
-       rdma_destroy_id(cmid);
+       kiblnd_peer_connect_failed(peer, 1, rc);
+       kiblnd_peer_decref(peer);               /* cmid's ref */
+       rdma_destroy_id(cmid);
+       return;
  failed:
-       kiblnd_peer_connect_failed(peer, 1, rc);
+       kiblnd_peer_connect_failed(peer, 1, rc);
+}
+
+bool
+kiblnd_reconnect_peer(kib_peer_t *peer)
+{
+       rwlock_t         *glock = &kiblnd_data.kib_global_lock;
+       char             *reason = NULL;
+       struct list_head  txs;
+       unsigned long     flags;
+
+       INIT_LIST_HEAD(&txs);
+
+       write_lock_irqsave(glock, flags);
+       if (peer->ibp_reconnecting == 0) {
+               if (peer->ibp_accepting)
+                       reason = "accepting";
+               else if (peer->ibp_connecting)
+                       reason = "connecting";
+               else if (!list_empty(&peer->ibp_conns))
+                       reason = "connected";
+               else /* connected then closed */
+                       reason = "closed";
+
+               goto no_reconnect;
+       }
+
+       LASSERT(!peer->ibp_accepting && !peer->ibp_connecting &&
+               list_empty(&peer->ibp_conns));
+       peer->ibp_reconnecting = 0;
+
+       if (!kiblnd_peer_active(peer)) {
+               list_splice_init(&peer->ibp_tx_queue, &txs);
+               reason = "unlinked";
+               goto no_reconnect;
+       }
+
+       peer->ibp_connecting++;
+       peer->ibp_reconnected++;
+       write_unlock_irqrestore(glock, flags);
+
+       kiblnd_connect_peer(peer);
+       return true;
+
+ no_reconnect:
+       write_unlock_irqrestore(glock, flags);
+
+       CWARN("Abort reconnection of %s: %s\n",
+             libcfs_nid2str(peer->ibp_nid), reason);
+       kiblnd_txlist_done(peer->ibp_ni, &txs, -ECONNABORTED);
+       return false;
 }
 
 void
@@ -1341,8 +1405,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
        if (peer != NULL) {
                if (list_empty(&peer->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
-                       LASSERT (peer->ibp_connecting != 0 ||
-                                peer->ibp_accepting != 0);
+                       LASSERT(kiblnd_peer_connecting(peer));
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list,
                                              &peer->ibp_tx_queue);
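kiblnd_peer_connecting() replaces the open-coded two-flag test here and below. Its
definition is not part of this patch (it lives in o2iblnd.h), but given the new
ibp_reconnecting state introduced above, it presumably folds all three "an attempt is
in flight" conditions into one predicate; the sketch below is a guess at its shape,
with a stand-in struct, not the real kib_peer_t.

    /* guessed shape of the kiblnd_peer_connecting() helper */
    struct peer_state {
            int ibp_connecting;     /* active connects in progress */
            int ibp_accepting;      /* passive connects in progress */
            int ibp_reconnecting;   /* parked, waiting for connd to reconnect */
    };

    int peer_connecting(const struct peer_state *p)
    {
            return p->ibp_connecting != 0 ||
                   p->ibp_accepting != 0 ||
                   p->ibp_reconnecting != 0;
    }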
@@ -1380,8 +1443,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)
        if (peer2 != NULL) {
                if (list_empty(&peer2->ibp_conns)) {
                        /* found a peer, but it's still connecting... */
-                       LASSERT (peer2->ibp_connecting != 0 ||
-                                peer2->ibp_accepting != 0);
+                       LASSERT(kiblnd_peer_connecting(peer2));
                        if (tx != NULL)
                                list_add_tail(&tx->tx_list,
                                              &peer2->ibp_tx_queue);
@@ -1802,10 +1864,7 @@ kiblnd_peer_notify (kib_peer_t *peer)
 
        read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
 
-       if (list_empty(&peer->ibp_conns) &&
-           peer->ibp_accepting == 0 &&
-           peer->ibp_connecting == 0 &&
-           peer->ibp_error != 0) {
+       if (kiblnd_peer_idle(peer) && peer->ibp_error != 0) {
                error = peer->ibp_error;
                peer->ibp_error = 0;
 
@@ -2004,14 +2063,14 @@ kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error)
                peer->ibp_accepting--;
        }
 
-       if (peer->ibp_connecting != 0 ||
-           peer->ibp_accepting != 0) {
+       if (kiblnd_peer_connecting(peer)) {
                /* another connection attempt under way... */
                write_unlock_irqrestore(&kiblnd_data.kib_global_lock,
                                        flags);
                return;
        }
 
+       peer->ibp_reconnected = 0;
        if (list_empty(&peer->ibp_conns)) {
                /* Take peer's blocked transmits to complete with error */
                list_add(&zombies, &peer->ibp_tx_queue);
@@ -2081,6 +2140,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status)
         * peer instance... */
        kiblnd_conn_addref(conn);       /* +1 ref for ibc_list */
        list_add(&conn->ibc_list, &peer->ibp_conns);
+       peer->ibp_reconnected = 0;
        if (active)
                peer->ibp_connecting--;
        else
@@ -2263,12 +2323,12 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
        }
 
        if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
-           IBLND_MSG_QUEUE_SIZE(version)) {
+           kiblnd_msg_queue_size(version, ni)) {
                CERROR("Can't accept conn from %s, queue depth too large: "
                       " %d (<=%d wanted)\n",
                       libcfs_nid2str(nid),
                       reqmsg->ibm_u.connparams.ibcp_queue_depth,
-                      IBLND_MSG_QUEUE_SIZE(version));
+                      kiblnd_msg_queue_size(version, ni));
 
                if (version == IBLND_MSG_VERSION)
                        rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
@@ -2277,27 +2337,28 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
        }
 
        if (reqmsg->ibm_u.connparams.ibcp_max_frags >
-           IBLND_RDMA_FRAGS(version)) {
+           kiblnd_rdma_frags(version, ni)) {
                CWARN("Can't accept conn from %s (version %x): "
                      "max_frags %d too large (%d wanted)\n",
-                     libcfs_nid2str(nid), version,
-                     reqmsg->ibm_u.connparams.ibcp_max_frags,
-                     IBLND_RDMA_FRAGS(version));
+                     libcfs_nid2str(nid), version,
+                     reqmsg->ibm_u.connparams.ibcp_max_frags,
+                     kiblnd_rdma_frags(version, ni));
 
                if (version >= IBLND_MSG_VERSION)
                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
 
                goto failed;
        } else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
-                  IBLND_RDMA_FRAGS(version) && net->ibn_fmr_ps == NULL) {
+                  kiblnd_rdma_frags(version, ni) &&
+                  net->ibn_fmr_ps == NULL) {
                CWARN("Can't accept conn from %s (version %x): "
                      "max_frags %d incompatible without FMR pool "
                      "(%d wanted)\n",
                      libcfs_nid2str(nid), version,
                      reqmsg->ibm_u.connparams.ibcp_max_frags,
-                     IBLND_RDMA_FRAGS(version));
+                     kiblnd_rdma_frags(version, ni));
 
-               if (version >= IBLND_MSG_VERSION)
+               if (version == IBLND_MSG_VERSION)
                        rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
 
                goto failed;
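The accept path above now sizes its limits per NI via kiblnd_msg_queue_size() and
kiblnd_rdma_frags() instead of the version-only IBLND_* macros, but the shape of the
check is unchanged. A model of that shape, with stand-in names (the local limits
stand in for the per-NI helpers; the version gating of the reject reason is elided):

    enum reject_why { REJECT_NONE, REJECT_MSG_QUEUE_SIZE, REJECT_RDMA_FRAGS };

    enum reject_why check_connparams(int req_queue_depth, int req_max_frags,
                                     int local_queue_depth, int local_max_frags)
    {
            if (req_queue_depth > local_queue_depth)
                    return REJECT_MSG_QUEUE_SIZE;   /* queue depth too large */
            if (req_max_frags > local_max_frags)
                    return REJECT_RDMA_FRAGS;       /* max_frags too large */
            return REJECT_NONE;
    }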
@@ -2335,11 +2396,17 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
                /* not the guy I've talked with */
                if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp ||
                    peer2->ibp_version != version) {
-                       kiblnd_close_peer_conns_locked(peer2, -ESTALE);
+                       kiblnd_close_peer_conns_locked(peer2, -ESTALE);
+
+                       if (kiblnd_peer_active(peer2)) {
+                               peer2->ibp_incarnation = reqmsg->ibm_srcstamp;
+                               peer2->ibp_version = version;
+                       }
                        write_unlock_irqrestore(g_lock, flags);
 
-                       CWARN("Conn stale %s [old ver: %x, new ver: %x]\n",
-                             libcfs_nid2str(nid), peer2->ibp_version, version);
+                       CWARN("Conn stale %s version %x/%x incarnation "LPU64"/"LPU64"\n",
+                             libcfs_nid2str(nid), peer2->ibp_version, version,
+                             peer2->ibp_incarnation, reqmsg->ibm_srcstamp);
 
                        kiblnd_peer_decref(peer);
                        rej.ibr_why = IBLND_REJECT_CONN_STALE;
@@ -2357,7 +2424,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
                        rej.ibr_why = IBLND_REJECT_CONN_RACE;
                        goto failed;
                }
-
+               /*
+                * passive connection is allowed even when this peer is
+                * waiting for reconnection.
+                */
+               peer2->ibp_reconnecting = 0;
                peer2->ibp_accepting++;
                kiblnd_peer_addref(peer2);
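The last hunk above is the passive half of the reconnect rework: an incoming
connection for a peer parked in the reconnecting state wins, so the pending reconnect
is cancelled before the accept is counted. Schematically (a stand-in struct, not the
real kib_peer_t; in the driver this runs under the global write lock):

    struct peer_flags {
            int ibp_reconnecting;
            int ibp_accepting;
    };

    void accept_passive_conn(struct peer_flags *p)
    {
            p->ibp_reconnecting = 0;        /* the accept supersedes the retry */
            p->ibp_accepting++;
    }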
@@ -2445,90 +2516,89 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
        if (ni != NULL)
                lnet_ni_decref(ni);
 
-       rej.ibr_version             = version;
-       rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
-       rej.ibr_cp.ibcp_max_frags   = IBLND_RDMA_FRAGS(version);
-       kiblnd_reject(cmid, &rej);
+       rej.ibr_version             = version;
+       rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+       rej.ibr_cp.ibcp_max_frags   = kiblnd_rdma_frags(version, ni);
+       kiblnd_reject(cmid, &rej);
 
-       return -ECONNREFUSED;
+       return -ECONNREFUSED;
 }
 
 static void
-kiblnd_reconnect (kib_conn_t *conn, int version,
-                  __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(kib_conn_t *conn, int version,
+                       __u64 incarnation, int why, kib_connparams_t *cp)
 {
+       rwlock_t      *glock = &kiblnd_data.kib_global_lock;
        kib_peer_t    *peer = conn->ibc_peer;
        char          *reason;
-       int            retry_now = 0;
+       int            msg_size = IBLND_MSG_SIZE;
+       int            frag_num = -1;
+       int            queue_dep = -1;
+       bool           reconnect;
        unsigned long  flags;
 
-       LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
-       LASSERT (peer->ibp_connecting > 0);     /* 'conn' at least */
+       LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
+       LASSERT(peer->ibp_connecting > 0);      /* 'conn' at least */
+       LASSERT(!peer->ibp_reconnecting);
 
-       write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+       if (cp) {
+               msg_size  = cp->ibcp_max_msg_size;
+               frag_num  = cp->ibcp_max_frags;
+               queue_dep = cp->ibcp_queue_depth;
+       }
+       write_lock_irqsave(glock, flags);
        /* retry connection if it's still needed and no other connection
         * attempts (active or passive) are in progress
         * NB: reconnect is still needed even when ibp_tx_queue is
         * empty if ibp_version != version because reconnect may be
         * initiated by kiblnd_query() */
-       if ((!list_empty(&peer->ibp_tx_queue) ||
-            peer->ibp_version != version) &&
-           peer->ibp_connecting == 1 &&
-           peer->ibp_accepting == 0) {
-               if (why == IBLND_REJECT_CONN_RACE) {
-                       /* don't reconnect immediately, intensive reconnecting
-                        * may consume a lot of memory. kiblnd_destroy_conn
-                        * will reconnect after releasing all resources of
-                        * this connection */
-                       conn->ibc_conn_race = 1;
-               } else {
-                       retry_now = 1;
-               }
-               peer->ibp_connecting++;
-               peer->ibp_version     = version;
-               peer->ibp_incarnation = incarnation;
-       }
-
-       write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-
-       if (!retry_now)
-               return;
+       reconnect = (!list_empty(&peer->ibp_tx_queue) ||
+                    peer->ibp_version != version) &&
+                   peer->ibp_connecting == 1 &&
+                   peer->ibp_accepting == 0;
+       if (!reconnect) {
+               reason = "no need";
+               goto out;
+       }
 
        switch (why) {
        default:
                reason = "Unknown";
                break;
 
-       case IBLND_REJECT_RDMA_FRAGS:
-               if (!cp)
-                       goto failed;
-
-               if (conn->ibc_max_frags <= cp->ibcp_max_frags) {
-                       CNETERR("Unsupported max frags, peer supports %d\n",
-                               cp->ibcp_max_frags);
-                       goto failed;
-               } else if (*kiblnd_tunables.kib_map_on_demand == 0) {
-                       CNETERR("map_on_demand must be enabled to support "
-                               "map_on_demand peers\n");
-                       goto failed;
+       case IBLND_REJECT_RDMA_FRAGS: {
+               struct lnet_ioctl_config_lnd_tunables *tunables;
+
+               if (!cp) {
+                       reason = "can't negotiate max frags";
+                       goto out;
+               }
+               tunables = peer->ibp_ni->ni_lnd_tunables;
+               if (!tunables->lt_tun_u.lt_o2ib.lnd_map_on_demand) {
+                       reason = "map_on_demand must be enabled";
+                       goto out;
+               }
+               if (conn->ibc_max_frags <= frag_num) {
+                       reason = "unsupported max frags";
+                       goto out;
                }
 
-               peer->ibp_max_frags = cp->ibcp_max_frags;
+               peer->ibp_max_frags = frag_num;
                reason = "rdma fragments";
                break;
-
+       }
        case IBLND_REJECT_MSG_QUEUE_SIZE:
-               if (!cp)
-                       goto failed;
-
-               if (conn->ibc_queue_depth <= cp->ibcp_queue_depth) {
-                       CNETERR("Unsupported queue depth, peer supports %d\n",
-                               cp->ibcp_queue_depth);
-                       goto failed;
+               if (!cp) {
+                       reason = "can't negotiate queue depth";
+                       goto out;
+               }
+               if (conn->ibc_queue_depth <= queue_dep) {
+                       reason = "unsupported queue depth";
+                       goto out;
                }
 
-               peer->ibp_queue_depth = cp->ibcp_queue_depth;
+               peer->ibp_queue_depth = queue_dep;
                reason = "queue depth";
                break;
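kiblnd_check_reconnect() now computes a single reconnect verdict under the global
lock instead of the old retry_now/ibc_conn_race juggling. The condition itself is
unchanged from the deleted code and is small enough to restate as a pure function
(a userspace model; field names are stand-ins for the peer fields read under glock):

    #include <stdbool.h>

    struct reconn_state {
            bool tx_queue_empty;    /* list_empty(&peer->ibp_tx_queue) */
            int  peer_version;      /* peer->ibp_version */
            int  conn_version;      /* version being rejected */
            int  connecting;        /* peer->ibp_connecting */
            int  accepting;         /* peer->ibp_accepting */
    };

    bool should_reconnect(const struct reconn_state *s)
    {
            /* still needed (queued tx, or a version mismatch that
             * kiblnd_query() may have initiated) and this is the only
             * connection attempt in flight */
            return (!s->tx_queue_empty || s->peer_version != s->conn_version) &&
                   s->connecting == 1 && s->accepting == 0;
    }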
"reconnect" : "don't reconnect", + reason, IBLND_MSG_VERSION, version, msg_size, + conn->ibc_queue_depth, queue_dep, + conn->ibc_max_frags, frag_num); + /* + * if conn::ibc_reconnect is TRUE, connd will reconnect to the peer + * while destroying the zombie + */ } static void @@ -2569,8 +2645,8 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) switch (reason) { case IB_CM_REJ_STALE_CONN: - kiblnd_reconnect(conn, IBLND_MSG_VERSION, 0, - IBLND_REJECT_CONN_STALE, NULL); + kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0, + IBLND_REJECT_CONN_STALE, NULL); break; case IB_CM_REJ_INVALID_SERVICE_ID: @@ -2652,8 +2728,8 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) case IBLND_REJECT_CONN_UNCOMPAT: case IBLND_REJECT_MSG_QUEUE_SIZE: case IBLND_REJECT_RDMA_FRAGS: - kiblnd_reconnect(conn, rej->ibr_version, - incarnation, rej->ibr_why, cp); + kiblnd_check_reconnect(conn, rej->ibr_version, + incarnation, rej->ibr_why, cp); break; case IBLND_REJECT_NO_RESOURCES: @@ -3153,10 +3229,22 @@ kiblnd_disconnect_conn (kib_conn_t *conn) kiblnd_peer_notify(conn->ibc_peer); } +/* + * High-water for reconnection to the same peer, reconnection attempt should + * be delayed after trying more than KIB_RECONN_HIGH_RACE. + */ +#define KIB_RECONN_HIGH_RACE 10 +/* + * Allow connd to take a break and handle other things after consecutive + * reconnection attemps. + */ +#define KIB_RECONN_BREAK 100 + int kiblnd_connd (void *arg) { - wait_queue_t wait; + spinlock_t *lock= &kiblnd_data.kib_connd_lock; + wait_queue_t wait; unsigned long flags; kib_conn_t *conn; int timeout; @@ -3170,25 +3258,40 @@ kiblnd_connd (void *arg) init_waitqueue_entry(&wait, current); kiblnd_data.kib_connd = current; - spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + spin_lock_irqsave(lock, flags); - while (!kiblnd_data.kib_shutdown) { + while (!kiblnd_data.kib_shutdown) { + int reconn = 0; dropped_lock = 0; if (!list_empty(&kiblnd_data.kib_connd_zombies)) { - conn = list_entry(kiblnd_data. 
@@ -3170,25 +3258,40 @@ kiblnd_connd (void *arg)
        init_waitqueue_entry(&wait, current);
        kiblnd_data.kib_connd = current;
 
-       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+       spin_lock_irqsave(lock, flags);
 
-       while (!kiblnd_data.kib_shutdown) {
+       while (!kiblnd_data.kib_shutdown) {
+               int reconn = 0;
 
                dropped_lock = 0;
 
                if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
-                       conn = list_entry(kiblnd_data. \
-                                         kib_connd_zombies.next,
-                                         kib_conn_t, ibc_list);
+                       kib_peer_t *peer = NULL;
+
+                       conn = list_entry(kiblnd_data.kib_connd_zombies.next,
+                                         kib_conn_t, ibc_list);
                        list_del(&conn->ibc_list);
+                       if (conn->ibc_reconnect) {
+                               peer = conn->ibc_peer;
+                               kiblnd_peer_addref(peer);
+                       }
 
-                       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
-                                              flags);
+                       spin_unlock_irqrestore(lock, flags);
                        dropped_lock = 1;
 
-                       kiblnd_destroy_conn(conn);
+                       kiblnd_destroy_conn(conn, !peer);
+
+                       spin_lock_irqsave(lock, flags);
+                       if (!peer)
+                               continue;
 
-                       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+                       conn->ibc_peer = peer;
+                       if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
+                               list_add_tail(&conn->ibc_list,
+                                             &kiblnd_data.kib_reconn_list);
+                       else
+                               list_add_tail(&conn->ibc_list,
+                                             &kiblnd_data.kib_reconn_wait);
                }
 
                if (!list_empty(&kiblnd_data.kib_connd_conns)) {
@@ -3196,16 +3299,39 @@ kiblnd_connd (void *arg)
                                          kib_conn_t, ibc_list);
                        list_del(&conn->ibc_list);
 
-                       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock,
-                                              flags);
+                       spin_unlock_irqrestore(lock, flags);
                        dropped_lock = 1;
 
                        kiblnd_disconnect_conn(conn);
                        kiblnd_conn_decref(conn);
 
-                       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+                       spin_lock_irqsave(lock, flags);
                }
 
+               while (reconn < KIB_RECONN_BREAK) {
+                       if (kiblnd_data.kib_reconn_sec != get_seconds()) {
+                               kiblnd_data.kib_reconn_sec = get_seconds();
+                               list_splice_init(&kiblnd_data.kib_reconn_wait,
+                                                &kiblnd_data.kib_reconn_list);
+                       }
+
+                       if (list_empty(&kiblnd_data.kib_reconn_list))
+                               break;
+
+                       conn = list_entry(kiblnd_data.kib_reconn_list.next,
+                                         kib_conn_t, ibc_list);
+                       list_del(&conn->ibc_list);
+
+                       spin_unlock_irqrestore(lock, flags);
+                       dropped_lock = 1;
+
+                       reconn += kiblnd_reconnect_peer(conn->ibc_peer);
+                       kiblnd_peer_decref(conn->ibc_peer);
+                       LIBCFS_FREE(conn, sizeof(*conn));
+
+                       spin_lock_irqsave(lock, flags);
+               }
+
                /* careful with the jiffy wrap... */
                timeout = (int)(deadline - jiffies);
                if (timeout <= 0) {
@@ -3213,7 +3339,7 @@ kiblnd_connd (void *arg)
                        const int p = 1;
                        int       chunk = kiblnd_data.kib_peer_hash_size;
 
-                       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+                       spin_unlock_irqrestore(lock, flags);
                        dropped_lock = 1;
 
                        /* Time to check for RDMA timeouts on a few more
@@ -3237,7 +3363,7 @@ kiblnd_connd (void *arg)
                        }
 
                        deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
-                       spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+                       spin_lock_irqsave(lock, flags);
                }
 
                if (dropped_lock)
@@ -3246,16 +3372,16 @@ kiblnd_connd (void *arg)
                /* Nothing to do for 'timeout'  */
                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
-               spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+               spin_unlock_irqrestore(lock, flags);
 
                schedule_timeout(timeout);
 
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
-               spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);
+               spin_lock_irqsave(lock, flags);
        }
 
-       spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags);
+       spin_unlock_irqrestore(lock, flags);
 
        kiblnd_thread_fini();
        return 0;
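One non-obvious pattern in the connd hunks above: a zombie connection whose
ibc_reconnect flag is set is not fully freed by kiblnd_destroy_conn(conn, !peer);
the emptied shell is kept as a carrier for the peer reference until the reconnect
loop consumes it and releases it with LIBCFS_FREE(). A userspace sketch of that
hand-off (stand-in types, no locking):

    #include <stdlib.h>

    struct peer { int refs; };
    struct conn { struct peer *peer; int reconnect; struct conn *next; };

    /* tear down a dead connection; keep the shell only when it must carry a
     * reconnect request (the extra peer ref travels with it) */
    struct conn *retire_conn(struct conn *c, struct conn *reconn_list)
    {
            if (!c->reconnect) {
                    free(c);                /* normal zombie: gone for good */
                    return reconn_list;
            }
            c->peer->refs++;                /* ref rides along in the shell  */
            c->next = reconn_list;          /* queue for the reconnect loop, */
            return c;                       /* which frees the shell later   */
    }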
@@ -3282,9 +3408,15 @@ kiblnd_qp_event(struct ib_event *event, void *arg)
 static void
 kiblnd_complete (struct ib_wc *wc)
 {
-       switch (kiblnd_wreqid2type(wc->wr_id)) {
-       default:
-               LBUG();
+       switch (kiblnd_wreqid2type(wc->wr_id)) {
+       default:
+               LBUG();
+
+       case IBLND_WID_MR:
+               if (wc->status != IB_WC_SUCCESS &&
+                   wc->status != IB_WC_WR_FLUSH_ERR)
+                       CNETERR("FastReg failed: %d\n", wc->status);
+               return;
 
        case IBLND_WID_RDMA:
                /* We only get RDMA completion notification if it fails.  All
@@ -3400,6 +3532,8 @@ kiblnd_scheduler(void *arg)
 
                        spin_unlock_irqrestore(&sched->ibs_lock, flags);
 
+                       wc.wr_id = IBLND_WID_INVAL;
+
                        rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                        if (rc == 0) {
                                rc = ib_req_notify_cq(conn->ibc_cq,
@@ -3418,6 +3552,19 @@
                                rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
                        }
 
+                       if (unlikely(rc > 0 && wc.wr_id == IBLND_WID_INVAL)) {
+                               LCONSOLE_ERROR(
+                                       "ib_poll_cq (rc: %d) returned invalid "
+                                       "wr_id, opcode %d, status: %d, "
+                                       "vendor_err: %d, conn: %s status: %d\n"
+                                       "please upgrade firmware and OFED or "
+                                       "contact vendor.\n", rc,
+                                       wc.opcode, wc.status, wc.vendor_err,
+                                       libcfs_nid2str(conn->ibc_peer->ibp_nid),
+                                       conn->ibc_state);
+                               rc = -EINVAL;
+                       }
+
                        if (rc < 0) {
                                CWARN("%s: ib_poll_cq failed: %d, "
                                      "closing connection\n",
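The IBLND_WID_INVAL logic added to the scheduler is a poison-value check: wr_id is
seeded with a value no valid completion can carry, so if ib_poll_cq() claims success
but leaves it untouched, the completion is known to be garbage from a broken
provider. The general pattern, with an assumed sentinel value (the real constant is
defined in o2iblnd.h) and a stand-in for the polling call:

    #include <stdint.h>
    #include <stdio.h>

    #define WID_INVAL UINT64_MAX            /* assumed sentinel value */

    struct wc { uint64_t wr_id; };

    /* stands in for ib_poll_cq(); a broken provider may report a completion
     * without filling in the work-request id */
    static int poll_one(struct wc *wc, int broken)
    {
            if (!broken)
                    wc->wr_id = 42;         /* a real work-request id */
            return 1;                       /* "one completion polled" */
    }

    int main(void)
    {
            struct wc wc = { .wr_id = WID_INVAL };

            if (poll_one(&wc, 1) > 0 && wc.wr_id == WID_INVAL)
                    fprintf(stderr, "poll returned an invalid wr_id\n");
            return 0;
    }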