X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd_cb.c;h=8196b1bbf2cf7cd0de700fc8cab76cf8d6688ce7;hp=e4f2d331fffbabb84497cb834e73dc034207bffc;hb=88f761bc00c7fb29db4f80594ae864493bdd5071;hpb=580c1e0017296ea7a25f6f5f8aa8022f713ae762 diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c index e4f2d33..8196b1b 100644 --- a/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -27,7 +27,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2012, Intel Corporation. + * Copyright (c) 2012, 2015, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -40,7 +40,18 @@ #include "o2iblnd.h" -void +static void kiblnd_peer_alive(kib_peer_t *peer); +static void kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error); +static void kiblnd_check_sends(kib_conn_t *conn); +static void kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, + int type, int body_nob); +static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie); +static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn); +static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn); +static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx); + +static void kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx) { lnet_msg_t *lntmsg[2]; @@ -84,26 +95,26 @@ kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx) } void -kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int status) +kiblnd_txlist_done(lnet_ni_t *ni, struct list_head *txlist, int status) { - kib_tx_t *tx; + kib_tx_t *tx; - while (!cfs_list_empty (txlist)) { - tx = cfs_list_entry (txlist->next, kib_tx_t, tx_list); + while (!list_empty(txlist)) { + tx = list_entry(txlist->next, kib_tx_t, tx_list); - cfs_list_del(&tx->tx_list); - /* complete now */ - tx->tx_waiting = 0; - tx->tx_status = status; - kiblnd_tx_done(ni, tx); - } + list_del(&tx->tx_list); + /* complete now */ + tx->tx_waiting = 0; + tx->tx_status = status; + kiblnd_tx_done(ni, tx); + } } -kib_tx_t * +static kib_tx_t * kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) { kib_net_t *net = (kib_net_t *)ni->ni_data; - cfs_list_t *node; + struct list_head *node; kib_tx_t *tx; kib_tx_poolset_t *tps; @@ -121,13 +132,12 @@ kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) LASSERT (tx->tx_conn == NULL); LASSERT (tx->tx_lntmsg[0] == NULL); LASSERT (tx->tx_lntmsg[1] == NULL); - LASSERT (tx->tx_u.pmr == NULL); LASSERT (tx->tx_nfrags == 0); return tx; } -void +static void kiblnd_drop_rx(kib_rx_t *rx) { kib_conn_t *conn = rx->rx_conn; @@ -148,7 +158,7 @@ kiblnd_post_rx (kib_rx_t *rx, int credit) kib_conn_t *conn = rx->rx_conn; kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data; struct ib_recv_wr *bad_wrq = NULL; - struct ib_mr *mr; + struct ib_mr *mr = conn->ibc_hdev->ibh_mrs; int rc; LASSERT (net != NULL); @@ -156,9 +166,7 @@ kiblnd_post_rx (kib_rx_t *rx, int credit) LASSERT (credit == IBLND_POSTRX_NO_CREDIT || credit == IBLND_POSTRX_PEER_CREDIT || credit == IBLND_POSTRX_RSRVD_CREDIT); - - mr = kiblnd_find_dma_mr(conn->ibc_hdev, rx->rx_msgaddr, IBLND_MSG_SIZE); - LASSERT (mr != NULL); + LASSERT(mr != NULL); rx->rx_sge.lkey = mr->lkey; rx->rx_sge.addr = rx->rx_msgaddr; @@ -179,24 +187,28 @@ kiblnd_post_rx (kib_rx_t *rx, int credit) rx->rx_nob = -1; /* flag posted */ - rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); - if (rc != 0) { - 
CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); - rx->rx_nob = 0; - } + /* NB: need an extra reference after ib_post_recv because we don't + * own this rx (and rx::rx_conn) anymore, LU-5678. + */ + kiblnd_conn_addref(conn); + rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq); + if (unlikely(rc != 0)) { + CERROR("Can't post rx for %s: %d, bad_wrq: %p\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq); + rx->rx_nob = 0; + } - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ - return rc; + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) /* Initial post */ + goto out; - if (rc != 0) { - kiblnd_close_conn(conn, rc); - kiblnd_drop_rx(rx); /* No more posts for this rx */ - return rc; - } + if (unlikely(rc != 0)) { + kiblnd_close_conn(conn, rc); + kiblnd_drop_rx(rx); /* No more posts for this rx */ + goto out; + } - if (credit == IBLND_POSTRX_NO_CREDIT) - return 0; + if (credit == IBLND_POSTRX_NO_CREDIT) + goto out; spin_lock(&conn->ibc_lock); if (credit == IBLND_POSTRX_PEER_CREDIT) @@ -206,35 +218,37 @@ kiblnd_post_rx (kib_rx_t *rx, int credit) spin_unlock(&conn->ibc_lock); kiblnd_check_sends(conn); - return 0; +out: + kiblnd_conn_decref(conn); + return rc; } -kib_tx_t * +static kib_tx_t * kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) { - cfs_list_t *tmp; + struct list_head *tmp; - cfs_list_for_each(tmp, &conn->ibc_active_txs) { - kib_tx_t *tx = cfs_list_entry(tmp, kib_tx_t, tx_list); + list_for_each(tmp, &conn->ibc_active_txs) { + kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); - LASSERT (!tx->tx_queued); - LASSERT (tx->tx_sending != 0 || tx->tx_waiting); + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_sending != 0 || tx->tx_waiting); - if (tx->tx_cookie != cookie) - continue; + if (tx->tx_cookie != cookie) + continue; - if (tx->tx_waiting && - tx->tx_msg->ibm_type == txtype) - return tx; + if (tx->tx_waiting && + tx->tx_msg->ibm_type == txtype) + return tx; - CWARN("Bad completion: %swaiting, type %x (wanted %x)\n", - tx->tx_waiting ? "" : "NOT ", - tx->tx_msg->ibm_type, txtype); - } - return NULL; + CWARN("Bad completion: %swaiting, type %x (wanted %x)\n", + tx->tx_waiting ? 
"" : "NOT ", + tx->tx_msg->ibm_type, txtype); + } + return NULL; } -void +static void kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) { kib_tx_t *tx; @@ -265,7 +279,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) idle = !tx->tx_queued && (tx->tx_sending == 0); if (idle) - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); spin_unlock(&conn->ibc_lock); @@ -273,7 +287,7 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) kiblnd_tx_done(ni, tx); } -void +static void kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) { lnet_ni_t *ni = conn->ibc_peer->ibp_ni; @@ -292,7 +306,7 @@ kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) kiblnd_queue_tx(tx, conn); } -void +static void kiblnd_handle_rx (kib_rx_t *rx) { kib_msg_t *msg = rx->rx_msg; @@ -315,19 +329,19 @@ kiblnd_handle_rx (kib_rx_t *rx) spin_lock(&conn->ibc_lock); if (conn->ibc_credits + credits > - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) { + conn->ibc_queue_depth) { rc2 = conn->ibc_credits; spin_unlock(&conn->ibc_lock); - CERROR("Bad credits from %s: %d + %d > %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc2, credits, - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); + CERROR("Bad credits from %s: %d + %d > %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + rc2, credits, + conn->ibc_queue_depth); - kiblnd_close_conn(conn, -EPROTO); - kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT); - return; - } + kiblnd_close_conn(conn, -EPROTO); + kiblnd_post_rx(rx, IBLND_POSTRX_NO_CREDIT); + return; + } conn->ibc_credits += credits; @@ -392,7 +406,7 @@ kiblnd_handle_rx (kib_rx_t *rx) tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, msg->ibm_u.putack.ibpam_src_cookie); if (tx != NULL) - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); spin_unlock(&conn->ibc_lock); if (tx == NULL) { @@ -453,7 +467,7 @@ kiblnd_handle_rx (kib_rx_t *rx) kiblnd_post_rx(rx, post_credit); } -void +static void kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) { kib_msg_t *msg = rx->rx_msg; @@ -508,7 +522,7 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) write_lock_irqsave(g_lock, flags); /* must check holding global lock to eliminate race */ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs); + list_add_tail(&rx->rx_list, &conn->ibc_early_rxs); write_unlock_irqrestore(g_lock, flags); return; } @@ -524,13 +538,12 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) kiblnd_drop_rx(rx); /* Don't re-post rx. */ } -struct page * +static struct page * kiblnd_kvaddr_to_page (unsigned long vaddr) { struct page *page; - if (vaddr >= VMALLOC_START && - vaddr < VMALLOC_END) { + if (is_vmalloc_addr((void *)vaddr)) { page = vmalloc_to_page ((void *)vaddr); LASSERT (page != NULL); return page; @@ -576,73 +589,33 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; fps = net->ibn_fmr_ps[cpt]; - rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr); + rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr); if (rc != 0) { CERROR ("Can't map %d pages: %d\n", npages, rc); return rc; } - /* If rd is not tx_rd, it's going to get sent to a peer, who will need - * the rkey */ - rd->rd_key = (rd != tx->tx_rd) ? 
tx->tx_u.fmr.fmr_pfmr->fmr->rkey : - tx->tx_u.fmr.fmr_pfmr->fmr->lkey; - rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; - rd->rd_frags[0].rf_nob = nob; - rd->rd_nfrags = 1; - - return 0; -} - -static int -kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) -{ - kib_hca_dev_t *hdev; - kib_pmr_poolset_t *pps; - __u64 iova; - int cpt; - int rc; - - LASSERT(tx->tx_pool != NULL); - LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); - - hdev = tx->tx_pool->tpo_hdev; - - iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask; - - cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; - - pps = net->ibn_pmr_ps[cpt]; - rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr); - if (rc != 0) { - CERROR("Failed to create MR by phybuf: %d\n", rc); - return rc; - } - - /* If rd is not tx_rd, it's going to get sent to a peer, who will need - * the rkey */ - rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.pmr->pmr_mr->rkey : - tx->tx_u.pmr->pmr_mr->lkey; - rd->rd_nfrags = 1; - rd->rd_frags[0].rf_addr = iova; - rd->rd_frags[0].rf_nob = nob; + /* If rd is not tx_rd, it's going to get sent to a peer, who will need + * the rkey */ + rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey : + tx->fmr.fmr_pfmr->fmr->lkey; + rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask; + rd->rd_frags[0].rf_nob = nob; + rd->rd_nfrags = 1; - return 0; + return 0; } -void +static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) { kib_net_t *net = ni->ni_data; LASSERT(net != NULL); - if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) { - kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status); - tx->tx_u.fmr.fmr_pfmr = NULL; - - } else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) { - kiblnd_pmr_pool_unmap(tx->tx_u.pmr); - tx->tx_u.pmr = NULL; + if (net->ibn_fmr_ps != NULL && tx->fmr.fmr_pfmr != NULL) { + kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); + tx->fmr.fmr_pfmr = NULL; } if (tx->tx_nfrags != 0) { @@ -652,24 +625,22 @@ kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) } } -int -kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, - kib_rdma_desc_t *rd, int nfrags) +static int +kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags) { - kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; - kib_net_t *net = ni->ni_data; - struct ib_mr *mr = NULL; - __u32 nob; - int i; + kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; + kib_net_t *net = ni->ni_data; + struct ib_mr *mr = NULL; + __u32 nob; + int i; /* If rd is not tx_rd, it's going to get sent to a peer and I'm the * RDMA sink */ tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; - tx->tx_nfrags = nfrags; + tx->tx_nfrags = nfrags; - rd->rd_nfrags = - kiblnd_dma_map_sg(hdev->ibh_ibdev, - tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); + rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags, + tx->tx_nfrags, tx->tx_dmadir); for (i = 0, nob = 0; i < rd->rd_nfrags; i++) { rd->rd_frags[i].rf_nob = kiblnd_sg_dma_len( @@ -679,26 +650,25 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, nob += rd->rd_frags[i].rf_nob; } - /* looking for pre-mapping MR */ - mr = kiblnd_find_rd_dma_mr(hdev, rd); - if (mr != NULL) { - /* found pre-mapping MR */ - rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey; - return 0; - } + mr = kiblnd_find_rd_dma_mr(hdev, rd, + (tx->tx_conn != NULL) ? + tx->tx_conn->ibc_max_frags : -1); + if (mr != NULL) { + /* found pre-mapping MR */ + rd->rd_key = (rd != tx->tx_rd) ? 
mr->rkey : mr->lkey; + return 0; + } if (net->ibn_fmr_ps != NULL) return kiblnd_fmr_map_tx(net, tx, rd, nob); - else if (net->ibn_pmr_ps != NULL) - return kiblnd_pmr_map_tx(net, tx, rd, nob); return -EINVAL; } -int +static int kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, - unsigned int niov, struct iovec *iov, int offset, int nob) + unsigned int niov, struct kvec *iov, int offset, int nob) { kib_net_t *net = ni->ni_data; struct page *page; @@ -749,7 +719,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); } -int +static int kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nkiov, lnet_kiov_t *kiov, int offset, int nob) { @@ -789,8 +759,9 @@ kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); } -int +static int kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) +__must_hold(&conn->ibc_lock) { kib_msg_t *msg = tx->tx_msg; kib_peer_t *peer = conn->ibc_peer; @@ -799,16 +770,16 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) int done; struct ib_send_wr *bad_wrq; - LASSERT (tx->tx_queued); - /* We rely on this for QP sizing */ - LASSERT (tx->tx_nwrq > 0); - LASSERT (tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver)); + LASSERT(tx->tx_queued); + /* We rely on this for QP sizing */ + LASSERT(tx->tx_nwrq > 0); + LASSERT(tx->tx_nwrq <= 1 + conn->ibc_max_frags); - LASSERT (credit == 0 || credit == 1); - LASSERT (conn->ibc_outstanding_credits >= 0); - LASSERT (conn->ibc_outstanding_credits <= IBLND_MSG_QUEUE_SIZE(ver)); - LASSERT (conn->ibc_credits >= 0); - LASSERT (conn->ibc_credits <= IBLND_MSG_QUEUE_SIZE(ver)); + LASSERT(credit == 0 || credit == 1); + LASSERT(conn->ibc_outstanding_credits >= 0); + LASSERT(conn->ibc_outstanding_credits <= conn->ibc_queue_depth); + LASSERT(conn->ibc_credits >= 0); + LASSERT(conn->ibc_credits <= conn->ibc_queue_depth); if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) { /* tx completions outstanding... */ @@ -832,7 +803,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) } /* NB don't drop ibc_lock before bumping tx_sending */ - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); tx->tx_queued = 0; if (msg->ibm_type == IBLND_MSG_NOOP && @@ -867,7 +838,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) * tx_sending is non-zero if we've not done the tx_complete() * from the first send; hence the ++ rather than = below. */ tx->tx_sending++; - cfs_list_add(&tx->tx_list, &conn->ibc_active_txs); + list_add(&tx->tx_list, &conn->ibc_active_txs); /* I'm still holding ibc_lock! 
*/ if (conn->ibc_state != IBLND_CONN_ESTABLISHED) { @@ -900,7 +871,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) done = (tx->tx_sending == 0); if (done) - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); spin_unlock(&conn->ibc_lock); @@ -921,7 +892,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) return -EIO; } -void +static void kiblnd_check_sends (kib_conn_t *conn) { int ver = conn->ibc_version; @@ -943,11 +914,11 @@ kiblnd_check_sends (kib_conn_t *conn) LASSERT (conn->ibc_reserved_credits >= 0); while (conn->ibc_reserved_credits > 0 && - !cfs_list_empty(&conn->ibc_tx_queue_rsrvd)) { - tx = cfs_list_entry(conn->ibc_tx_queue_rsrvd.next, + !list_empty(&conn->ibc_tx_queue_rsrvd)) { + tx = list_entry(conn->ibc_tx_queue_rsrvd.next, kib_tx_t, tx_list); - cfs_list_del(&tx->tx_list); - cfs_list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); + list_del(&tx->tx_list); + list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); conn->ibc_reserved_credits--; } @@ -968,18 +939,18 @@ kiblnd_check_sends (kib_conn_t *conn) for (;;) { int credit; - if (!cfs_list_empty(&conn->ibc_tx_queue_nocred)) { + if (!list_empty(&conn->ibc_tx_queue_nocred)) { credit = 0; - tx = cfs_list_entry(conn->ibc_tx_queue_nocred.next, + tx = list_entry(conn->ibc_tx_queue_nocred.next, kib_tx_t, tx_list); - } else if (!cfs_list_empty(&conn->ibc_tx_noops)) { + } else if (!list_empty(&conn->ibc_tx_noops)) { LASSERT (!IBLND_OOB_CAPABLE(ver)); credit = 1; - tx = cfs_list_entry(conn->ibc_tx_noops.next, + tx = list_entry(conn->ibc_tx_noops.next, kib_tx_t, tx_list); - } else if (!cfs_list_empty(&conn->ibc_tx_queue)) { + } else if (!list_empty(&conn->ibc_tx_queue)) { credit = 1; - tx = cfs_list_entry(conn->ibc_tx_queue.next, + tx = list_entry(conn->ibc_tx_queue.next, kib_tx_t, tx_list); } else break; @@ -993,7 +964,7 @@ kiblnd_check_sends (kib_conn_t *conn) kiblnd_conn_decref(conn); /* ...until here */ } -void +static void kiblnd_tx_complete (kib_tx_t *tx, int status) { int failed = (status != IB_WC_SUCCESS); @@ -1034,7 +1005,7 @@ kiblnd_tx_complete (kib_tx_t *tx, int status) !tx->tx_waiting && /* Not waiting for peer */ !tx->tx_queued; /* Not re-queued (PUT_DONE) */ if (idle) - cfs_list_del(&tx->tx_list); + list_del(&tx->tx_list); kiblnd_conn_addref(conn); /* 1 ref for me.... 
*/ @@ -1048,24 +1019,22 @@ kiblnd_tx_complete (kib_tx_t *tx, int status) kiblnd_conn_decref(conn); /* ...until here */ } -void +static void kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) { kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; int nob = offsetof (kib_msg_t, ibm_u) + body_nob; - struct ib_mr *mr; + struct ib_mr *mr = hdev->ibh_mrs; - LASSERT (tx->tx_nwrq >= 0); - LASSERT (tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); - LASSERT (nob <= IBLND_MSG_SIZE); + LASSERT(tx->tx_nwrq >= 0); + LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); + LASSERT(nob <= IBLND_MSG_SIZE); + LASSERT(mr != NULL); kiblnd_init_msg(tx->tx_msg, type, body_nob); - mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob); - LASSERT (mr != NULL); - sge->lkey = mr->lkey; sge->addr = tx->tx_msgaddr; sge->length = nob; @@ -1082,9 +1051,9 @@ kiblnd_init_tx_msg (lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) tx->tx_nwrq++; } -int -kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, - int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) +static int +kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, + int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie) { kib_msg_t *ibmsg = tx->tx_msg; kib_rdma_desc_t *srcrd = tx->tx_rd; @@ -1115,16 +1084,16 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, break; } - if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) { - CERROR("RDMA too fragmented for %s (%d): " - "%d/%d src %d/%d dst frags\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - IBLND_RDMA_FRAGS(conn->ibc_version), - srcidx, srcrd->rd_nfrags, - dstidx, dstrd->rd_nfrags); - rc = -EMSGSIZE; - break; - } + if (tx->tx_nwrq >= conn->ibc_max_frags) { + CERROR("RDMA has too many fragments for peer %s (%d), " + "src idx/frags: %d/%d dst idx/frags: %d/%d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + conn->ibc_max_frags, + srcidx, srcrd->rd_nfrags, + dstidx, dstrd->rd_nfrags); + rc = -EMSGSIZE; + break; + } wrknob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx), kiblnd_rd_frag_size(dstrd, dstidx)), resid); @@ -1167,17 +1136,19 @@ kiblnd_init_rdma (kib_conn_t *conn, kib_tx_t *tx, int type, return rc; } -void -kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) +static void +kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) { - cfs_list_t *q; + struct list_head *q; - LASSERT (tx->tx_nwrq > 0); /* work items set up */ - LASSERT (!tx->tx_queued); /* not queued for sending already */ - LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); + LASSERT(tx->tx_nwrq > 0); /* work items set up */ + LASSERT(!tx->tx_queued); /* not queued for sending already */ + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); tx->tx_queued = 1; - tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ); + tx->tx_deadline = jiffies + + msecs_to_jiffies(*kiblnd_tunables.kib_timeout * + MSEC_PER_SEC); if (tx->tx_conn == NULL) { kiblnd_conn_addref(conn); @@ -1217,10 +1188,10 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) break; } - cfs_list_add_tail(&tx->tx_list, q); + list_add_tail(&tx->tx_list, q); } -void +static void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn) { spin_lock(&conn->ibc_lock); @@ -1238,14 +1209,12 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, unsigned short port; int rc; -#ifdef HAVE_OFED_RDMA_SET_REUSEADDR /* allow the port to be reused */ rc = rdma_set_reuseaddr(cmid, 1); if (rc != 0) { CERROR("Unable to set reuse on cmid: %d\n", rc); return rc; } -#endif 
/* look for a free privileged port */ for (port = PROT_SOCK-1; port > 0; port--) { @@ -1266,9 +1235,6 @@ static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, } CERROR("Failed to bind to a free privileged port\n"); -#ifndef HAVE_OFED_RDMA_SET_REUSEADDR - CERROR("You may need IB verbs that supports rdma_set_reuseaddr()\n"); -#endif return rc; } @@ -1324,9 +1290,9 @@ kiblnd_connect_peer (kib_peer_t *peer) } LASSERT (cmid->device != NULL); - CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n", + CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, - HIPQUAD(dev->ibd_ifip), cmid->device->name); + &dev->ibd_ifip, cmid->device->name); return; @@ -1358,7 +1324,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) read_lock_irqsave(g_lock, flags); peer = kiblnd_find_peer_locked(nid); - if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) { + if (peer != NULL && !list_empty(&peer->ibp_conns)) { /* Found a peer with an established connection */ conn = kiblnd_get_conn_locked(peer); kiblnd_conn_addref(conn); /* 1 ref for me... */ @@ -1377,12 +1343,12 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) peer = kiblnd_find_peer_locked(nid); if (peer != NULL) { - if (cfs_list_empty(&peer->ibp_conns)) { + if (list_empty(&peer->ibp_conns)) { /* found a peer, but it's still connecting... */ LASSERT (peer->ibp_connecting != 0 || peer->ibp_accepting != 0); if (tx != NULL) - cfs_list_add_tail(&tx->tx_list, + list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); write_unlock_irqrestore(g_lock, flags); } else { @@ -1400,28 +1366,28 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) write_unlock_irqrestore(g_lock, flags); - /* Allocate a peer ready to add to the peer table and retry */ - rc = kiblnd_create_peer(ni, &peer, nid); - if (rc != 0) { - CERROR("Can't create peer %s\n", libcfs_nid2str(nid)); - if (tx != NULL) { - tx->tx_status = -EHOSTUNREACH; - tx->tx_waiting = 0; - kiblnd_tx_done(ni, tx); - } - return; - } + /* Allocate a peer ready to add to the peer table and retry */ + rc = kiblnd_create_peer(ni, &peer, nid); + if (rc != 0) { + CERROR("Can't create peer %s\n", libcfs_nid2str(nid)); + if (tx != NULL) { + tx->tx_status = -EHOSTUNREACH; + tx->tx_waiting = 0; + kiblnd_tx_done(ni, tx); + } + return; + } write_lock_irqsave(g_lock, flags); peer2 = kiblnd_find_peer_locked(nid); if (peer2 != NULL) { - if (cfs_list_empty(&peer2->ibp_conns)) { + if (list_empty(&peer2->ibp_conns)) { /* found a peer, but it's still connecting... 
*/ LASSERT (peer2->ibp_connecting != 0 || peer2->ibp_accepting != 0); if (tx != NULL) - cfs_list_add_tail(&tx->tx_list, + list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue); write_unlock_irqrestore(g_lock, flags); } else { @@ -1447,10 +1413,10 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) LASSERT (((kib_net_t *)ni->ni_data)->ibn_shutdown == 0); if (tx != NULL) - cfs_list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); + list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); kiblnd_peer_addref(peer); - cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); + list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); write_unlock_irqrestore(g_lock, flags); @@ -1462,19 +1428,20 @@ int kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) { lnet_hdr_t *hdr = &lntmsg->msg_hdr; - int type = lntmsg->msg_type; - lnet_process_id_t target = lntmsg->msg_target; - int target_is_router = lntmsg->msg_target_is_router; - int routing = lntmsg->msg_routing; - unsigned int payload_niov = lntmsg->msg_niov; - struct iovec *payload_iov = lntmsg->msg_iov; - lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; - unsigned int payload_offset = lntmsg->msg_offset; - unsigned int payload_nob = lntmsg->msg_len; - kib_msg_t *ibmsg; - kib_tx_t *tx; - int nob; - int rc; + int type = lntmsg->msg_type; + lnet_process_id_t target = lntmsg->msg_target; + int target_is_router = lntmsg->msg_target_is_router; + int routing = lntmsg->msg_routing; + unsigned int payload_niov = lntmsg->msg_niov; + struct kvec *payload_iov = lntmsg->msg_iov; + lnet_kiov_t *payload_kiov = lntmsg->msg_kiov; + unsigned int payload_offset = lntmsg->msg_offset; + unsigned int payload_nob = lntmsg->msg_len; + kib_msg_t *ibmsg; + kib_rdma_desc_t *rd; + kib_tx_t *tx; + int nob; + int rc; /* NB 'private' is different depending on what we're sending.... 
*/ @@ -1514,30 +1481,28 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) return -ENOMEM; } - ibmsg = tx->tx_msg; - - if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) - rc = kiblnd_setup_rd_iov(ni, tx, - &ibmsg->ibm_u.get.ibgm_rd, - lntmsg->msg_md->md_niov, - lntmsg->msg_md->md_iov.iov, - 0, lntmsg->msg_md->md_length); - else - rc = kiblnd_setup_rd_kiov(ni, tx, - &ibmsg->ibm_u.get.ibgm_rd, - lntmsg->msg_md->md_niov, - lntmsg->msg_md->md_iov.kiov, - 0, lntmsg->msg_md->md_length); - if (rc != 0) { - CERROR("Can't setup GET sink for %s: %d\n", - libcfs_nid2str(target.nid), rc); - kiblnd_tx_done(ni, tx); - return -EIO; - } + ibmsg = tx->tx_msg; + rd = &ibmsg->ibm_u.get.ibgm_rd; + if ((lntmsg->msg_md->md_options & LNET_MD_KIOV) == 0) + rc = kiblnd_setup_rd_iov(ni, tx, rd, + lntmsg->msg_md->md_niov, + lntmsg->msg_md->md_iov.iov, + 0, lntmsg->msg_md->md_length); + else + rc = kiblnd_setup_rd_kiov(ni, tx, rd, + lntmsg->msg_md->md_niov, + lntmsg->msg_md->md_iov.kiov, + 0, lntmsg->msg_md->md_length); + if (rc != 0) { + CERROR("Can't setup GET sink for %s: %d\n", + libcfs_nid2str(target.nid), rc); + kiblnd_tx_done(ni, tx); + return -EIO; + } - nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]); - ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie; - ibmsg->ibm_u.get.ibgm_hdr = *hdr; + nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]); + ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie; + ibmsg->ibm_u.get.ibgm_hdr = *hdr; kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob); @@ -1629,12 +1594,12 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) return 0; } -void +static void kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) { lnet_process_id_t target = lntmsg->msg_target; unsigned int niov = lntmsg->msg_niov; - struct iovec *iov = lntmsg->msg_iov; + struct kvec *iov = lntmsg->msg_iov; lnet_kiov_t *kiov = lntmsg->msg_kiov; unsigned int offset = lntmsg->msg_offset; unsigned int nob = lntmsg->msg_len; @@ -1692,15 +1657,14 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) } int -kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, - unsigned int niov, struct iovec *iov, lnet_kiov_t *kiov, - unsigned int offset, unsigned int mlen, unsigned int rlen) +kiblnd_recv(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, + unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov, + unsigned int offset, unsigned int mlen, unsigned int rlen) { kib_rx_t *rx = private; kib_msg_t *rxmsg = rx->rx_msg; kib_conn_t *conn = rx->rx_conn; kib_tx_t *tx; - kib_msg_t *txmsg; int nob; int post_credit = IBLND_POSTRX_PEER_CREDIT; int rc = 0; @@ -1737,7 +1701,10 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, lnet_finalize (ni, lntmsg, 0); break; - case IBLND_MSG_PUT_REQ: + case IBLND_MSG_PUT_REQ: { + kib_msg_t *txmsg; + kib_rdma_desc_t *rd; + if (mlen == 0) { lnet_finalize(ni, lntmsg, 0); kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, 0, @@ -1754,28 +1721,27 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, break; } - txmsg = tx->tx_msg; - if (kiov == NULL) - rc = kiblnd_setup_rd_iov(ni, tx, - &txmsg->ibm_u.putack.ibpam_rd, - niov, iov, offset, mlen); - else - rc = kiblnd_setup_rd_kiov(ni, tx, - &txmsg->ibm_u.putack.ibpam_rd, - niov, kiov, offset, mlen); - if (rc != 0) { - CERROR("Can't setup PUT sink for %s: %d\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); - kiblnd_tx_done(ni, tx); - /* tell peer it's over */ - kiblnd_send_completion(rx->rx_conn, 
IBLND_MSG_PUT_NAK, rc, - rxmsg->ibm_u.putreq.ibprm_cookie); - break; - } + txmsg = tx->tx_msg; + rd = &txmsg->ibm_u.putack.ibpam_rd; + if (kiov == NULL) + rc = kiblnd_setup_rd_iov(ni, tx, rd, + niov, iov, offset, mlen); + else + rc = kiblnd_setup_rd_kiov(ni, tx, rd, + niov, kiov, offset, mlen); + if (rc != 0) { + CERROR("Can't setup PUT sink for %s: %d\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); + kiblnd_tx_done(ni, tx); + /* tell peer it's over */ + kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK, rc, + rxmsg->ibm_u.putreq.ibprm_cookie); + break; + } - nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]); - txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; - txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie; + nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]); + txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; + txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie; kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob); @@ -1786,6 +1752,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, /* reposted buffer reserved for PUT_DONE */ post_credit = IBLND_POSTRX_NO_CREDIT; break; + } case IBLND_MSG_GET_REQ: if (lntmsg != NULL) { @@ -1812,17 +1779,17 @@ kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) if (IS_ERR(task)) return PTR_ERR(task); - cfs_atomic_inc(&kiblnd_data.kib_nthreads); + atomic_inc(&kiblnd_data.kib_nthreads); return 0; } -void +static void kiblnd_thread_fini (void) { - cfs_atomic_dec (&kiblnd_data.kib_nthreads); + atomic_dec (&kiblnd_data.kib_nthreads); } -void +static void kiblnd_peer_alive (kib_peer_t *peer) { /* This is racy, but everyone's only writing cfs_time_current() */ @@ -1830,7 +1797,7 @@ kiblnd_peer_alive (kib_peer_t *peer) smp_mb(); } -void +static void kiblnd_peer_notify (kib_peer_t *peer) { int error = 0; @@ -1839,7 +1806,7 @@ kiblnd_peer_notify (kib_peer_t *peer) read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (cfs_list_empty(&peer->ibp_conns) && + if (list_empty(&peer->ibp_conns) && peer->ibp_accepting == 0 && peer->ibp_connecting == 0 && peer->ibp_error != 0) { @@ -1878,28 +1845,30 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error) return; /* already being handled */ if (error == 0 && - cfs_list_empty(&conn->ibc_tx_noops) && - cfs_list_empty(&conn->ibc_tx_queue) && - cfs_list_empty(&conn->ibc_tx_queue_rsrvd) && - cfs_list_empty(&conn->ibc_tx_queue_nocred) && - cfs_list_empty(&conn->ibc_active_txs)) { + list_empty(&conn->ibc_tx_noops) && + list_empty(&conn->ibc_tx_queue) && + list_empty(&conn->ibc_tx_queue_rsrvd) && + list_empty(&conn->ibc_tx_queue_nocred) && + list_empty(&conn->ibc_active_txs)) { CDEBUG(D_NET, "closing conn to %s\n", libcfs_nid2str(peer->ibp_nid)); } else { CNETERR("Closing conn to %s: error %d%s%s%s%s%s\n", libcfs_nid2str(peer->ibp_nid), error, - cfs_list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", - cfs_list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", - cfs_list_empty(&conn->ibc_tx_queue_rsrvd) ? "" : "(sending_rsrvd)", - cfs_list_empty(&conn->ibc_tx_queue_nocred) ? "" : "(sending_nocred)", - cfs_list_empty(&conn->ibc_active_txs) ? "" : "(waiting)"); + list_empty(&conn->ibc_tx_queue) ? "" : "(sending)", + list_empty(&conn->ibc_tx_noops) ? "" : "(sending_noops)", + list_empty(&conn->ibc_tx_queue_rsrvd) ? + "" : "(sending_rsrvd)", + list_empty(&conn->ibc_tx_queue_nocred) ? + "" : "(sending_nocred)", + list_empty(&conn->ibc_active_txs) ? 
"" : "(waiting)"); } dev = ((kib_net_t *)peer->ibp_ni->ni_data)->ibn_dev; - cfs_list_del(&conn->ibc_list); + list_del(&conn->ibc_list); /* connd (see below) takes over ibc_list's ref */ - if (cfs_list_empty (&peer->ibp_conns) && /* no more conns */ + if (list_empty(&peer->ibp_conns) && /* no more conns */ kiblnd_peer_active(peer)) { /* still in peer table */ kiblnd_unlink_peer_locked(peer); @@ -1911,14 +1880,14 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error) if (error != 0 && kiblnd_dev_can_failover(dev)) { - cfs_list_add_tail(&dev->ibd_fail_list, + list_add_tail(&dev->ibd_fail_list, &kiblnd_data.kib_failed_devs); wake_up(&kiblnd_data.kib_failover_waitq); } spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); - cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns); + list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns); wake_up(&kiblnd_data.kib_connd_waitq); spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); @@ -1936,7 +1905,7 @@ kiblnd_close_conn(kib_conn_t *conn, int error) write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); } -void +static void kiblnd_handle_early_rxs(kib_conn_t *conn) { unsigned long flags; @@ -1946,10 +1915,10 @@ kiblnd_handle_early_rxs(kib_conn_t *conn) LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - while (!cfs_list_empty(&conn->ibc_early_rxs)) { - rx = cfs_list_entry(conn->ibc_early_rxs.next, + while (!list_empty(&conn->ibc_early_rxs)) { + rx = list_entry(conn->ibc_early_rxs.next, kib_rx_t, rx_list); - cfs_list_del(&rx->rx_list); + list_del(&rx->rx_list); write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); kiblnd_handle_rx(rx); @@ -1959,43 +1928,43 @@ kiblnd_handle_early_rxs(kib_conn_t *conn) write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); } -void -kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs) +static void +kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs) { - CFS_LIST_HEAD (zombies); - cfs_list_t *tmp; - cfs_list_t *nxt; - kib_tx_t *tx; + struct list_head zombies = LIST_HEAD_INIT(zombies); + struct list_head *tmp; + struct list_head *nxt; + kib_tx_t *tx; spin_lock(&conn->ibc_lock); - cfs_list_for_each_safe (tmp, nxt, txs) { - tx = cfs_list_entry (tmp, kib_tx_t, tx_list); + list_for_each_safe(tmp, nxt, txs) { + tx = list_entry(tmp, kib_tx_t, tx_list); - if (txs == &conn->ibc_active_txs) { - LASSERT (!tx->tx_queued); - LASSERT (tx->tx_waiting || - tx->tx_sending != 0); - } else { - LASSERT (tx->tx_queued); - } + if (txs == &conn->ibc_active_txs) { + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_waiting || + tx->tx_sending != 0); + } else { + LASSERT(tx->tx_queued); + } - tx->tx_status = -ECONNABORTED; - tx->tx_waiting = 0; + tx->tx_status = -ECONNABORTED; + tx->tx_waiting = 0; - if (tx->tx_sending == 0) { - tx->tx_queued = 0; - cfs_list_del (&tx->tx_list); - cfs_list_add (&tx->tx_list, &zombies); - } - } + if (tx->tx_sending == 0) { + tx->tx_queued = 0; + list_del(&tx->tx_list); + list_add(&tx->tx_list, &zombies); + } + } spin_unlock(&conn->ibc_lock); kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED); } -void +static void kiblnd_finalise_conn (kib_conn_t *conn) { LASSERT (!in_interrupt()); @@ -2020,11 +1989,11 @@ kiblnd_finalise_conn (kib_conn_t *conn) kiblnd_handle_early_rxs(conn); } -void -kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error) +static void +kiblnd_peer_connect_failed(kib_peer_t *peer, int active, int error) { - CFS_LIST_HEAD (zombies); - unsigned long flags; + struct 
list_head zombies = LIST_HEAD_INIT(zombies); + unsigned long flags; LASSERT (error != 0); LASSERT (!in_interrupt()); @@ -2040,48 +2009,48 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error) } if (peer->ibp_connecting != 0 || - peer->ibp_accepting != 0) { - /* another connection attempt under way... */ + peer->ibp_accepting != 0) { + /* another connection attempt under way... */ write_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); - return; - } + flags); + return; + } - if (cfs_list_empty(&peer->ibp_conns)) { - /* Take peer's blocked transmits to complete with error */ - cfs_list_add(&zombies, &peer->ibp_tx_queue); - cfs_list_del_init(&peer->ibp_tx_queue); + if (list_empty(&peer->ibp_conns)) { + /* Take peer's blocked transmits to complete with error */ + list_add(&zombies, &peer->ibp_tx_queue); + list_del_init(&peer->ibp_tx_queue); - if (kiblnd_peer_active(peer)) - kiblnd_unlink_peer_locked(peer); + if (kiblnd_peer_active(peer)) + kiblnd_unlink_peer_locked(peer); - peer->ibp_error = error; - } else { - /* Can't have blocked transmits if there are connections */ - LASSERT (cfs_list_empty(&peer->ibp_tx_queue)); - } + peer->ibp_error = error; + } else { + /* Can't have blocked transmits if there are connections */ + LASSERT(list_empty(&peer->ibp_tx_queue)); + } write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - kiblnd_peer_notify(peer); + kiblnd_peer_notify(peer); - if (cfs_list_empty (&zombies)) - return; + if (list_empty(&zombies)) + return; - CNETERR("Deleting messages for %s: connection failed\n", - libcfs_nid2str(peer->ibp_nid)); + CNETERR("Deleting messages for %s: connection failed\n", + libcfs_nid2str(peer->ibp_nid)); - kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH); + kiblnd_txlist_done(peer->ibp_ni, &zombies, -EHOSTUNREACH); } -void +static void kiblnd_connreq_done(kib_conn_t *conn, int status) { - kib_peer_t *peer = conn->ibc_peer; - kib_tx_t *tx; - cfs_list_t txs; - unsigned long flags; - int active; + kib_peer_t *peer = conn->ibc_peer; + kib_tx_t *tx; + struct list_head txs; + unsigned long flags; + int active; active = (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); @@ -2112,14 +2081,14 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED); kiblnd_peer_alive(peer); - /* Add conn to peer's list and nuke any dangling conns from a different - * peer instance... */ - kiblnd_conn_addref(conn); /* +1 ref for ibc_list */ - cfs_list_add(&conn->ibc_list, &peer->ibp_conns); - if (active) - peer->ibp_connecting--; - else - peer->ibp_accepting--; + /* Add conn to peer's list and nuke any dangling conns from a different + * peer instance... 
*/ + kiblnd_conn_addref(conn); /* +1 ref for ibc_list */ + list_add(&conn->ibc_list, &peer->ibp_conns); + if (active) + peer->ibp_connecting--; + else + peer->ibp_accepting--; if (peer->ibp_version == 0) { peer->ibp_version = conn->ibc_version; @@ -2134,9 +2103,9 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) peer->ibp_incarnation = conn->ibc_incarnation; } - /* grab pending txs while I have the lock */ - cfs_list_add(&txs, &peer->ibp_tx_queue); - cfs_list_del_init(&peer->ibp_tx_queue); + /* grab pending txs while I have the lock */ + list_add(&txs, &peer->ibp_tx_queue); + list_del_init(&peer->ibp_tx_queue); if (!kiblnd_peer_active(peer) || /* peer has been deleted */ conn->ibc_comms_error != 0) { /* error has happened already */ @@ -2155,9 +2124,9 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) /* Schedule blocked txs */ spin_lock(&conn->ibc_lock); - while (!cfs_list_empty(&txs)) { - tx = cfs_list_entry(txs.next, kib_tx_t, tx_list); - cfs_list_del(&tx->tx_list); + while (!list_empty(&txs)) { + tx = list_entry(txs.next, kib_tx_t, tx_list); + list_del(&tx->tx_list); kiblnd_queue_tx_locked(tx, conn); } @@ -2169,7 +2138,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) kiblnd_handle_early_rxs(conn); } -void +static void kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) { int rc; @@ -2180,8 +2149,8 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) CWARN("Error %d sending reject\n", rc); } -int -kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) +static int +kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob) { rwlock_t *g_lock = &kiblnd_data.kib_global_lock; kib_msg_t *reqmsg = priv; @@ -2213,9 +2182,9 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) peer_addr = (struct sockaddr_in *)&(cmid->route.addr.dst_addr); if (*kiblnd_tunables.kib_require_priv_port && ntohs(peer_addr->sin_port) >= PROT_SOCK) { - __u32 ip = ntohl(peer_addr->sin_addr.s_addr); - CERROR("Peer's port (%u.%u.%u.%u:%hu) is not privileged\n", - HIPQUAD(ip), ntohs(peer_addr->sin_port)); + __u32 ip = ntohl(peer_addr->sin_addr.s_addr); + CERROR("Peer's port (%pI4h:%hu) is not privileged\n", + &ip, ntohs(peer_addr->sin_port)); goto failed; } @@ -2259,11 +2228,11 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) if (ni == NULL || /* no matching net */ ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */ net->ibn_dev != ibdev) { /* wrong device */ - CERROR("Can't accept %s on %s (%s:%d:%u.%u.%u.%u): " + CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): " "bad dst nid %s\n", libcfs_nid2str(nid), ni == NULL ? 
"NA" : libcfs_nid2str(ni->ni_nid), ibdev->ibd_ifname, ibdev->ibd_nnets, - HIPQUAD(ibdev->ibd_ifip), + &ibdev->ibd_ifip, libcfs_nid2str(reqmsg->ibm_dstnid)); goto failed; @@ -2286,32 +2255,46 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - if (reqmsg->ibm_u.connparams.ibcp_queue_depth != - IBLND_MSG_QUEUE_SIZE(version)) { - CERROR("Can't accept %s: incompatible queue depth %d (%d wanted)\n", - libcfs_nid2str(nid), reqmsg->ibm_u.connparams.ibcp_queue_depth, - IBLND_MSG_QUEUE_SIZE(version)); - - if (version == IBLND_MSG_VERSION) - rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE; + if (reqmsg->ibm_u.connparams.ibcp_queue_depth > + IBLND_MSG_QUEUE_SIZE(version)) { + CERROR("Can't accept conn from %s, queue depth too large: " + " %d (<=%d wanted)\n", + libcfs_nid2str(nid), + reqmsg->ibm_u.connparams.ibcp_queue_depth, + IBLND_MSG_QUEUE_SIZE(version)); - goto failed; - } + if (version == IBLND_MSG_VERSION) + rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE; - if (reqmsg->ibm_u.connparams.ibcp_max_frags != - IBLND_RDMA_FRAGS(version)) { - CERROR("Can't accept %s(version %x): " - "incompatible max_frags %d (%d wanted)\n", - libcfs_nid2str(nid), version, - reqmsg->ibm_u.connparams.ibcp_max_frags, - IBLND_RDMA_FRAGS(version)); - - if (version == IBLND_MSG_VERSION) - rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; - - goto failed; + goto failed; + } - } + if (reqmsg->ibm_u.connparams.ibcp_max_frags > + IBLND_RDMA_FRAGS(version)) { + CWARN("Can't accept conn from %s (version %x): " + "max_frags %d too large (%d wanted)\n", + libcfs_nid2str(nid), version, + reqmsg->ibm_u.connparams.ibcp_max_frags, + IBLND_RDMA_FRAGS(version)); + + if (version >= IBLND_MSG_VERSION) + rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; + + goto failed; + } else if (reqmsg->ibm_u.connparams.ibcp_max_frags < + IBLND_RDMA_FRAGS(version) && net->ibn_fmr_ps == NULL) { + CWARN("Can't accept conn from %s (version %x): " + "max_frags %d incompatible without FMR pool " + "(%d wanted)\n", + libcfs_nid2str(nid), version, + reqmsg->ibm_u.connparams.ibcp_max_frags, + IBLND_RDMA_FRAGS(version)); + + if (version >= IBLND_MSG_VERSION) + rej.ibr_why = IBLND_REJECT_RDMA_FRAGS; + + goto failed; + } if (reqmsg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { CERROR("Can't accept %s: message size %d too big (%d max)\n", @@ -2321,13 +2304,17 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - /* assume 'nid' is a new peer; create */ - rc = kiblnd_create_peer(ni, &peer, nid); - if (rc != 0) { - CERROR("Can't create peer for %s\n", libcfs_nid2str(nid)); - rej.ibr_why = IBLND_REJECT_NO_RESOURCES; - goto failed; - } + /* assume 'nid' is a new peer; create */ + rc = kiblnd_create_peer(ni, &peer, nid); + if (rc != 0) { + CERROR("Can't create peer for %s\n", libcfs_nid2str(nid)); + rej.ibr_why = IBLND_REJECT_NO_RESOURCES; + goto failed; + } + + /* We have validated the peer's parameters so use those */ + peer->ibp_max_frags = reqmsg->ibm_u.connparams.ibcp_max_frags; + peer->ibp_queue_depth = reqmsg->ibm_u.connparams.ibcp_queue_depth; write_lock_irqsave(g_lock, flags); @@ -2367,6 +2354,12 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) peer2->ibp_accepting++; kiblnd_peer_addref(peer2); + /* Race with kiblnd_launch_tx (active connect) to create peer + * so copy validated parameters since we now know what the + * peer's limits are */ + peer2->ibp_max_frags = peer->ibp_max_frags; + peer2->ibp_queue_depth = peer->ibp_queue_depth; + write_unlock_irqrestore(g_lock, 
flags); kiblnd_peer_decref(peer); peer = peer2; @@ -2384,12 +2377,12 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) LASSERT (net->ibn_shutdown == 0); kiblnd_peer_addref(peer); - cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); + list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); write_unlock_irqrestore(g_lock, flags); } - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version); + conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version); if (conn == NULL) { kiblnd_peer_connect_failed(peer, 0, -ENOMEM); kiblnd_peer_decref(peer); @@ -2399,21 +2392,20 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) /* conn now "owns" cmid, so I return success from here on to ensure the * CM callback doesn't destroy cmid. */ - - conn->ibc_incarnation = reqmsg->ibm_srcstamp; - conn->ibc_credits = IBLND_MSG_QUEUE_SIZE(version); - conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(version); - LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(version) - <= IBLND_RX_MSGS(version)); + conn->ibc_incarnation = reqmsg->ibm_srcstamp; + conn->ibc_credits = conn->ibc_queue_depth; + conn->ibc_reserved_credits = conn->ibc_queue_depth; + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + + IBLND_OOB_MSGS(version) <= IBLND_RX_MSGS(conn)); ackmsg = &conn->ibc_connvars->cv_msg; memset(ackmsg, 0, sizeof(*ackmsg)); kiblnd_init_msg(ackmsg, IBLND_MSG_CONNACK, sizeof(ackmsg->ibm_u.connparams)); - ackmsg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); - ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; - ackmsg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version); + ackmsg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; + ackmsg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags; + ackmsg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; kiblnd_pack_msg(ni, ackmsg, version, 0, nid, reqmsg->ibm_srcstamp); @@ -2454,14 +2446,14 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) return -ECONNREFUSED; } -void +static void kiblnd_reconnect (kib_conn_t *conn, int version, __u64 incarnation, int why, kib_connparams_t *cp) { - kib_peer_t *peer = conn->ibc_peer; - char *reason; - int retry = 0; - unsigned long flags; + kib_peer_t *peer = conn->ibc_peer; + char *reason; + int retry_now = 0; + unsigned long flags; LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); LASSERT (peer->ibp_connecting > 0); /* 'conn' at least */ @@ -2473,20 +2465,27 @@ kiblnd_reconnect (kib_conn_t *conn, int version, * NB: reconnect is still needed even when ibp_tx_queue is * empty if ibp_version != version because reconnect may be * initiated by kiblnd_query() */ - if ((!cfs_list_empty(&peer->ibp_tx_queue) || + if ((!list_empty(&peer->ibp_tx_queue) || peer->ibp_version != version) && peer->ibp_connecting == 1 && peer->ibp_accepting == 0) { - retry = 1; - peer->ibp_connecting++; - - peer->ibp_version = version; - peer->ibp_incarnation = incarnation; + if (why == IBLND_REJECT_CONN_RACE) { + /* don't reconnect immediately, intensive reconnecting + * may consume a lot of memory. 
kiblnd_destroy_conn + * will reconnect after releasing all resources of + * this connection */ + conn->ibc_conn_race = 1; + } else { + retry_now = 1; + } + peer->ibp_connecting++; + peer->ibp_version = version; + peer->ibp_incarnation = incarnation; } write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - if (!retry) + if (!retry_now) return; switch (why) { @@ -2494,31 +2493,66 @@ kiblnd_reconnect (kib_conn_t *conn, int version, reason = "Unknown"; break; + case IBLND_REJECT_RDMA_FRAGS: + if (!cp) + goto failed; + + if (conn->ibc_max_frags <= cp->ibcp_max_frags) { + CNETERR("Unsupported max frags, peer supports %d\n", + cp->ibcp_max_frags); + goto failed; + } else if (*kiblnd_tunables.kib_map_on_demand == 0) { + CNETERR("map_on_demand must be enabled to support " + "map_on_demand peers\n"); + goto failed; + } + + peer->ibp_max_frags = cp->ibcp_max_frags; + reason = "rdma fragments"; + break; + + case IBLND_REJECT_MSG_QUEUE_SIZE: + if (!cp) + goto failed; + + if (conn->ibc_queue_depth <= cp->ibcp_queue_depth) { + CNETERR("Unsupported queue depth, peer supports %d\n", + cp->ibcp_queue_depth); + goto failed; + } + + peer->ibp_queue_depth = cp->ibcp_queue_depth; + reason = "queue depth"; + break; + case IBLND_REJECT_CONN_STALE: reason = "stale"; break; - case IBLND_REJECT_CONN_RACE: - reason = "conn race"; - break; - case IBLND_REJECT_CONN_UNCOMPAT: reason = "version negotiation"; break; } - CNETERR("%s: retrying (%s), %x, %x, " - "queue_dep: %d, max_frag: %d, msg_size: %d\n", - libcfs_nid2str(peer->ibp_nid), - reason, IBLND_MSG_VERSION, version, - cp != NULL? cp->ibcp_queue_depth :IBLND_MSG_QUEUE_SIZE(version), - cp != NULL? cp->ibcp_max_frags : IBLND_RDMA_FRAGS(version), - cp != NULL? cp->ibcp_max_msg_size: IBLND_MSG_SIZE); + CNETERR("%s: retrying (%s), %x, %x, " + "queue_depth: %d, max_frags: %d, msg_size: %d\n", + libcfs_nid2str(peer->ibp_nid), + reason, IBLND_MSG_VERSION, version, + conn->ibc_queue_depth, conn->ibc_max_frags, + cp != NULL ? 
cp->ibcp_max_msg_size : IBLND_MSG_SIZE); kiblnd_connect_peer(peer); + return; + + failed: + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + peer->ibp_connecting--; + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + return; } -void +static void kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) { kib_peer_t *peer = conn->ibc_peer; @@ -2609,22 +2643,12 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) case IBLND_REJECT_CONN_RACE: case IBLND_REJECT_CONN_STALE: case IBLND_REJECT_CONN_UNCOMPAT: + case IBLND_REJECT_MSG_QUEUE_SIZE: + case IBLND_REJECT_RDMA_FRAGS: kiblnd_reconnect(conn, rej->ibr_version, incarnation, rej->ibr_why, cp); break; - case IBLND_REJECT_MSG_QUEUE_SIZE: - CERROR("%s rejected: incompatible message queue depth %d, %d\n", - libcfs_nid2str(peer->ibp_nid), cp->ibcp_queue_depth, - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)); - break; - - case IBLND_REJECT_RDMA_FRAGS: - CERROR("%s rejected: incompatible # of RDMA fragments %d, %d\n", - libcfs_nid2str(peer->ibp_nid), cp->ibcp_max_frags, - IBLND_RDMA_FRAGS(conn->ibc_version)); - break; - case IBLND_REJECT_NO_RESOURCES: CERROR("%s rejected: o2iblnd no resources\n", libcfs_nid2str(peer->ibp_nid)); @@ -2653,7 +2677,7 @@ kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob) kiblnd_connreq_done(conn, -ECONNREFUSED); } -void +static void kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) { kib_peer_t *peer = conn->ibc_peer; @@ -2687,25 +2711,25 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) goto failed; } - if (msg->ibm_u.connparams.ibcp_queue_depth != - IBLND_MSG_QUEUE_SIZE(ver)) { - CERROR("%s has incompatible queue depth %d(%d wanted)\n", - libcfs_nid2str(peer->ibp_nid), - msg->ibm_u.connparams.ibcp_queue_depth, - IBLND_MSG_QUEUE_SIZE(ver)); - rc = -EPROTO; - goto failed; - } + if (msg->ibm_u.connparams.ibcp_queue_depth > + conn->ibc_queue_depth) { + CERROR("%s has incompatible queue depth %d (<=%d wanted)\n", + libcfs_nid2str(peer->ibp_nid), + msg->ibm_u.connparams.ibcp_queue_depth, + conn->ibc_queue_depth); + rc = -EPROTO; + goto failed; + } - if (msg->ibm_u.connparams.ibcp_max_frags != - IBLND_RDMA_FRAGS(ver)) { - CERROR("%s has incompatible max_frags %d (%d wanted)\n", - libcfs_nid2str(peer->ibp_nid), - msg->ibm_u.connparams.ibcp_max_frags, - IBLND_RDMA_FRAGS(ver)); - rc = -EPROTO; - goto failed; - } + if (msg->ibm_u.connparams.ibcp_max_frags > + conn->ibc_max_frags) { + CERROR("%s has incompatible max_frags %d (<=%d wanted)\n", + libcfs_nid2str(peer->ibp_nid), + msg->ibm_u.connparams.ibcp_max_frags, + conn->ibc_max_frags); + rc = -EPROTO; + goto failed; + } if (msg->ibm_u.connparams.ibcp_max_msg_size > IBLND_MSG_SIZE) { CERROR("%s max message size %d too big (%d max)\n", @@ -2732,11 +2756,13 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) goto failed; } - conn->ibc_incarnation = msg->ibm_srcstamp; - conn->ibc_credits = - conn->ibc_reserved_credits = IBLND_MSG_QUEUE_SIZE(ver); - LASSERT (conn->ibc_credits + conn->ibc_reserved_credits + IBLND_OOB_MSGS(ver) - <= IBLND_RX_MSGS(ver)); + conn->ibc_incarnation = msg->ibm_srcstamp; + conn->ibc_credits = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_reserved_credits = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_queue_depth = msg->ibm_u.connparams.ibcp_queue_depth; + conn->ibc_max_frags = msg->ibm_u.connparams.ibcp_max_frags; + LASSERT(conn->ibc_credits + conn->ibc_reserved_credits + + IBLND_OOB_MSGS(ver) <= 
IBLND_RX_MSGS(conn)); kiblnd_connreq_done(conn, 0); return; @@ -2752,7 +2778,7 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) kiblnd_connreq_done(conn, 0); } -int +static int kiblnd_active_connect (struct rdma_cm_id *cmid) { kib_peer_t *peer = (kib_peer_t *)cmid->context; @@ -2772,7 +2798,8 @@ kiblnd_active_connect (struct rdma_cm_id *cmid) read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version); + conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, + version); if (conn == NULL) { kiblnd_peer_connect_failed(peer, 1, -ENOMEM); kiblnd_peer_decref(peer); /* lose cmid's ref */ @@ -2785,11 +2812,11 @@ kiblnd_active_connect (struct rdma_cm_id *cmid) msg = &conn->ibc_connvars->cv_msg; - memset(msg, 0, sizeof(*msg)); - kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams)); - msg->ibm_u.connparams.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version); - msg->ibm_u.connparams.ibcp_max_frags = IBLND_RDMA_FRAGS(version); - msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; + memset(msg, 0, sizeof(*msg)); + kiblnd_init_msg(msg, IBLND_MSG_CONNREQ, sizeof(msg->ibm_u.connparams)); + msg->ibm_u.connparams.ibcp_queue_depth = conn->ibc_queue_depth; + msg->ibm_u.connparams.ibcp_max_frags = conn->ibc_max_frags; + msg->ibm_u.connparams.ibcp_max_msg_size = IBLND_MSG_SIZE; kiblnd_pack_msg(peer->ibp_ni, msg, version, 0, peer->ibp_nid, incarnation); @@ -2956,11 +2983,10 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) /* net keeps its ref on conn! */ return 0; -#ifdef HAVE_OFED_RDMA_CMEV_TIMEWAIT_EXIT case RDMA_CM_EVENT_TIMEWAIT_EXIT: CDEBUG(D_NET, "Ignore TIMEWAIT_EXIT event\n"); return 0; -#endif + case RDMA_CM_EVENT_DISCONNECTED: conn = (kib_conn_t *)cmid->context; if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { @@ -2982,39 +3008,37 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) * to ignore this */ return 0; -#ifdef HAVE_OFED_RDMA_CMEV_ADDRCHANGE case RDMA_CM_EVENT_ADDR_CHANGE: LCONSOLE_INFO("Physical link changed (eg hca/port)\n"); return 0; -#endif } } static int -kiblnd_check_txs_locked(kib_conn_t *conn, cfs_list_t *txs) +kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs) { - kib_tx_t *tx; - cfs_list_t *ttmp; + kib_tx_t *tx; + struct list_head *ttmp; - cfs_list_for_each (ttmp, txs) { - tx = cfs_list_entry (ttmp, kib_tx_t, tx_list); + list_for_each(ttmp, txs) { + tx = list_entry(ttmp, kib_tx_t, tx_list); - if (txs != &conn->ibc_active_txs) { - LASSERT (tx->tx_queued); - } else { - LASSERT (!tx->tx_queued); - LASSERT (tx->tx_waiting || tx->tx_sending != 0); - } + if (txs != &conn->ibc_active_txs) { + LASSERT(tx->tx_queued); + } else { + LASSERT(!tx->tx_queued); + LASSERT(tx->tx_waiting || tx->tx_sending != 0); + } - if (cfs_time_aftereq (jiffies, tx->tx_deadline)) { - CERROR("Timed out tx: %s, %lu seconds\n", - kiblnd_queue2str(conn, txs), - cfs_duration_sec(jiffies - tx->tx_deadline)); - return 1; - } - } + if (cfs_time_aftereq(jiffies, tx->tx_deadline)) { + CERROR("Timed out tx: %s, %lu seconds\n", + kiblnd_queue2str(conn, txs), + cfs_duration_sec(jiffies - tx->tx_deadline)); + return 1; + } + } - return 0; + return 0; } static int @@ -3027,33 +3051,33 @@ kiblnd_conn_timed_out_locked(kib_conn_t *conn) kiblnd_check_txs_locked(conn, &conn->ibc_active_txs); } -void +static void kiblnd_check_conns (int idx) { - CFS_LIST_HEAD (closes); - CFS_LIST_HEAD (checksends); - cfs_list_t *peers = 
&kiblnd_data.kib_peers[idx]; - cfs_list_t *ptmp; - kib_peer_t *peer; - kib_conn_t *conn; - cfs_list_t *ctmp; - unsigned long flags; + struct list_head closes = LIST_HEAD_INIT(closes); + struct list_head checksends = LIST_HEAD_INIT(checksends); + struct list_head *peers = &kiblnd_data.kib_peers[idx]; + struct list_head *ptmp; + kib_peer_t *peer; + kib_conn_t *conn; + struct list_head *ctmp; + unsigned long flags; /* NB. We expect to have a look at all the peers and not find any * RDMAs to time out, so we just use a shared lock while we * take a look... */ read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - cfs_list_for_each (ptmp, peers) { - peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list); + list_for_each(ptmp, peers) { + peer = list_entry(ptmp, kib_peer_t, ibp_list); - cfs_list_for_each (ctmp, &peer->ibp_conns) { - int timedout; - int sendnoop; + list_for_each(ctmp, &peer->ibp_conns) { + int timedout; + int sendnoop; - conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list); + conn = list_entry(ctmp, kib_conn_t, ibc_list); - LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED); + LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED); spin_lock(&conn->ibc_lock); @@ -3061,25 +3085,24 @@ kiblnd_check_conns (int idx) timedout = kiblnd_conn_timed_out_locked(conn); if (!sendnoop && !timedout) { spin_unlock(&conn->ibc_lock); - continue; - } + continue; + } - if (timedout) { - CERROR("Timed out RDMA with %s (%lu): " - "c: %u, oc: %u, rc: %u\n", - libcfs_nid2str(peer->ibp_nid), - cfs_duration_sec(cfs_time_current() - - peer->ibp_last_alive), - conn->ibc_credits, - conn->ibc_outstanding_credits, - conn->ibc_reserved_credits); - cfs_list_add(&conn->ibc_connd_list, &closes); - } else { - cfs_list_add(&conn->ibc_connd_list, - &checksends); - } - /* +ref for 'closes' or 'checksends' */ - kiblnd_conn_addref(conn); + if (timedout) { + CERROR("Timed out RDMA with %s (%lu): " + "c: %u, oc: %u, rc: %u\n", + libcfs_nid2str(peer->ibp_nid), + cfs_duration_sec(cfs_time_current() - + peer->ibp_last_alive), + conn->ibc_credits, + conn->ibc_outstanding_credits, + conn->ibc_reserved_credits); + list_add(&conn->ibc_connd_list, &closes); + } else { + list_add(&conn->ibc_connd_list, &checksends); + } + /* +ref for 'closes' or 'checksends' */ + kiblnd_conn_addref(conn); spin_unlock(&conn->ibc_lock); } @@ -3087,30 +3110,30 @@ kiblnd_check_conns (int idx) read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - /* Handle timeout by closing the whole - * connection. We can only be sure RDMA activity - * has ceased once the QP has been modified. */ - while (!cfs_list_empty(&closes)) { - conn = cfs_list_entry(closes.next, - kib_conn_t, ibc_connd_list); - cfs_list_del(&conn->ibc_connd_list); - kiblnd_close_conn(conn, -ETIMEDOUT); - kiblnd_conn_decref(conn); - } + /* Handle timeout by closing the whole + * connection. We can only be sure RDMA activity + * has ceased once the QP has been modified. */ + while (!list_empty(&closes)) { + conn = list_entry(closes.next, + kib_conn_t, ibc_connd_list); + list_del(&conn->ibc_connd_list); + kiblnd_close_conn(conn, -ETIMEDOUT); + kiblnd_conn_decref(conn); + } - /* In case we have enough credits to return via a - * NOOP, but there were no non-blocking tx descs - * free to do it last time... 
*/ - while (!cfs_list_empty(&checksends)) { - conn = cfs_list_entry(checksends.next, - kib_conn_t, ibc_connd_list); - cfs_list_del(&conn->ibc_connd_list); - kiblnd_check_sends(conn); - kiblnd_conn_decref(conn); - } + /* In case we have enough credits to return via a + * NOOP, but there were no non-blocking tx descs + * free to do it last time... */ + while (!list_empty(&checksends)) { + conn = list_entry(checksends.next, + kib_conn_t, ibc_connd_list); + list_del(&conn->ibc_connd_list); + kiblnd_check_sends(conn); + kiblnd_conn_decref(conn); + } } -void +static void kiblnd_disconnect_conn (kib_conn_t *conn) { LASSERT (!in_interrupt()); @@ -3135,9 +3158,9 @@ kiblnd_connd (void *arg) int peer_index = 0; unsigned long deadline = jiffies; - cfs_block_allsigs (); + cfs_block_allsigs(); - init_waitqueue_entry_current (&wait); + init_waitqueue_entry(&wait, current); kiblnd_data.kib_connd = current; spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); @@ -3146,11 +3169,11 @@ kiblnd_connd (void *arg) dropped_lock = 0; - if (!cfs_list_empty (&kiblnd_data.kib_connd_zombies)) { - conn = cfs_list_entry(kiblnd_data. \ + if (!list_empty(&kiblnd_data.kib_connd_zombies)) { + conn = list_entry(kiblnd_data. \ kib_connd_zombies.next, kib_conn_t, ibc_list); - cfs_list_del(&conn->ibc_list); + list_del(&conn->ibc_list); spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); @@ -3161,10 +3184,10 @@ kiblnd_connd (void *arg) spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); } - if (!cfs_list_empty(&kiblnd_data.kib_connd_conns)) { - conn = cfs_list_entry(kiblnd_data.kib_connd_conns.next, + if (!list_empty(&kiblnd_data.kib_connd_conns)) { + conn = list_entry(kiblnd_data.kib_connd_conns.next, kib_conn_t, ibc_list); - cfs_list_del(&conn->ibc_list); + list_del(&conn->ibc_list); spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); @@ -3206,7 +3229,7 @@ kiblnd_connd (void *arg) kiblnd_data.kib_peer_hash_size; } - deadline += p * HZ; + deadline += msecs_to_jiffies(p * MSEC_PER_SEC); spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); } @@ -3218,7 +3241,7 @@ kiblnd_connd (void *arg) add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); - waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout); + schedule_timeout(timeout); set_current_state(TASK_RUNNING); remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); @@ -3249,7 +3272,7 @@ kiblnd_qp_event(struct ib_event *event, void *arg) } } -void +static void kiblnd_complete (struct ib_wc *wc) { switch (kiblnd_wreqid2type(wc->wr_id)) { @@ -3301,7 +3324,7 @@ kiblnd_cq_completion(struct ib_cq *cq, void *arg) conn->ibc_nsends_posted > 0)) { kiblnd_conn_addref(conn); /* +1 ref for sched_conns */ conn->ibc_scheduled = 1; - cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); + list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); if (waitqueue_active(&sched->ibs_waitq)) wake_up(&sched->ibs_waitq); @@ -3334,7 +3357,7 @@ kiblnd_scheduler(void *arg) cfs_block_allsigs(); - init_waitqueue_entry_current(&wait); + init_waitqueue_entry(&wait, current); sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)]; @@ -3360,12 +3383,12 @@ kiblnd_scheduler(void *arg) did_something = 0; - if (!cfs_list_empty(&sched->ibs_conns)) { - conn = cfs_list_entry(sched->ibs_conns.next, + if (!list_empty(&sched->ibs_conns)) { + conn = list_entry(sched->ibs_conns.next, kib_conn_t, ibc_sched_list); /* take over kib_sched_conns' ref on conn... 
*/ LASSERT(conn->ibc_scheduled); - cfs_list_del(&conn->ibc_sched_list); + list_del(&conn->ibc_sched_list); conn->ibc_ready = 0; spin_unlock_irqrestore(&sched->ibs_lock, flags); @@ -3407,7 +3430,7 @@ kiblnd_scheduler(void *arg) * this one... */ /* +1 ref for sched_conns */ kiblnd_conn_addref(conn); - cfs_list_add_tail(&conn->ibc_sched_list, + list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); if (waitqueue_active(&sched->ibs_waitq)) wake_up(&sched->ibs_waitq); @@ -3433,7 +3456,7 @@ kiblnd_scheduler(void *arg) add_wait_queue_exclusive(&sched->ibs_waitq, &wait); spin_unlock_irqrestore(&sched->ibs_lock, flags); - waitq_wait(&wait, TASK_INTERRUPTIBLE); + schedule(); busy_loops = 0; remove_wait_queue(&sched->ibs_waitq, &wait); @@ -3451,23 +3474,23 @@ int kiblnd_failover_thread(void *arg) { rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_dev_t *dev; - wait_queue_t wait; - unsigned long flags; - int rc; + kib_dev_t *dev; + wait_queue_t wait; + unsigned long flags; + int rc; - LASSERT (*kiblnd_tunables.kib_dev_failover != 0); + LASSERT(*kiblnd_tunables.kib_dev_failover != 0); - cfs_block_allsigs (); + cfs_block_allsigs(); - init_waitqueue_entry_current(&wait); + init_waitqueue_entry(&wait, current); write_lock_irqsave(glock, flags); while (!kiblnd_data.kib_shutdown) { int do_failover = 0; int long_sleep; - cfs_list_for_each_entry(dev, &kiblnd_data.kib_failed_devs, + list_for_each_entry(dev, &kiblnd_data.kib_failed_devs, ibd_fail_list) { if (cfs_time_before(cfs_time_current(), dev->ibd_next_failover)) @@ -3477,7 +3500,7 @@ kiblnd_failover_thread(void *arg) } if (do_failover) { - cfs_list_del_init(&dev->ibd_fail_list); + list_del_init(&dev->ibd_fail_list); dev->ibd_failover = 1; write_unlock_irqrestore(glock, flags); @@ -3496,7 +3519,7 @@ kiblnd_failover_thread(void *arg) dev->ibd_next_failover = cfs_time_shift(min(dev->ibd_failed_failover, 10)); if (kiblnd_dev_can_failover(dev)) { - cfs_list_add_tail(&dev->ibd_fail_list, + list_add_tail(&dev->ibd_fail_list, &kiblnd_data.kib_failed_devs); } @@ -3504,7 +3527,7 @@ kiblnd_failover_thread(void *arg) } /* long sleep if no more pending failover */ - long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs); + long_sleep = list_empty(&kiblnd_data.kib_failed_devs); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); @@ -3523,9 +3546,9 @@ kiblnd_failover_thread(void *arg) * we need checking like this because if there is not active * connection on the dev and no SEND from local, we may listen * on wrong HCA for ever while there is a bonding failover */ - cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { + list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) { if (kiblnd_dev_can_failover(dev)) { - cfs_list_add_tail(&dev->ibd_fail_list, + list_add_tail(&dev->ibd_fail_list, &kiblnd_data.kib_failed_devs); } }
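Two patterns in the hunks above are worth calling out.

First, kiblnd_check_connreply() no longer insists that the peer's connparams match ours exactly: a reply advertising a queue depth or fragment count at or below what our CONNREQ offered is accepted, and the connection is re-sized to the peer's figures (conn->ibc_queue_depth, conn->ibc_max_frags and the credit counts all follow the reply). Together with folding IBLND_REJECT_MSG_QUEUE_SIZE and IBLND_REJECT_RDMA_FRAGS into the kiblnd_reconnect() path, peers built with different tunables can now converge on the smaller limits instead of failing with -EPROTO. A minimal sketch of the negotiate-down rule; negotiate_params() and its arguments are hypothetical names for illustration, not from the tree:

#include <linux/errno.h>

/* Accept the passive side's reply only if it does not exceed what the
 * active side advertised; the (possibly smaller) replied values become
 * the limits the connection actually runs with. */
static int negotiate_params(int my_depth, int my_frags,
                            int peer_depth, int peer_frags,
                            int *depth_out, int *frags_out)
{
        if (peer_depth > my_depth || peer_frags > my_frags)
                return -EPROTO;         /* peer wants more than we offered */

        *depth_out = peer_depth;        /* adopt the peer's limits */
        *frags_out = peer_frags;
        return 0;
}

Second, kiblnd_check_conns() keeps its two-phase shape through the cfs_list_t to list_head conversion: under the shared kib_global_lock it only sorts connections onto the private 'closes' and 'checksends' lists, pinning each with a reference, and the heavyweight work (kiblnd_close_conn(), kiblnd_check_sends()) happens only after the lock is dropped. A self-contained sketch of that collect-then-act idiom, assuming hypothetical xconn types and helpers rather than the LND's own:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/atomic.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct xconn {
        struct list_head x_list;        /* on the global list, under the lock */
        struct list_head x_scratch;     /* private linkage for the scanner */
        atomic_t         x_ref;
        unsigned long    x_deadline;    /* jiffies */
};

static void xconn_close(struct xconn *c, int error)
{
        /* stub: the real LND would modify the QP and wake the connd here */
}

static void xconn_decref(struct xconn *c)
{
        if (atomic_dec_and_test(&c->x_ref))
                kfree(c);               /* assumes conns are kmalloc'd */
}

static void check_timeouts(struct list_head *conns, spinlock_t *lock)
{
        struct list_head closes = LIST_HEAD_INIT(closes);
        struct xconn *c;

        /* Phase 1: under the lock, only collect victims, taking +1 ref
         * on each so it cannot vanish once the lock is dropped. */
        spin_lock(lock);
        list_for_each_entry(c, conns, x_list) {
                if (time_after_eq(jiffies, c->x_deadline)) {
                        atomic_inc(&c->x_ref);  /* +ref for 'closes' */
                        list_add(&c->x_scratch, &closes);
                }
        }
        spin_unlock(lock);

        /* Phase 2: outside the lock, it is safe to block. */
        while (!list_empty(&closes)) {
                c = list_entry(closes.next, struct xconn, x_scratch);
                list_del(&c->x_scratch);
                xconn_close(c, -ETIMEDOUT);
                xconn_decref(c);                /* drop the +ref */
        }
}

On the same theme, the connd loop's deadline now advances by msecs_to_jiffies(p * MSEC_PER_SEC) rather than p * HZ; both express a p-second step in jiffies, but the conversion helper is the spelling current kernels expect and stays correct however HZ is configured.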