X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lnet%2Fklnds%2Fo2iblnd%2Fo2iblnd_cb.c;h=5c9d1a148f5021a36da86418e487b6e458e91f65;hp=374d16181f26fcf907e0ddc9770ce001ed4917cd;hb=59071a8334bbc1a3a6d31565b7474063438d1f43;hpb=adb6cea0b70ac465b2a47635d9dc45d64ab1605b diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c index 374d161..5c9d1a14 100644 --- a/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -28,6 +26,8 @@ /* * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -100,13 +100,15 @@ kiblnd_txlist_done (lnet_ni_t *ni, cfs_list_t *txlist, int status) } kib_tx_t * -kiblnd_get_idle_tx (lnet_ni_t *ni) +kiblnd_get_idle_tx(lnet_ni_t *ni, lnet_nid_t target) { - kib_net_t *net = (kib_net_t *)ni->ni_data; - cfs_list_t *node; - kib_tx_t *tx; + kib_net_t *net = (kib_net_t *)ni->ni_data; + cfs_list_t *node; + kib_tx_t *tx; + kib_tx_poolset_t *tps; - node = kiblnd_pool_alloc_node(&net->ibn_tx_ps.tps_poolset); + tps = net->ibn_tx_ps[lnet_cpt_of_nid(target)]; + node = kiblnd_pool_alloc_node(&tps->tps_poolset); if (node == NULL) return NULL; tx = container_of(node, kib_tx_t, tx_list); @@ -126,17 +128,18 @@ kiblnd_get_idle_tx (lnet_ni_t *ni) } void -kiblnd_drop_rx (kib_rx_t *rx) +kiblnd_drop_rx(kib_rx_t *rx) { - kib_conn_t *conn = rx->rx_conn; - unsigned long flags; - - cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags); - LASSERT (conn->ibc_nrx > 0); - conn->ibc_nrx--; - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags); + kib_conn_t *conn = rx->rx_conn; + struct kib_sched_info *sched = conn->ibc_sched; + unsigned long flags; + + spin_lock_irqsave(&sched->ibs_lock, flags); + LASSERT(conn->ibc_nrx > 0); + conn->ibc_nrx--; + spin_unlock_irqrestore(&sched->ibs_lock, flags); - kiblnd_conn_decref(conn); + kiblnd_conn_decref(conn); } int @@ -195,15 +198,15 @@ kiblnd_post_rx (kib_rx_t *rx, int credit) if (credit == IBLND_POSTRX_NO_CREDIT) return 0; - cfs_spin_lock(&conn->ibc_lock); - if (credit == IBLND_POSTRX_PEER_CREDIT) - conn->ibc_outstanding_credits++; - else - conn->ibc_reserved_credits++; - cfs_spin_unlock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); + if (credit == IBLND_POSTRX_PEER_CREDIT) + conn->ibc_outstanding_credits++; + else + conn->ibc_reserved_credits++; + spin_unlock(&conn->ibc_lock); - kiblnd_check_sends(conn); - return 0; + kiblnd_check_sends(conn); + return 0; } kib_tx_t * @@ -234,15 +237,15 @@ kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie) void kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) { - kib_tx_t *tx; - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - int idle; + kib_tx_t *tx; + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + int idle; - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); - tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); - if (tx == NULL) { - cfs_spin_unlock(&conn->ibc_lock); + tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); + if (tx == NULL) { + spin_unlock(&conn->ibc_lock); CWARN("Unmatched completion type %x cookie "LPX64" from %s\n", txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid)); @@ -264,17 
+267,17 @@ kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie) if (idle) cfs_list_del(&tx->tx_list); - cfs_spin_unlock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); - if (idle) - kiblnd_tx_done(ni, tx); + if (idle) + kiblnd_tx_done(ni, tx); } void -kiblnd_send_completion (kib_conn_t *conn, int type, int status, __u64 cookie) +kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie) { - lnet_ni_t *ni = conn->ibc_peer->ibp_ni; - kib_tx_t *tx = kiblnd_get_idle_tx(ni); + lnet_ni_t *ni = conn->ibc_peer->ibp_ni; + kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); if (tx == NULL) { CERROR("Can't get tx for completion %x for %s\n", @@ -309,12 +312,12 @@ kiblnd_handle_rx (kib_rx_t *rx) if (credits != 0) { /* Have I received credits that will let me send? */ - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); - if (conn->ibc_credits + credits > - IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) { - rc2 = conn->ibc_credits; - cfs_spin_unlock(&conn->ibc_lock); + if (conn->ibc_credits + credits > + IBLND_MSG_QUEUE_SIZE(conn->ibc_version)) { + rc2 = conn->ibc_credits; + spin_unlock(&conn->ibc_lock); CERROR("Bad credits from %s: %d + %d > %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), @@ -333,7 +336,7 @@ kiblnd_handle_rx (kib_rx_t *rx) !IBLND_OOB_CAPABLE(conn->ibc_version)) /* v1 only */ conn->ibc_outstanding_credits++; - cfs_spin_unlock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); kiblnd_check_sends(conn); } @@ -385,12 +388,12 @@ kiblnd_handle_rx (kib_rx_t *rx) case IBLND_MSG_PUT_ACK: post_credit = IBLND_POSTRX_RSRVD_CREDIT; - cfs_spin_lock(&conn->ibc_lock); - tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, - msg->ibm_u.putack.ibpam_src_cookie); - if (tx != NULL) - cfs_list_del(&tx->tx_list); - cfs_spin_unlock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); + tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, + msg->ibm_u.putack.ibpam_src_cookie); + if (tx != NULL) + cfs_list_del(&tx->tx_list); + spin_unlock(&conn->ibc_lock); if (tx == NULL) { CERROR("Unmatched PUT_ACK from %s\n", @@ -414,11 +417,11 @@ kiblnd_handle_rx (kib_rx_t *rx) CERROR("Can't setup rdma for PUT to %s: %d\n", libcfs_nid2str(conn->ibc_peer->ibp_nid), rc2); - cfs_spin_lock(&conn->ibc_lock); - tx->tx_waiting = 0; /* clear waiting and queue atomically */ - kiblnd_queue_tx_locked(tx, conn); - cfs_spin_unlock(&conn->ibc_lock); - break; + spin_lock(&conn->ibc_lock); + tx->tx_waiting = 0; /* clear waiting and queue atomically */ + kiblnd_queue_tx_locked(tx, conn); + spin_unlock(&conn->ibc_lock); + break; case IBLND_MSG_PUT_DONE: post_credit = IBLND_POSTRX_PEER_CREDIT; @@ -499,17 +502,17 @@ kiblnd_rx_complete (kib_rx_t *rx, int status, int nob) /* racing with connection establishment/teardown! 
*/ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock; - unsigned long flags; - - cfs_write_lock_irqsave(g_lock, flags); - /* must check holding global lock to eliminate race */ - if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { - cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs); - cfs_write_unlock_irqrestore(g_lock, flags); - return; - } - cfs_write_unlock_irqrestore(g_lock, flags); + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + unsigned long flags; + + write_lock_irqsave(g_lock, flags); + /* must check holding global lock to eliminate race */ + if (conn->ibc_state < IBLND_CONN_ESTABLISHED) { + cfs_list_add_tail(&rx->rx_list, &conn->ibc_early_rxs); + write_unlock_irqrestore(g_lock, flags); + return; + } + write_unlock_irqrestore(g_lock, flags); } kiblnd_handle_rx(rx); return; @@ -548,12 +551,19 @@ kiblnd_kvaddr_to_page (unsigned long vaddr) static int kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) { - kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; - __u64 *pages = tx->tx_pages; - int npages; - int size; - int rc; - int i; + kib_hca_dev_t *hdev; + __u64 *pages = tx->tx_pages; + kib_fmr_poolset_t *fps; + int npages; + int size; + int cpt; + int rc; + int i; + + LASSERT(tx->tx_pool != NULL); + LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); + + hdev = tx->tx_pool->tpo_hdev; for (i = 0, npages = 0; i < rd->rd_nfrags; i++) { for (size = 0; size < rd->rd_frags[i].rf_nob; @@ -563,7 +573,10 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) } } - rc = kiblnd_fmr_pool_map(&net->ibn_fmr_ps, pages, npages, 0, &tx->tx_u.fmr); + cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; + + fps = net->ibn_fmr_ps[cpt]; + rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr); if (rc != 0) { CERROR ("Can't map %d pages: %d\n", npages, rc); return rc; @@ -583,13 +596,23 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) static int kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) { - kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; - __u64 iova; - int rc; + kib_hca_dev_t *hdev; + kib_pmr_poolset_t *pps; + __u64 iova; + int cpt; + int rc; + + LASSERT(tx->tx_pool != NULL); + LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); - iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask; + hdev = tx->tx_pool->tpo_hdev; - rc = kiblnd_pmr_pool_map(&net->ibn_pmr_ps, hdev, rd, &iova, &tx->tx_u.pmr); + iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask; + + cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; + + pps = net->ibn_pmr_ps[cpt]; + rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr); if (rc != 0) { CERROR("Failed to create MR by phybuf: %d\n", rc); return rc; @@ -609,17 +632,18 @@ kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) { - kib_net_t *net = ni->ni_data; + kib_net_t *net = ni->ni_data; - LASSERT (net != NULL); + LASSERT(net != NULL); - if (net->ibn_with_fmr && tx->tx_u.fmr.fmr_pfmr != NULL) { - kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status); - tx->tx_u.fmr.fmr_pfmr = NULL; - } else if (net->ibn_with_pmr && tx->tx_u.pmr != NULL) { - kiblnd_pmr_pool_unmap(tx->tx_u.pmr); - tx->tx_u.pmr = NULL; - } + if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) { + kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status); + tx->tx_u.fmr.fmr_pfmr = NULL; + + } else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) { + 
kiblnd_pmr_pool_unmap(tx->tx_u.pmr); + tx->tx_u.pmr = NULL; + } if (tx->tx_nfrags != 0) { kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, @@ -663,12 +687,12 @@ kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, return 0; } - if (net->ibn_with_fmr) - return kiblnd_fmr_map_tx(net, tx, rd, nob); - else if (net->ibn_with_pmr) - return kiblnd_pmr_map_tx(net, tx, rd, nob); + if (net->ibn_fmr_ps != NULL) + return kiblnd_fmr_map_tx(net, tx, rd, nob); + else if (net->ibn_pmr_ps != NULL) + return kiblnd_pmr_map_tx(net, tx, rd, nob); - return -EINVAL; + return -EINVAL; } @@ -752,7 +776,6 @@ kiblnd_setup_rd_kiov (lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, fragnob = min((int)(kiov->kiov_len - offset), nob); - memset(sg, 0, sizeof(*sg)); sg_set_page(sg, kiov->kiov_page, fragnob, kiov->kiov_offset + offset); sg++; @@ -813,15 +836,15 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) tx->tx_queued = 0; if (msg->ibm_type == IBLND_MSG_NOOP && - (!kiblnd_send_noop(conn) || /* redundant NOOP */ + (!kiblnd_need_noop(conn) || /* redundant NOOP */ (IBLND_OOB_CAPABLE(ver) && /* posted enough NOOP */ conn->ibc_noops_posted == IBLND_OOB_MSGS(ver)))) { /* OK to drop when posted enough NOOPs, since * kiblnd_check_sends will queue NOOP again when * posted NOOPs complete */ - cfs_spin_unlock(&conn->ibc_lock); - kiblnd_tx_done(peer->ibp_ni, tx); - cfs_spin_lock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); + kiblnd_tx_done(peer->ibp_ni, tx); + spin_lock(&conn->ibc_lock); CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n", libcfs_nid2str(peer->ibp_nid), conn->ibc_noops_posted); @@ -879,7 +902,7 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) if (done) cfs_list_del(&tx->tx_list); - cfs_spin_unlock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); if (conn->ibc_state == IBLND_CONN_ESTABLISHED) CERROR("Error %d posting transmit to %s\n", @@ -893,9 +916,9 @@ kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit) if (done) kiblnd_tx_done(peer->ibp_ni, tx); - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); - return -EIO; + return -EIO; } void @@ -912,7 +935,7 @@ kiblnd_check_sends (kib_conn_t *conn) return; } - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); LASSERT (conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver)); LASSERT (!IBLND_OOB_CAPABLE(ver) || @@ -928,14 +951,14 @@ kiblnd_check_sends (kib_conn_t *conn) conn->ibc_reserved_credits--; } - if (kiblnd_send_noop(conn)) { - cfs_spin_unlock(&conn->ibc_lock); + if (kiblnd_need_noop(conn)) { + spin_unlock(&conn->ibc_lock); - tx = kiblnd_get_idle_tx(ni); - if (tx != NULL) - kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); + tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); + if (tx != NULL) + kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); if (tx != NULL) kiblnd_queue_tx_locked(tx, conn); } @@ -965,9 +988,9 @@ kiblnd_check_sends (kib_conn_t *conn) break; } - cfs_spin_unlock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); - kiblnd_conn_decref(conn); /* ...until here */ + kiblnd_conn_decref(conn); /* ...until here */ } void @@ -992,7 +1015,7 @@ kiblnd_tx_complete (kib_tx_t *tx, int status) kiblnd_peer_alive(conn->ibc_peer); } - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); /* I could be racing with rdma completion. Whoever makes 'tx' idle * gets to free it, which also drops its ref on 'conn'. 
*/ @@ -1015,7 +1038,7 @@ kiblnd_tx_complete (kib_tx_t *tx, int status) kiblnd_conn_addref(conn); /* 1 ref for me.... */ - cfs_spin_unlock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); if (idle) kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx); @@ -1153,8 +1176,8 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) LASSERT (!tx->tx_queued); /* not queued for sending already */ LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); - tx->tx_queued = 1; - tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * CFS_HZ); + tx->tx_queued = 1; + tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ); if (tx->tx_conn == NULL) { kiblnd_conn_addref(conn); @@ -1200,11 +1223,11 @@ kiblnd_queue_tx_locked (kib_tx_t *tx, kib_conn_t *conn) void kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn) { - cfs_spin_lock(&conn->ibc_lock); - kiblnd_queue_tx_locked(tx, conn); - cfs_spin_unlock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); + kiblnd_queue_tx_locked(tx, conn); + spin_unlock(&conn->ibc_lock); - kiblnd_check_sends(conn); + kiblnd_check_sends(conn); } static int kiblnd_resolve_addr(struct rdma_cm_id *cmid, @@ -1262,7 +1285,9 @@ kiblnd_connect_peer (kib_peer_t *peer) LASSERT (net != NULL); LASSERT (peer->ibp_connecting > 0); - cmid = rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP); + cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer, RDMA_PS_TCP, + IB_QPT_RC); + if (IS_ERR(cmid)) { CERROR("Can't create CMID for %s: %ld\n", libcfs_nid2str(peer->ibp_nid), PTR_ERR(cmid)); @@ -1318,7 +1343,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) kib_peer_t *peer; kib_peer_t *peer2; kib_conn_t *conn; - cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; unsigned long flags; int rc; @@ -1330,7 +1355,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) /* First time, just use a read lock since I expect to find my peer * connected */ - cfs_read_lock_irqsave(g_lock, flags); + read_lock_irqsave(g_lock, flags); peer = kiblnd_find_peer_locked(nid); if (peer != NULL && !cfs_list_empty(&peer->ibp_conns)) { @@ -1338,7 +1363,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) conn = kiblnd_get_conn_locked(peer); kiblnd_conn_addref(conn); /* 1 ref for me... */ - cfs_read_unlock_irqrestore(g_lock, flags); + read_unlock_irqrestore(g_lock, flags); if (tx != NULL) kiblnd_queue_tx(tx, conn); @@ -1346,9 +1371,9 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) return; } - cfs_read_unlock(g_lock); - /* Re-try with a write lock */ - cfs_write_lock(g_lock); + read_unlock(g_lock); + /* Re-try with a write lock */ + write_lock(g_lock); peer = kiblnd_find_peer_locked(nid); if (peer != NULL) { @@ -1359,12 +1384,12 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) if (tx != NULL) cfs_list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); - cfs_write_unlock_irqrestore(g_lock, flags); - } else { - conn = kiblnd_get_conn_locked(peer); - kiblnd_conn_addref(conn); /* 1 ref for me... */ + write_unlock_irqrestore(g_lock, flags); + } else { + conn = kiblnd_get_conn_locked(peer); + kiblnd_conn_addref(conn); /* 1 ref for me... 
*/ - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); if (tx != NULL) kiblnd_queue_tx(tx, conn); @@ -1373,7 +1398,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) return; } - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); /* Allocate a peer ready to add to the peer table and retry */ rc = kiblnd_create_peer(ni, &peer, nid); @@ -1387,7 +1412,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) return; } - cfs_write_lock_irqsave(g_lock, flags); + write_lock_irqsave(g_lock, flags); peer2 = kiblnd_find_peer_locked(nid); if (peer2 != NULL) { @@ -1398,12 +1423,12 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) if (tx != NULL) cfs_list_add_tail(&tx->tx_list, &peer2->ibp_tx_queue); - cfs_write_unlock_irqrestore(g_lock, flags); - } else { - conn = kiblnd_get_conn_locked(peer2); - kiblnd_conn_addref(conn); /* 1 ref for me... */ + write_unlock_irqrestore(g_lock, flags); + } else { + conn = kiblnd_get_conn_locked(peer2); + kiblnd_conn_addref(conn); /* 1 ref for me... */ - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); if (tx != NULL) kiblnd_queue_tx(tx, conn); @@ -1427,7 +1452,7 @@ kiblnd_launch_tx (lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) kiblnd_peer_addref(peer); cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); kiblnd_connect_peer(peer); kiblnd_peer_decref(peer); @@ -1482,12 +1507,12 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) if (nob <= IBLND_MSG_SIZE) break; /* send IMMEDIATE */ - tx = kiblnd_get_idle_tx(ni); - if (tx == NULL) { - CERROR("Can't allocate txd for GET to %s: \n", - libcfs_nid2str(target.nid)); - return -ENOMEM; - } + tx = kiblnd_get_idle_tx(ni, target.nid); + if (tx == NULL) { + CERROR("Can't allocate txd for GET to %s\n", + libcfs_nid2str(target.nid)); + return -ENOMEM; + } ibmsg = tx->tx_msg; @@ -1536,7 +1561,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) if (nob <= IBLND_MSG_SIZE) break; /* send IMMEDIATE */ - tx = kiblnd_get_idle_tx(ni); + tx = kiblnd_get_idle_tx(ni, target.nid); if (tx == NULL) { CERROR("Can't allocate %s txd for %s\n", type == LNET_MSG_PUT ? 
"PUT" : "REPLY", @@ -1575,7 +1600,7 @@ kiblnd_send (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg) LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]) <= IBLND_MSG_SIZE); - tx = kiblnd_get_idle_tx(ni); + tx = kiblnd_get_idle_tx(ni, target.nid); if (tx == NULL) { CERROR ("Can't send %d to %s: tx descs exhausted\n", type, libcfs_nid2str(target.nid)); @@ -1616,7 +1641,7 @@ kiblnd_reply (lnet_ni_t *ni, kib_rx_t *rx, lnet_msg_t *lntmsg) kib_tx_t *tx; int rc; - tx = kiblnd_get_idle_tx(ni); + tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); if (tx == NULL) { CERROR("Can't get tx for REPLY to %s\n", libcfs_nid2str(target.nid)); @@ -1720,7 +1745,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, break; } - tx = kiblnd_get_idle_tx(ni); + tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); if (tx == NULL) { CERROR("Can't allocate tx for %s\n", libcfs_nid2str(conn->ibc_peer->ibp_nid)); @@ -1780,15 +1805,15 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed, } int -kiblnd_thread_start (int (*fn)(void *arg), void *arg) +kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name) { - long pid = cfs_create_thread (fn, arg, 0); + struct task_struct *task = kthread_run(fn, arg, name); - if (pid < 0) - return ((int)pid); + if (IS_ERR(task)) + return PTR_ERR(task); - cfs_atomic_inc (&kiblnd_data.kib_nthreads); - return (0); + cfs_atomic_inc(&kiblnd_data.kib_nthreads); + return 0; } void @@ -1812,7 +1837,7 @@ kiblnd_peer_notify (kib_peer_t *peer) cfs_time_t last_alive = 0; unsigned long flags; - cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (cfs_list_empty(&peer->ibp_conns) && peer->ibp_accepting == 0 && @@ -1824,7 +1849,7 @@ kiblnd_peer_notify (kib_peer_t *peer) last_alive = peer->ibp_last_alive; } - cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); if (error != 0) lnet_notify(peer->ibp_ni, @@ -1884,55 +1909,54 @@ kiblnd_close_conn_locked (kib_conn_t *conn, int error) kiblnd_set_conn_state(conn, IBLND_CONN_CLOSING); - if (error != 0 && - kiblnd_dev_can_failover(dev)) { - cfs_list_add_tail(&dev->ibd_fail_list, - &kiblnd_data.kib_failed_devs); - cfs_waitq_signal(&kiblnd_data.kib_failover_waitq); - } + if (error != 0 && + kiblnd_dev_can_failover(dev)) { + cfs_list_add_tail(&dev->ibd_fail_list, + &kiblnd_data.kib_failed_devs); + wake_up(&kiblnd_data.kib_failover_waitq); + } - cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); - cfs_list_add_tail (&conn->ibc_list, &kiblnd_data.kib_connd_conns); - cfs_waitq_signal (&kiblnd_data.kib_connd_waitq); + cfs_list_add_tail(&conn->ibc_list, &kiblnd_data.kib_connd_conns); + wake_up(&kiblnd_data.kib_connd_waitq); - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); } void -kiblnd_close_conn (kib_conn_t *conn, int error) +kiblnd_close_conn(kib_conn_t *conn, int error) { - unsigned long flags; + unsigned long flags; - cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - kiblnd_close_conn_locked(conn, error); + kiblnd_close_conn_locked(conn, error); - cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); } void kiblnd_handle_early_rxs(kib_conn_t *conn) 
{ - unsigned long flags; - kib_rx_t *rx; + unsigned long flags; + kib_rx_t *rx; - LASSERT (!cfs_in_interrupt()); - LASSERT (conn->ibc_state >= IBLND_CONN_ESTABLISHED); + LASSERT(!cfs_in_interrupt()); + LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED); - cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - while (!cfs_list_empty(&conn->ibc_early_rxs)) { - rx = cfs_list_entry(conn->ibc_early_rxs.next, - kib_rx_t, rx_list); - cfs_list_del(&rx->rx_list); - cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + while (!cfs_list_empty(&conn->ibc_early_rxs)) { + rx = cfs_list_entry(conn->ibc_early_rxs.next, + kib_rx_t, rx_list); + cfs_list_del(&rx->rx_list); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - kiblnd_handle_rx(rx); + kiblnd_handle_rx(rx); - cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); } - cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); } void @@ -1943,7 +1967,7 @@ kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs) cfs_list_t *nxt; kib_tx_t *tx; - cfs_spin_lock(&conn->ibc_lock); + spin_lock(&conn->ibc_lock); cfs_list_for_each_safe (tmp, nxt, txs) { tx = cfs_list_entry (tmp, kib_tx_t, tx_list); @@ -1966,10 +1990,9 @@ kiblnd_abort_txs(kib_conn_t *conn, cfs_list_t *txs) } } - cfs_spin_unlock(&conn->ibc_lock); + spin_unlock(&conn->ibc_lock); - kiblnd_txlist_done(conn->ibc_peer->ibp_ni, - &zombies, -ECONNABORTED); + kiblnd_txlist_done(conn->ibc_peer->ibp_ni, &zombies, -ECONNABORTED); } void @@ -2006,7 +2029,7 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error) LASSERT (error != 0); LASSERT (!cfs_in_interrupt()); - cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); if (active) { LASSERT (peer->ibp_connecting > 0); @@ -2019,7 +2042,7 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error) if (peer->ibp_connecting != 0 || peer->ibp_accepting != 0) { /* another connection attempt under way... 
*/ - cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); return; } @@ -2038,7 +2061,7 @@ kiblnd_peer_connect_failed (kib_peer_t *peer, int active, int error) LASSERT (cfs_list_empty(&peer->ibp_tx_queue)); } - cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); kiblnd_peer_notify(peer); @@ -2083,7 +2106,7 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) } /* connection established */ - cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); conn->ibc_last_send = jiffies; kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED); @@ -2121,30 +2144,29 @@ kiblnd_connreq_done(kib_conn_t *conn, int status) /* start to shut down connection */ kiblnd_close_conn_locked(conn, -ECONNABORTED); - cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - kiblnd_txlist_done(ni, &txs, -ECONNABORTED); + kiblnd_txlist_done(ni, &txs, -ECONNABORTED); - return; - } + return; + } - cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); - /* Schedule blocked txs */ - cfs_spin_lock (&conn->ibc_lock); - while (!cfs_list_empty (&txs)) { - tx = cfs_list_entry (txs.next, kib_tx_t, tx_list); - cfs_list_del(&tx->tx_list); + /* Schedule blocked txs */ + spin_lock(&conn->ibc_lock); + while (!cfs_list_empty(&txs)) { + tx = cfs_list_entry(txs.next, kib_tx_t, tx_list); + cfs_list_del(&tx->tx_list); - kiblnd_queue_tx_locked(tx, conn); - } - cfs_spin_unlock (&conn->ibc_lock); + kiblnd_queue_tx_locked(tx, conn); + } + spin_unlock(&conn->ibc_lock); - kiblnd_check_sends(conn); + kiblnd_check_sends(conn); - /* schedule blocked rxs */ - kiblnd_handle_early_rxs(conn); + /* schedule blocked rxs */ + kiblnd_handle_early_rxs(conn); } void @@ -2161,7 +2183,7 @@ kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej) int kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) { - cfs_rwlock_t *g_lock = &kiblnd_data.kib_global_lock; + rwlock_t *g_lock = &kiblnd_data.kib_global_lock; kib_msg_t *reqmsg = priv; kib_msg_t *ackmsg; kib_dev_t *ibdev; @@ -2307,7 +2329,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) goto failed; } - cfs_write_lock_irqsave(g_lock, flags); + write_lock_irqsave(g_lock, flags); peer2 = kiblnd_find_peer_locked(nid); if (peer2 != NULL) { @@ -2320,7 +2342,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) if (peer2->ibp_incarnation != reqmsg->ibm_srcstamp || peer2->ibp_version != version) { kiblnd_close_peer_conns_locked(peer2, -ESTALE); - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); CWARN("Conn stale %s [old ver: %x, new ver: %x]\n", libcfs_nid2str(nid), peer2->ibp_version, version); @@ -2333,7 +2355,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) /* tie-break connection race in favour of the higher NID */ if (peer2->ibp_connecting != 0 && nid < ni->ni_nid) { - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); CWARN("Conn race %s\n", libcfs_nid2str(peer2->ibp_nid)); @@ -2345,7 +2367,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) peer2->ibp_accepting++; kiblnd_peer_addref(peer2); - cfs_write_unlock_irqrestore(g_lock, flags); + 
write_unlock_irqrestore(g_lock, flags); kiblnd_peer_decref(peer); peer = peer2; } else { @@ -2364,7 +2386,7 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob) kiblnd_peer_addref(peer); cfs_list_add_tail(&peer->ibp_list, kiblnd_nid2peerlist(nid)); - cfs_write_unlock_irqrestore(g_lock, flags); + write_unlock_irqrestore(g_lock, flags); } conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_PASSIVE_WAIT, version); @@ -2444,7 +2466,7 @@ kiblnd_reconnect (kib_conn_t *conn, int version, LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT); LASSERT (peer->ibp_connecting > 0); /* 'conn' at least */ - cfs_write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + write_lock_irqsave(&kiblnd_data.kib_global_lock, flags); /* retry connection if it's still needed and no other connection * attempts (active or passive) are in progress @@ -2462,7 +2484,7 @@ kiblnd_reconnect (kib_conn_t *conn, int version, peer->ibp_incarnation = incarnation; } - cfs_write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); if (!retry) return; @@ -2694,13 +2716,13 @@ kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob) goto failed; } - cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - if (msg->ibm_dstnid == ni->ni_nid && - msg->ibm_dststamp == net->ibn_incarnation) - rc = 0; - else - rc = -ESTALE; - cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + if (msg->ibm_dstnid == ni->ni_nid && + msg->ibm_dststamp == net->ibn_incarnation) + rc = 0; + else + rc = -ESTALE; + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); if (rc != 0) { CERROR("Bad connection reply from %s, rc = %d, " @@ -2742,12 +2764,13 @@ kiblnd_active_connect (struct rdma_cm_id *cmid) unsigned long flags; int rc; - cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); - incarnation = peer->ibp_incarnation; - version = (peer->ibp_version == 0) ? IBLND_MSG_VERSION : peer->ibp_version; + incarnation = peer->ibp_incarnation; + version = (peer->ibp_version == 0) ? 
IBLND_MSG_VERSION : + peer->ibp_version; - cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); conn = kiblnd_create_conn(peer, cmid, IBLND_CONN_ACTIVE_CONNECT, version); if (conn == NULL) { @@ -2967,14 +2990,11 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) } } -int -kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs) +static int +kiblnd_check_txs_locked(kib_conn_t *conn, cfs_list_t *txs) { kib_tx_t *tx; cfs_list_t *ttmp; - int timed_out = 0; - - cfs_spin_lock(&conn->ibc_lock); cfs_list_for_each (ttmp, txs) { tx = cfs_list_entry (ttmp, kib_tx_t, tx_list); @@ -2987,83 +3007,107 @@ kiblnd_check_txs (kib_conn_t *conn, cfs_list_t *txs) } if (cfs_time_aftereq (jiffies, tx->tx_deadline)) { - timed_out = 1; CERROR("Timed out tx: %s, %lu seconds\n", kiblnd_queue2str(conn, txs), cfs_duration_sec(jiffies - tx->tx_deadline)); - break; + return 1; } } - cfs_spin_unlock(&conn->ibc_lock); - return timed_out; + return 0; } -int -kiblnd_conn_timed_out (kib_conn_t *conn) +static int +kiblnd_conn_timed_out_locked(kib_conn_t *conn) { - return kiblnd_check_txs(conn, &conn->ibc_tx_queue) || - kiblnd_check_txs(conn, &conn->ibc_tx_noops) || - kiblnd_check_txs(conn, &conn->ibc_tx_queue_rsrvd) || - kiblnd_check_txs(conn, &conn->ibc_tx_queue_nocred) || - kiblnd_check_txs(conn, &conn->ibc_active_txs); + return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) || + kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) || + kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_rsrvd) || + kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue_nocred) || + kiblnd_check_txs_locked(conn, &conn->ibc_active_txs); } void kiblnd_check_conns (int idx) { - cfs_list_t *peers = &kiblnd_data.kib_peers[idx]; - cfs_list_t *ptmp; - kib_peer_t *peer; - kib_conn_t *conn; - cfs_list_t *ctmp; - unsigned long flags; + CFS_LIST_HEAD (closes); + CFS_LIST_HEAD (checksends); + cfs_list_t *peers = &kiblnd_data.kib_peers[idx]; + cfs_list_t *ptmp; + kib_peer_t *peer; + kib_conn_t *conn; + cfs_list_t *ctmp; + unsigned long flags; - again: /* NB. We expect to have a look at all the peers and not find any - * rdmas to time out, so we just use a shared lock while we + * RDMAs to time out, so we just use a shared lock while we * take a look... */ - cfs_read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); + read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); cfs_list_for_each (ptmp, peers) { peer = cfs_list_entry (ptmp, kib_peer_t, ibp_list); cfs_list_for_each (ctmp, &peer->ibp_conns) { - conn = cfs_list_entry (ctmp, kib_conn_t, ibc_list); + int timedout; + int sendnoop; + + conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list); LASSERT (conn->ibc_state == IBLND_CONN_ESTABLISHED); - /* In case we have enough credits to return via a - * NOOP, but there were no non-blocking tx descs - * free to do it last time... */ - kiblnd_check_sends(conn); + spin_lock(&conn->ibc_lock); - if (!kiblnd_conn_timed_out(conn)) + sendnoop = kiblnd_need_noop(conn); + timedout = kiblnd_conn_timed_out_locked(conn); + if (!sendnoop && !timedout) { + spin_unlock(&conn->ibc_lock); continue; + } - /* Handle timeout by closing the whole connection. We - * can only be sure RDMA activity has ceased once the - * QP has been modified. */ - - kiblnd_conn_addref(conn); /* 1 ref for me... 
*/ - - cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, - flags); - - CERROR("Timed out RDMA with %s (%lu)\n", - libcfs_nid2str(peer->ibp_nid), - cfs_duration_sec(cfs_time_current() - - peer->ibp_last_alive)); - - kiblnd_close_conn(conn, -ETIMEDOUT); - kiblnd_conn_decref(conn); /* ...until here */ - - /* start again now I've dropped the lock */ - goto again; - } + if (timedout) { + CERROR("Timed out RDMA with %s (%lu): " + "c: %u, oc: %u, rc: %u\n", + libcfs_nid2str(peer->ibp_nid), + cfs_duration_sec(cfs_time_current() - + peer->ibp_last_alive), + conn->ibc_credits, + conn->ibc_outstanding_credits, + conn->ibc_reserved_credits); + cfs_list_add(&conn->ibc_connd_list, &closes); + } else { + cfs_list_add(&conn->ibc_connd_list, + &checksends); + } + /* +ref for 'closes' or 'checksends' */ + kiblnd_conn_addref(conn); + + spin_unlock(&conn->ibc_lock); + } + } + + read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + + /* Handle timeout by closing the whole + * connection. We can only be sure RDMA activity + * has ceased once the QP has been modified. */ + while (!cfs_list_empty(&closes)) { + conn = cfs_list_entry(closes.next, + kib_conn_t, ibc_connd_list); + cfs_list_del(&conn->ibc_connd_list); + kiblnd_close_conn(conn, -ETIMEDOUT); + kiblnd_conn_decref(conn); } - cfs_read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags); + /* In case we have enough credits to return via a + * NOOP, but there were no non-blocking tx descs + * free to do it last time... */ + while (!cfs_list_empty(&checksends)) { + conn = cfs_list_entry(checksends.next, + kib_conn_t, ibc_connd_list); + cfs_list_del(&conn->ibc_connd_list); + kiblnd_check_sends(conn); + kiblnd_conn_decref(conn); + } } void @@ -3082,22 +3126,21 @@ kiblnd_disconnect_conn (kib_conn_t *conn) int kiblnd_connd (void *arg) { - cfs_waitlink_t wait; - unsigned long flags; - kib_conn_t *conn; - int timeout; - int i; - int dropped_lock; - int peer_index = 0; - unsigned long deadline = jiffies; + wait_queue_t wait; + unsigned long flags; + kib_conn_t *conn; + int timeout; + int i; + int dropped_lock; + int peer_index = 0; + unsigned long deadline = jiffies; - cfs_daemonize ("kiblnd_connd"); - cfs_block_allsigs (); + cfs_block_allsigs (); - cfs_waitlink_init (&wait); - kiblnd_data.kib_connd = current; + init_waitqueue_entry_current (&wait); + kiblnd_data.kib_connd = current; - cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); while (!kiblnd_data.kib_shutdown) { @@ -3109,30 +3152,28 @@ kiblnd_connd (void *arg) kib_conn_t, ibc_list); cfs_list_del(&conn->ibc_list); - cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, - flags); - dropped_lock = 1; + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, + flags); + dropped_lock = 1; - kiblnd_destroy_conn(conn); + kiblnd_destroy_conn(conn); - cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, - flags); - } + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + } - if (!cfs_list_empty (&kiblnd_data.kib_connd_conns)) { - conn = cfs_list_entry (kiblnd_data.kib_connd_conns.next, - kib_conn_t, ibc_list); - cfs_list_del(&conn->ibc_list); + if (!cfs_list_empty(&kiblnd_data.kib_connd_conns)) { + conn = cfs_list_entry(kiblnd_data.kib_connd_conns.next, + kib_conn_t, ibc_list); + cfs_list_del(&conn->ibc_list); - cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, - flags); - dropped_lock = 1; + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, + flags); + dropped_lock = 1; - kiblnd_disconnect_conn(conn); - 
kiblnd_conn_decref(conn); + kiblnd_disconnect_conn(conn); + kiblnd_conn_decref(conn); - cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, - flags); + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); } /* careful with the jiffy wrap... */ @@ -3142,7 +3183,7 @@ kiblnd_connd (void *arg) const int p = 1; int chunk = kiblnd_data.kib_peer_hash_size; - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); dropped_lock = 1; /* Time to check for RDMA timeouts on a few more @@ -3159,36 +3200,35 @@ kiblnd_connd (void *arg) if (chunk == 0) chunk = 1; - for (i = 0; i < chunk; i++) { - kiblnd_check_conns(peer_index); - peer_index = (peer_index + 1) % - kiblnd_data.kib_peer_hash_size; - } + for (i = 0; i < chunk; i++) { + kiblnd_check_conns(peer_index); + peer_index = (peer_index + 1) % + kiblnd_data.kib_peer_hash_size; + } - deadline += p * CFS_HZ; - cfs_spin_lock_irqsave(&kiblnd_data.kib_connd_lock, - flags); - } + deadline += p * HZ; + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + } - if (dropped_lock) - continue; + if (dropped_lock) + continue; - /* Nothing to do for 'timeout' */ - cfs_set_current_state (CFS_TASK_INTERRUPTIBLE); - cfs_waitq_add (&kiblnd_data.kib_connd_waitq, &wait); - cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags); + /* Nothing to do for 'timeout' */ + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); - cfs_waitq_timedwait(&wait, CFS_TASK_INTERRUPTIBLE, timeout); + waitq_timedwait(&wait, TASK_INTERRUPTIBLE, timeout); - cfs_set_current_state (CFS_TASK_RUNNING); - cfs_waitq_del (&kiblnd_data.kib_connd_waitq, &wait); - cfs_spin_lock_irqsave (&kiblnd_data.kib_connd_lock, flags); - } + set_current_state(TASK_RUNNING); + remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait); + spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags); + } - cfs_spin_unlock_irqrestore (&kiblnd_data.kib_connd_lock, flags); + spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); - kiblnd_thread_fini(); - return (0); + kiblnd_thread_fini(); + return 0; } void @@ -3239,33 +3279,35 @@ kiblnd_complete (struct ib_wc *wc) } void -kiblnd_cq_completion (struct ib_cq *cq, void *arg) +kiblnd_cq_completion(struct ib_cq *cq, void *arg) { - /* NB I'm not allowed to schedule this conn once its refcount has - * reached 0. Since fundamentally I'm racing with scheduler threads - * consuming my CQ I could be called after all completions have - * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0 - * and this CQ is about to be destroyed so I NOOP. */ - kib_conn_t *conn = (kib_conn_t *)arg; - unsigned long flags; + /* NB I'm not allowed to schedule this conn once its refcount has + * reached 0. Since fundamentally I'm racing with scheduler threads + * consuming my CQ I could be called after all completions have + * occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0 + * and this CQ is about to be destroyed so I NOOP. 
*/ + kib_conn_t *conn = (kib_conn_t *)arg; + struct kib_sched_info *sched = conn->ibc_sched; + unsigned long flags; - LASSERT (cq == conn->ibc_cq); + LASSERT(cq == conn->ibc_cq); - cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags); + spin_lock_irqsave(&sched->ibs_lock, flags); - conn->ibc_ready = 1; + conn->ibc_ready = 1; - if (!conn->ibc_scheduled && - (conn->ibc_nrx > 0 || - conn->ibc_nsends_posted > 0)) { - kiblnd_conn_addref(conn); /* +1 ref for sched_conns */ - conn->ibc_scheduled = 1; - cfs_list_add_tail(&conn->ibc_sched_list, - &kiblnd_data.kib_sched_conns); - cfs_waitq_signal(&kiblnd_data.kib_sched_waitq); - } + if (!conn->ibc_scheduled && + (conn->ibc_nrx > 0 || + conn->ibc_nsends_posted > 0)) { + kiblnd_conn_addref(conn); /* +1 ref for sched_conns */ + conn->ibc_scheduled = 1; + cfs_list_add_tail(&conn->ibc_sched_list, &sched->ibs_conns); + + if (waitqueue_active(&sched->ibs_waitq)) + wake_up(&sched->ibs_waitq); + } - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags); + spin_unlock_irqrestore(&sched->ibs_lock, flags); } void @@ -3280,48 +3322,53 @@ kiblnd_cq_event(struct ib_event *event, void *arg) int kiblnd_scheduler(void *arg) { - long id = (long)arg; - cfs_waitlink_t wait; - char name[16]; - unsigned long flags; - kib_conn_t *conn; - struct ib_wc wc; - int rc; - int did_something; - int busy_loops = 0; + long id = (long)arg; + struct kib_sched_info *sched; + kib_conn_t *conn; + wait_queue_t wait; + unsigned long flags; + struct ib_wc wc; + int did_something; + int busy_loops = 0; + int rc; - snprintf(name, sizeof(name), "kiblnd_sd_%02ld", id); - cfs_daemonize(name); - cfs_block_allsigs(); + cfs_block_allsigs(); - cfs_waitlink_init(&wait); + init_waitqueue_entry_current(&wait); - cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags); + sched = kiblnd_data.kib_scheds[KIB_THREAD_CPT(id)]; - while (!kiblnd_data.kib_shutdown) { - if (busy_loops++ >= IBLND_RESCHED) { - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, - flags); + rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt); + if (rc != 0) { + CWARN("Failed to bind on CPT %d, please verify whether " + "all CPUs are healthy and reload modules if necessary, " + "otherwise your system might under risk of low " + "performance\n", sched->ibs_cpt); + } - cfs_cond_resched(); - busy_loops = 0; + spin_lock_irqsave(&sched->ibs_lock, flags); - cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, - flags); - } + while (!kiblnd_data.kib_shutdown) { + if (busy_loops++ >= IBLND_RESCHED) { + spin_unlock_irqrestore(&sched->ibs_lock, flags); + + cond_resched(); + busy_loops = 0; + + spin_lock_irqsave(&sched->ibs_lock, flags); + } - did_something = 0; + did_something = 0; - if (!cfs_list_empty(&kiblnd_data.kib_sched_conns)) { - conn = cfs_list_entry(kiblnd_data.kib_sched_conns.next, - kib_conn_t, ibc_sched_list); - /* take over kib_sched_conns' ref on conn... */ - LASSERT(conn->ibc_scheduled); - cfs_list_del(&conn->ibc_sched_list); - conn->ibc_ready = 0; + if (!cfs_list_empty(&sched->ibs_conns)) { + conn = cfs_list_entry(sched->ibs_conns.next, + kib_conn_t, ibc_sched_list); + /* take over kib_sched_conns' ref on conn... 
*/ + LASSERT(conn->ibc_scheduled); + cfs_list_del(&conn->ibc_sched_list); + conn->ibc_ready = 0; - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, - flags); + spin_unlock_irqrestore(&sched->ibs_lock, flags); rc = ib_poll_cq(conn->ibc_cq, 1, &wc); if (rc == 0) { @@ -3333,52 +3380,46 @@ kiblnd_scheduler(void *arg) libcfs_nid2str(conn->ibc_peer->ibp_nid), rc); kiblnd_close_conn(conn, -EIO); kiblnd_conn_decref(conn); - cfs_spin_lock_irqsave(&kiblnd_data. \ - kib_sched_lock, - flags); - continue; - } - - rc = ib_poll_cq(conn->ibc_cq, 1, &wc); - } - - if (rc < 0) { - CWARN("%s: ib_poll_cq failed: %d, " - "closing connection\n", - libcfs_nid2str(conn->ibc_peer->ibp_nid), - rc); - kiblnd_close_conn(conn, -EIO); - kiblnd_conn_decref(conn); - cfs_spin_lock_irqsave(&kiblnd_data. \ - kib_sched_lock, flags); - continue; - } - - cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, - flags); - - if (rc != 0 || conn->ibc_ready) { - /* There may be another completion waiting; get - * another scheduler to check while I handle - * this one... */ - kiblnd_conn_addref(conn); /* +1 ref for sched_conns */ - cfs_list_add_tail(&conn->ibc_sched_list, - &kiblnd_data.kib_sched_conns); - cfs_waitq_signal(&kiblnd_data.kib_sched_waitq); - } else { - conn->ibc_scheduled = 0; - } - - if (rc != 0) { - cfs_spin_unlock_irqrestore(&kiblnd_data. \ - kib_sched_lock, - flags); - - kiblnd_complete(&wc); - - cfs_spin_lock_irqsave(&kiblnd_data. \ - kib_sched_lock, - flags); + spin_lock_irqsave(&sched->ibs_lock, + flags); + continue; + } + + rc = ib_poll_cq(conn->ibc_cq, 1, &wc); + } + + if (rc < 0) { + CWARN("%s: ib_poll_cq failed: %d, " + "closing connection\n", + libcfs_nid2str(conn->ibc_peer->ibp_nid), + rc); + kiblnd_close_conn(conn, -EIO); + kiblnd_conn_decref(conn); + spin_lock_irqsave(&sched->ibs_lock, flags); + continue; + } + + spin_lock_irqsave(&sched->ibs_lock, flags); + + if (rc != 0 || conn->ibc_ready) { + /* There may be another completion waiting; get + * another scheduler to check while I handle + * this one... 
*/ + /* +1 ref for sched_conns */ + kiblnd_conn_addref(conn); + cfs_list_add_tail(&conn->ibc_sched_list, + &sched->ibs_conns); + if (waitqueue_active(&sched->ibs_waitq)) + wake_up(&sched->ibs_waitq); + } else { + conn->ibc_scheduled = 0; + } + + if (rc != 0) { + spin_unlock_irqrestore(&sched->ibs_lock, flags); + kiblnd_complete(&wc); + + spin_lock_irqsave(&sched->ibs_lock, flags); } kiblnd_conn_decref(conn); /* ...drop my ref from above */ @@ -3388,40 +3429,39 @@ kiblnd_scheduler(void *arg) if (did_something) continue; - cfs_set_current_state(CFS_TASK_INTERRUPTIBLE); - cfs_waitq_add_exclusive(&kiblnd_data.kib_sched_waitq, &wait); - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags); + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue_exclusive(&sched->ibs_waitq, &wait); + spin_unlock_irqrestore(&sched->ibs_lock, flags); - cfs_waitq_wait(&wait, CFS_TASK_INTERRUPTIBLE); - busy_loops = 0; + waitq_wait(&wait, TASK_INTERRUPTIBLE); + busy_loops = 0; - cfs_waitq_del(&kiblnd_data.kib_sched_waitq, &wait); - cfs_set_current_state(CFS_TASK_RUNNING); - cfs_spin_lock_irqsave(&kiblnd_data.kib_sched_lock, flags); - } + remove_wait_queue(&sched->ibs_waitq, &wait); + set_current_state(TASK_RUNNING); + spin_lock_irqsave(&sched->ibs_lock, flags); + } - cfs_spin_unlock_irqrestore(&kiblnd_data.kib_sched_lock, flags); + spin_unlock_irqrestore(&sched->ibs_lock, flags); - kiblnd_thread_fini(); - return (0); + kiblnd_thread_fini(); + return 0; } int kiblnd_failover_thread(void *arg) { - cfs_rwlock_t *glock = &kiblnd_data.kib_global_lock; - kib_dev_t *dev; - cfs_waitlink_t wait; - unsigned long flags; - int rc; + rwlock_t *glock = &kiblnd_data.kib_global_lock; + kib_dev_t *dev; + wait_queue_t wait; + unsigned long flags; + int rc; - LASSERT (*kiblnd_tunables.kib_dev_failover != 0); + LASSERT (*kiblnd_tunables.kib_dev_failover != 0); - cfs_daemonize ("kiblnd_failover"); - cfs_block_allsigs (); + cfs_block_allsigs (); - cfs_waitlink_init(&wait); - cfs_write_lock_irqsave(glock, flags); + init_waitqueue_entry_current(&wait); + write_lock_irqsave(glock, flags); while (!kiblnd_data.kib_shutdown) { int do_failover = 0; @@ -3439,11 +3479,11 @@ kiblnd_failover_thread(void *arg) if (do_failover) { cfs_list_del_init(&dev->ibd_fail_list); dev->ibd_failover = 1; - cfs_write_unlock_irqrestore(glock, flags); + write_unlock_irqrestore(glock, flags); - rc = kiblnd_dev_failover(dev); + rc = kiblnd_dev_failover(dev); - cfs_write_lock_irqsave(glock, flags); + write_lock_irqsave(glock, flags); LASSERT (dev->ibd_failover); dev->ibd_failover = 0; @@ -3466,15 +3506,15 @@ kiblnd_failover_thread(void *arg) /* long sleep if no more pending failover */ long_sleep = cfs_list_empty(&kiblnd_data.kib_failed_devs); - cfs_set_current_state(CFS_TASK_INTERRUPTIBLE); - cfs_waitq_add(&kiblnd_data.kib_failover_waitq, &wait); - cfs_write_unlock_irqrestore(glock, flags); + set_current_state(TASK_INTERRUPTIBLE); + add_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); + write_unlock_irqrestore(glock, flags); - rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) : - cfs_time_seconds(1)); - cfs_set_current_state(CFS_TASK_RUNNING); - cfs_waitq_del(&kiblnd_data.kib_failover_waitq, &wait); - cfs_write_lock_irqsave(glock, flags); + rc = schedule_timeout(long_sleep ? 
cfs_time_seconds(10) : + cfs_time_seconds(1)); + set_current_state(TASK_RUNNING); + remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait); + write_lock_irqsave(glock, flags); if (!long_sleep || rc != 0) continue; @@ -3491,7 +3531,7 @@ kiblnd_failover_thread(void *arg) } } - cfs_write_unlock_irqrestore(glock, flags); + write_unlock_irqrestore(glock, flags); kiblnd_thread_fini(); return 0;
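
The recurring change in this patch is the replacement of single global resources (the tx descriptor pool set, the FMR/PMR pool sets, the one scheduler lock and queue) with per-CPT instances, indexed either by hashing the peer NID (kiblnd_get_idle_tx() now takes a target and uses lnet_cpt_of_nid()) or by the owning pool's ps_cpt. A minimal standalone sketch of that selection pattern, with invented names and a fixed partition count (nothing below is the Lustre API), could look like:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define NCPT      4             /* assumed partition count */
    #define POOL_SIZE 8

    struct tx_desc { struct tx_desc *next; };

    struct tx_pool {
            pthread_mutex_t  lock;  /* contended only within one partition */
            struct tx_desc  *free;
    };

    static struct tx_pool pools[NCPT];

    /* stand-in for lnet_cpt_of_nid(): map a peer NID onto a partition */
    static unsigned cpt_of_nid(uint64_t nid)
    {
            return (unsigned)(nid % NCPT);
    }

    /* stand-in for kiblnd_get_idle_tx(ni, target): allocate from the
     * partition that owns this peer instead of from one global pool */
    static struct tx_desc *get_idle_tx(uint64_t target_nid)
    {
            struct tx_pool *tps = &pools[cpt_of_nid(target_nid)];
            struct tx_desc *tx;

            pthread_mutex_lock(&tps->lock);
            tx = tps->free;
            if (tx != NULL)
                    tps->free = tx->next;
            pthread_mutex_unlock(&tps->lock);
            return tx;              /* NULL means descriptors exhausted */
    }

    int main(void)
    {
            int i, j;

            for (i = 0; i < NCPT; i++) {
                    pthread_mutex_init(&pools[i].lock, NULL);
                    for (j = 0; j < POOL_SIZE; j++) {
                            struct tx_desc *tx = calloc(1, sizeof(*tx));
                            tx->next = pools[i].free;
                            pools[i].free = tx;
                    }
            }
            printf("tx for nid 0x12345: %p\n", (void *)get_idle_tx(0x12345));
            return 0;
    }

Two senders talking to peers on different partitions never touch the same free-list lock, which is the contention the ibn_tx_ps[] array removes. The same array-of-pointers shape replaces the old ibn_with_fmr/ibn_with_pmr booleans: a pool set exists for the net if and only if ibn_fmr_ps/ibn_pmr_ps is non-NULL, so the flag and the pointer can no longer disagree.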
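kiblnd_thread_start() is rewritten from cfs_create_thread(), which returned a PID or a negative errno in a long, to kthread_run(), which returns a struct task_struct pointer with any errno folded into the pointer value and decoded by IS_ERR()/PTR_ERR(). The encoding is simple enough to model in portable C; this mirrors the kernel macros (MAX_ERRNO is 4095 there too):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* portable model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR():
     * errnos -1..-4095 occupy the last 4095 values of the address
     * space, so one return slot carries either a pointer or an error */
    static inline void *ERR_PTR(long error)
    {
            return (void *)error;
    }

    static inline long PTR_ERR(const void *ptr)
    {
            return (long)ptr;
    }

    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *task = ERR_PTR(-12);  /* e.g. -ENOMEM from kthread_run */

            if (IS_ERR(task))
                    printf("thread start failed: %ld\n", PTR_ERR(task));
            return 0;
    }

This is also why the patch can pass a per-thread name into kiblnd_thread_start(): kthread_run() takes the name at creation, where the old code had each daemon call cfs_daemonize() on itself.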
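kiblnd_check_conns() previously closed timed-out connections one at a time, dropping the global lock and restarting the whole scan with "goto again" after each close. The new version completes a single pass under the shared lock, parking each victim on a local "closes" list (and connections that merely owe a NOOP on "checksends") with a reference held, then handles both lists after unlocking. A portable sketch of that scan-then-act shape, with invented types and a single consumer thread assumed (in the LND only connd runs this):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct conn {
            struct conn *next;        /* hash-bucket linkage */
            struct conn *connd_next;  /* local-list linkage, like ibc_connd_list */
            atomic_int   refcount;
            int          timed_out;
    };

    static pthread_rwlock_t glock = PTHREAD_RWLOCK_INITIALIZER;
    static struct conn *bucket;       /* one bucket of established connections */

    static void conn_addref(struct conn *c)
    {
            atomic_fetch_add(&c->refcount, 1);
    }

    static void conn_decref(struct conn *c)
    {
            atomic_fetch_sub(&c->refcount, 1);  /* teardown elided */
    }

    static void close_conn(struct conn *c)
    {
            fprintf(stderr, "closing %p: timed out\n", (void *)c);
    }

    static void check_conns(void)
    {
            struct conn *closes = NULL;
            struct conn *c;

            /* shared lock only: we expect most scans to find nothing */
            pthread_rwlock_rdlock(&glock);
            for (c = bucket; c != NULL; c = c->next) {
                    if (!c->timed_out)
                            continue;
                    conn_addref(c);          /* +1 ref for the local list */
                    c->connd_next = closes;  /* single connd thread writes this */
                    closes = c;
            }
            pthread_rwlock_unlock(&glock);

            /* act with no lock held; one pass, no rescan */
            while (closes != NULL) {
                    c = closes;
                    closes = c->connd_next;
                    close_conn(c);
                    conn_decref(c);
            }
    }

    int main(void)
    {
            struct conn c1 = { .timed_out = 1 };

            bucket = &c1;
            check_conns();
            return 0;
    }

The reference taken inside the lock is what keeps each parked connection alive until it is processed; the real code additionally takes each connection's ibc_lock so kiblnd_need_noop() and the tx deadlines are tested consistently, which is why kiblnd_check_txs() becomes kiblnd_check_txs_locked().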
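Deadlines throughout the file are jiffies based (tx_deadline = jiffies + timeout * HZ, and connd carries the "careful with the jiffy wrap" comment), so comparisons such as cfs_time_aftereq() must go through signed subtraction rather than a direct >=, or they misfire when the counter wraps. A self-contained illustration; the macros shown match the kernel's time_after() family:

    #include <stdio.h>

    /* correct even across a wrap, as long as the two values are
     * within LONG_MAX ticks of each other */
    #define time_after(a, b)   ((long)((b) - (a)) < 0)
    #define time_aftereq(a, b) ((long)((a) - (b)) >= 0)

    int main(void)
    {
            unsigned long jiffies  = (unsigned long)-5; /* about to wrap */
            unsigned long deadline = jiffies + 10;      /* wraps past zero */

            /* a naive 'jiffies >= deadline' claims a timeout here */
            printf("naive: %d, wrap-safe: %d\n",
                   jiffies >= deadline, time_aftereq(jiffies, deadline));
            return 0;
    }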
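On the completion path, kiblnd_cq_completion() now queues the connection on its own partition's scheduler and calls wake_up() only when waitqueue_active() says a scheduler is actually sleeping; that test is safe here because both the sleepers and the waker serialize on the same ibs_lock. The equivalent discipline with a POSIX condition variable, tracking the waiter count under the same mutex so a skipped signal cannot be lost (hypothetical names throughout):

    #include <pthread.h>

    struct sched {
            pthread_mutex_t lock;     /* models ibs_lock */
            pthread_cond_t  waitq;    /* models ibs_waitq */
            int             nwaiters; /* models waitqueue_active() */
            int             nready;   /* pending work, models ibs_conns */
    };

    static struct sched sched0 = {
            .lock  = PTHREAD_MUTEX_INITIALIZER,
            .waitq = PTHREAD_COND_INITIALIZER,
    };

    static void sched_enqueue(struct sched *s)
    {
            pthread_mutex_lock(&s->lock);
            s->nready++;
            /* safe only because nwaiters changes under s->lock; the
             * kernel's bare waitqueue_active() needs the same care */
            if (s->nwaiters > 0)
                    pthread_cond_signal(&s->waitq);
            pthread_mutex_unlock(&s->lock);
    }

    static void sched_wait_for_work(struct sched *s)
    {
            pthread_mutex_lock(&s->lock);
            while (s->nready == 0) {
                    s->nwaiters++;
                    pthread_cond_wait(&s->waitq, &s->lock);
                    s->nwaiters--;
            }
            s->nready--;
            pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
            sched_enqueue(&sched0);
            sched_wait_for_work(&sched0);  /* consumes without blocking */
            return 0;
    }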
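Finally, each scheduler thread now calls cfs_cpt_bind() to pin itself to its partition's CPUs, and on failure only warns and keeps running unbound rather than exiting. Roughly the same policy in userspace, using the GNU pthread_setaffinity_np() extension (assumed available; the CPT-to-CPU mapping is Lustre's and is not shown):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>

    /* pin the calling thread to one CPU of its partition; mirror the
     * patch's policy of warning, not failing, when binding is refused */
    static void bind_to_cpu(int cpu)
    {
            cpu_set_t set;
            int rc;

            CPU_ZERO(&set);
            CPU_SET(cpu, &set);
            rc = pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
            if (rc != 0)
                    fprintf(stderr, "bind to CPU %d failed (%d); running "
                            "unbound, locality may suffer\n", cpu, rc);
    }

    int main(void)
    {
            bind_to_cpu(0);
            return 0;
    }

Binding keeps a scheduler on the CPUs whose caches already hold its partition's connections and tx pools, which is the point of splitting the scheduler state per CPT in the first place.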