*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
msg->ibm_cksum = msg_cksum;
if (flip) {
- /* leave magic unflipped as a clue to peer endianness */
+ /* leave magic unflipped as a clue to peer_ni endianness */
msg->ibm_version = version;
CLASSERT (sizeof(msg->ibm_type) == 1);
CLASSERT (sizeof(msg->ibm_credits) == 1);
}
int
-kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
+kiblnd_create_peer(lnet_ni_t *ni, kib_peer_ni_t **peerp, lnet_nid_t nid)
{
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
kib_net_t *net = ni->ni_data;
- int cpt = lnet_cpt_of_nid(nid);
+ int cpt = lnet_cpt_of_nid(nid, ni);
unsigned long flags;
LASSERT(net != NULL);
LASSERT(nid != LNET_NID_ANY);
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
- if (peer == NULL) {
- CERROR("Cannot allocate peer\n");
+ LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
+ if (peer_ni == NULL) {
+ CERROR("Cannot allocate peer_ni\n");
return -ENOMEM;
}
- peer->ibp_ni = ni;
- peer->ibp_nid = nid;
- peer->ibp_error = 0;
- peer->ibp_last_alive = 0;
- peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
- peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+ peer_ni->ibp_ni = ni;
+ peer_ni->ibp_nid = nid;
+ peer_ni->ibp_error = 0;
+ peer_ni->ibp_last_alive = 0;
+ peer_ni->ibp_max_frags = kiblnd_cfg_rdma_frags(peer_ni->ibp_ni);
+ peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
+ atomic_set(&peer_ni->ibp_refcount, 1); /* 1 ref for caller */
- INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- INIT_LIST_HEAD(&peer->ibp_conns);
- INIT_LIST_HEAD(&peer->ibp_tx_queue);
+ INIT_LIST_HEAD(&peer_ni->ibp_list); /* not in the peer_ni table yet */
+ INIT_LIST_HEAD(&peer_ni->ibp_conns);
+ INIT_LIST_HEAD(&peer_ni->ibp_tx_queue);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- *peerp = peer;
+ *peerp = peer_ni;
return 0;
}
void
-kiblnd_destroy_peer (kib_peer_t *peer)
+kiblnd_destroy_peer (kib_peer_ni_t *peer_ni)
{
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer_ni->ibp_ni->ni_data;
LASSERT(net != NULL);
- LASSERT (atomic_read(&peer->ibp_refcount) == 0);
- LASSERT(!kiblnd_peer_active(peer));
- LASSERT(kiblnd_peer_idle(peer));
- LASSERT(list_empty(&peer->ibp_tx_queue));
+ LASSERT (atomic_read(&peer_ni->ibp_refcount) == 0);
+ LASSERT(!kiblnd_peer_active(peer_ni));
+ LASSERT(kiblnd_peer_idle(peer_ni));
+ LASSERT(list_empty(&peer_ni->ibp_tx_queue));
- LIBCFS_FREE(peer, sizeof(*peer));
+ LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
- /* NB a peer's connections keep a reference on their peer until
+ /* NB a peer_ni's connections keep a reference on their peer_ni until
* they are destroyed, so we can be assured that _all_ state to do
- * with this peer has been cleaned up when its refcount drops to
+ * with this peer_ni has been cleaned up when its refcount drops to
* zero. */
atomic_dec(&net->ibn_npeers);
}
-kib_peer_t *
-kiblnd_find_peer_locked (lnet_nid_t nid)
+kib_peer_ni_t *
+kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid)
{
/* the caller is responsible for accounting the additional reference
* that this creates */
struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct list_head *tmp;
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
-
- if (peer->ibp_nid != nid)
+ peer_ni = list_entry(tmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
+
+ /*
+ * Match a peer_ni if its NID and the NID of the local NI it
+ * communicates over are the same. Otherwise don't match
+ * the peer_ni, which will result in a new lnd peer_ni being
+ * created.
+ */
+ if (peer_ni->ibp_nid != nid ||
+ peer_ni->ibp_ni->ni_nid != ni->ni_nid)
continue;
- CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
- peer, libcfs_nid2str(nid),
- atomic_read(&peer->ibp_refcount),
- peer->ibp_version);
- return peer;
+ CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n",
+ peer_ni, libcfs_nid2str(nid),
+ atomic_read(&peer_ni->ibp_refcount),
+ peer_ni->ibp_version);
+ return peer_ni;
}
return NULL;
}
void
-kiblnd_unlink_peer_locked (kib_peer_t *peer)
+kiblnd_unlink_peer_locked (kib_peer_ni_t *peer_ni)
{
- LASSERT(list_empty(&peer->ibp_conns));
+ LASSERT(list_empty(&peer_ni->ibp_conns));
- LASSERT (kiblnd_peer_active(peer));
- list_del_init(&peer->ibp_list);
+ LASSERT (kiblnd_peer_active(peer_ni));
+ list_del_init(&peer_ni->ibp_list);
/* lose peerlist's ref */
- kiblnd_peer_decref(peer);
+ kiblnd_peer_decref(peer_ni);
}
static int
kiblnd_get_peer_info(lnet_ni_t *ni, int index,
lnet_nid_t *nidp, int *count)
{
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
struct list_head *ptmp;
int i;
unsigned long flags;
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
if (index-- > 0)
continue;
- *nidp = peer->ibp_nid;
- *count = atomic_read(&peer->ibp_refcount);
+ *nidp = peer_ni->ibp_nid;
+ *count = atomic_read(&peer_ni->ibp_refcount);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
}
static void
-kiblnd_del_peer_locked (kib_peer_t *peer)
+kiblnd_del_peer_locked (kib_peer_ni_t *peer_ni)
{
struct list_head *ctmp;
struct list_head *cnxt;
kib_conn_t *conn;
- if (list_empty(&peer->ibp_conns)) {
- kiblnd_unlink_peer_locked(peer);
+ if (list_empty(&peer_ni->ibp_conns)) {
+ kiblnd_unlink_peer_locked(peer_ni);
} else {
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
kiblnd_close_conn_locked(conn, 0);
}
- /* NB closing peer's last conn unlinked it. */
+ /* NB closing peer_ni's last conn unlinked it. */
}
- /* NB peer now unlinked; might even be freed if the peer table had the
+ /* NB peer_ni now unlinked; might even be freed if the peer_ni table had the
* last ref on it. */
}
struct list_head zombies = LIST_HEAD_INIT(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
int lo;
int hi;
int i;
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
+ if (!(nid == LNET_NID_ANY || peer_ni->ibp_nid == nid))
continue;
- if (!list_empty(&peer->ibp_tx_queue)) {
- LASSERT(list_empty(&peer->ibp_conns));
+ if (!list_empty(&peer_ni->ibp_tx_queue)) {
+ LASSERT(list_empty(&peer_ni->ibp_conns));
- list_splice_init(&peer->ibp_tx_queue,
+ list_splice_init(&peer_ni->ibp_tx_queue,
&zombies);
}
- kiblnd_del_peer_locked(peer);
+ kiblnd_del_peer_locked(peer_ni);
rc = 0; /* matched something */
}
}
static kib_conn_t *
kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
struct list_head *ptmp;
kib_conn_t *conn;
struct list_head *ctmp;
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- list_for_each(ctmp, &peer->ibp_conns) {
+ list_for_each(ctmp, &peer_ni->ibp_conns) {
if (index-- > 0)
continue;
kiblnd_debug_tx (kib_tx_t *tx)
{
CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx "
- "cookie "LPX64" msg %s%s type %x cred %d\n",
+ "cookie %#llx msg %s%s type %x cred %d\n",
tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
tx->tx_status, tx->tx_deadline, tx->tx_cookie,
tx->tx_lntmsg[0] == NULL ? "-" : "!",
int vectors;
int off;
int i;
+ lnet_nid_t ibp_nid;
vectors = conn->ibc_cmid->device->num_comp_vectors;
if (vectors <= 1)
mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
/* hash NID to CPU id in this partition... */
- off = conn->ibc_peer->ibp_nid % cpumask_weight(mask);
+ ibp_nid = conn->ibc_peer->ibp_nid;
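+ /* do_div() divides ibp_nid in place and returns the remainder; it is
+ * used so the 64-bit NID can be reduced modulo the mask weight without
+ * a plain 64-bit '%', which isn't available on all 32-bit builds. */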
+ off = do_div(ibp_nid, cpumask_weight(mask));
for_each_cpu(i, mask) {
if (off-- == 0)
return i % vectors;
}
kib_conn_t *
-kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+kiblnd_create_conn(kib_peer_ni_t *peer_ni, struct rdma_cm_id *cmid,
int state, int version)
{
/* CAVEAT EMPTOR:
* If the new conn is created successfully it takes over the caller's
- * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
- * is destroyed. On failure, the caller's ref on 'peer' remains and
+ * ref on 'peer_ni'. It also "owns" 'cmid' and destroys it when it itself
+ * is destroyed. On failure, the caller's ref on 'peer_ni' remains and
* she must dispose of 'cmid'. (Actually I'd block forever if I tried
* to destroy 'cmid' here since I'm called from the CM which still has
* its ref on 'cmid'). */
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer_ni->ibp_ni->ni_data;
kib_dev_t *dev;
struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched;
dev = net->ibn_dev;
- cpt = lnet_cpt_of_nid(peer->ibp_nid);
+ cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni);
sched = kiblnd_data.kib_scheds[cpt];
LASSERT(sched->ibs_nthreads > 0);
sizeof(*init_qp_attr));
if (init_qp_attr == NULL) {
CERROR("Can't allocate qp_attr for %s\n",
- libcfs_nid2str(peer->ibp_nid));
+ libcfs_nid2str(peer_ni->ibp_nid));
goto failed_0;
}
LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
if (conn == NULL) {
CERROR("Can't allocate connection for %s\n",
- libcfs_nid2str(peer->ibp_nid));
+ libcfs_nid2str(peer_ni->ibp_nid));
goto failed_1;
}
conn->ibc_state = IBLND_CONN_INIT;
conn->ibc_version = version;
- conn->ibc_peer = peer; /* I take the caller's ref */
+ conn->ibc_peer = peer_ni; /* I take the caller's ref */
cmid->context = conn; /* for future CM callbacks */
conn->ibc_cmid = cmid;
- conn->ibc_max_frags = peer->ibp_max_frags;
- conn->ibc_queue_depth = peer->ibp_queue_depth;
+ conn->ibc_max_frags = peer_ni->ibp_max_frags;
+ conn->ibc_queue_depth = peer_ni->ibp_queue_depth;
INIT_LIST_HEAD(&conn->ibc_early_rxs);
INIT_LIST_HEAD(&conn->ibc_tx_noops);
conn->ibc_sched = sched;
- rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (rc != 0) {
- CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
- rc, init_qp_attr->cap.max_send_wr,
- init_qp_attr->cap.max_recv_wr);
- goto failed_2;
- }
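+ /* Retry QP creation with progressively fewer send work requests
+ * (a quarter less each pass) if creation fails, e.g. because the HCA
+ * cannot satisfy the request; give up once fewer than 16 remain. */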
+ do {
+ rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
+ if (!rc || init_qp_attr->cap.max_send_wr < 16)
+ break;
- LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+ init_qp_attr->cap.max_send_wr -= init_qp_attr->cap.max_send_wr / 4;
+ } while (rc);
+
+ if (rc) {
+ CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
+ rc, init_qp_attr->cap.max_send_wr,
+ init_qp_attr->cap.max_recv_wr);
+ goto failed_2;
+ }
+
+ if (init_qp_attr->cap.max_send_wr != IBLND_SEND_WRS(conn))
+ CDEBUG(D_NET, "original send wr %d, created with %d\n",
+ IBLND_SEND_WRS(conn), init_qp_attr->cap.max_send_wr);
- /* 1 ref for caller and each rxmsg */
+ LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+
+ /* 1 ref for caller and each rxmsg */
atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
conn->ibc_nrx = IBLND_RX_MSGS(conn);
- /* post receives */
+ /* post receives */
for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
- rc = kiblnd_post_rx(&conn->ibc_rxs[i],
- IBLND_POSTRX_NO_CREDIT);
+ rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT);
if (rc != 0) {
CERROR("Can't post rxmsg: %d\n", rc);
kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
- kib_peer_t *peer = conn->ibc_peer;
+ kib_peer_ni_t *peer_ni = conn->ibc_peer;
int rc;
LASSERT (!in_interrupt());
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
if (conn->ibc_state != IBLND_CONN_INIT) {
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer_ni->ibp_ni->ni_data;
- kiblnd_peer_decref(peer);
+ kiblnd_peer_decref(peer_ni);
rdma_destroy_id(cmid);
atomic_dec(&net->ibn_nconns);
}
}
int
-kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
+kiblnd_close_peer_conns_locked(kib_peer_ni_t *peer_ni, int why)
{
kib_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
CDEBUG(D_NET, "Closing conn -> %s, "
"version: %x, reason: %d\n",
- libcfs_nid2str(peer->ibp_nid),
+ libcfs_nid2str(peer_ni->ibp_nid),
conn->ibc_version, why);
kiblnd_close_conn_locked(conn, why);
}
int
-kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+kiblnd_close_stale_conns_locked(kib_peer_ni_t *peer_ni,
int version, __u64 incarnation)
{
kib_conn_t *conn;
struct list_head *cnxt;
int count = 0;
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
if (conn->ibc_version == version &&
continue;
CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
- "incarnation:"LPX64"(%x, "LPX64")\n",
- libcfs_nid2str(peer->ibp_nid),
+ "incarnation:%#llx(%x, %#llx)\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
conn->ibc_version, conn->ibc_incarnation,
version, incarnation);
static int
kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
+ if (!(nid == LNET_NID_ANY || nid == peer_ni->ibp_nid))
continue;
- count += kiblnd_close_peer_conns_locked(peer, 0);
+ count += kiblnd_close_peer_conns_locked(peer_ni, 0);
}
}
cfs_time_t last_alive = 0;
cfs_time_t now = cfs_time_current();
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
unsigned long flags;
read_lock_irqsave(glock, flags);
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL)
- last_alive = peer->ibp_last_alive;
+ peer_ni = kiblnd_find_peer_locked(ni, nid);
+ if (peer_ni != NULL)
+ last_alive = peer_ni->ibp_last_alive;
read_unlock_irqrestore(glock, flags);
if (last_alive != 0)
*when = last_alive;
- /* peer is not persistent in hash, trigger peer creation
+ /* peer_ni is not persistent in hash, trigger peer_ni creation
* and connection establishment with a NULL tx */
- if (peer == NULL)
+ if (peer_ni == NULL)
kiblnd_launch_tx(ni, NULL, nid);
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
- libcfs_nid2str(nid), peer,
+ CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago\n",
+ libcfs_nid2str(nid), peer_ni,
last_alive ? cfs_duration_sec(now - last_alive) : -1);
return;
}
rx->rx_msgaddr));
KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
- CDEBUG(D_NET, "rx %d: %p "LPX64"("LPX64")\n",
+ CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
i, rx->rx_msg, rx->rx_msgaddr,
(__u64)(page_to_phys(pg) + pg_off));
if (i++ % 50 == 0)
CDEBUG(D_NET, "%s: Wait for failover\n",
dev->ibd_ifname);
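+ /* schedule_timeout() only sleeps if the task state is set first;
+ * in TASK_RUNNING it returns without waiting and this loop would
+ * spin. */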
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1) / 100);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
struct ib_mr *
-kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd,
+kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
int negotiated_nfrags)
{
- __u16 nfrags = (negotiated_nfrags != -1) ?
- negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand;
+ kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int mod;
+ __u16 nfrags;
+
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
LASSERT(hdev->ibh_mrs != NULL);
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- nfrags <= rd->rd_nfrags)
+ if (mod > 0 && nfrags <= rd->rd_nfrags)
return NULL;
return hdev->ibh_mrs;
}
static void
-kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
+kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
{
- LASSERT (pool->fpo_map_count == 0);
+ LASSERT(fpo->fpo_map_count == 0);
- if (pool->fpo_fmr_pool != NULL)
- ib_destroy_fmr_pool(pool->fpo_fmr_pool);
+ if (fpo->fpo_is_fmr) {
+ if (fpo->fmr.fpo_fmr_pool)
+ ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ } else {
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i = 0;
+
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+#ifndef HAVE_IB_MAP_MR_SG
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ i++;
+ }
+ if (i < fpo->fast_reg.fpo_pool_size)
+ CERROR("FastReg pool still has %d regions registered\n",
+ fpo->fast_reg.fpo_pool_size - i);
+ }
- if (pool->fpo_hdev != NULL)
- kiblnd_hdev_decref(pool->fpo_hdev);
+ if (fpo->fpo_hdev)
+ kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
+ LIBCFS_FREE(fpo, sizeof(*fpo));
}
static void
kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *pool;
+ kib_fmr_pool_t *fpo, *tmp;
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
- list_del(&pool->fpo_list);
- kiblnd_destroy_fmr_pool(pool);
+ list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
+ list_del(&fpo->fpo_list);
+ kiblnd_destroy_fmr_pool(fpo);
}
}
-static int kiblnd_fmr_pool_size(int ncpts)
+static int
+kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
+ int size = tunables->lnd_fmr_pool_size / ncpts;
return max(IBLND_FMR_POOL, size);
}
-static int kiblnd_fmr_flush_trigger(int ncpts)
+static int
+kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
+ int size = tunables->lnd_fmr_flush_trigger / ncpts;
return max(IBLND_FMR_POOL_FLUSH, size);
}
-static int
-kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
+static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
{
- /* FMR pool for RDMA */
- kib_dev_t *dev = fps->fps_net->ibn_dev;
- kib_fmr_pool_t *fpo;
- struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
- .page_shift = PAGE_SHIFT,
- .access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE),
+ struct ib_fmr_pool_param param = {
+ .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
+ .page_shift = PAGE_SHIFT,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
.pool_size = fps->fps_pool_size,
.dirty_watermark = fps->fps_flush_trigger,
.flush_function = NULL,
.flush_arg = NULL,
- .cache = !!*kiblnd_tunables.kib_fmr_cache};
+ .cache = !!fps->fps_cache };
+ int rc = 0;
+
+ fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
+ ¶m);
+ if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
+ rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
+ if (rc != -ENOSYS)
+ CERROR("Failed to create FMR pool: %d\n", rc);
+ else
+ CERROR("FMRs are not supported\n");
+ }
+
+ return rc;
+}
+
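+/* Pre-allocate fps_pool_size fast registration descriptors, each with its
+ * own MR (and, on kernels without ib_map_mr_sg(), a fast_reg page list),
+ * and put them on the pool's free list. */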
+static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+{
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i, rc;
+
+ INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size = 0;
+ for (i = 0; i < fps->fps_pool_size; i++) {
+ LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
+ sizeof(*frd));
+ if (!frd) {
+ CERROR("Failed to allocate a new fast_reg descriptor\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ frd->frd_mr = NULL;
+
+#ifndef HAVE_IB_MAP_MR_SG
+ frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
+ LNET_MAX_PAYLOAD/PAGE_SIZE);
+ if (IS_ERR(frd->frd_frpl)) {
+ rc = PTR_ERR(frd->frd_frpl);
+ CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
+ rc);
+ frd->frd_frpl = NULL;
+ goto out_middle;
+ }
+#endif
+
+#ifdef HAVE_IB_ALLOC_FAST_REG_MR
+ frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
+ LNET_MAX_PAYLOAD/PAGE_SIZE);
+#else
+ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
+ IB_MR_TYPE_MEM_REG,
+ LNET_MAX_PAYLOAD/PAGE_SIZE);
+#endif
+ if (IS_ERR(frd->frd_mr)) {
+ rc = PTR_ERR(frd->frd_mr);
+ CERROR("Failed to allocate ib_fast_reg_mr: %d\n", rc);
+ frd->frd_mr = NULL;
+ goto out_middle;
+ }
+
+ /* There appears to be a bug in MLX5 code where you must
+ * invalidate the rkey of a new FastReg pool before first
+ * using it. Thus, I am marking the FRD invalid here. */
+ frd->frd_valid = false;
+
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size++;
+ }
+
+ return 0;
+
+out_middle:
+ if (frd->frd_mr)
+ ib_dereg_mr(frd->frd_mr);
+#ifndef HAVE_IB_MAP_MR_SG
+ if (frd->frd_frpl)
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
+ LIBCFS_FREE(frd, sizeof(*frd));
+
+out:
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+#ifndef HAVE_IB_MAP_MR_SG
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ }
+
+ return rc;
+}
+
+static int
+kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
+{
+ struct ib_device_attr *dev_attr;
+ kib_dev_t *dev = fps->fps_net->ibn_dev;
+ kib_fmr_pool_t *fpo;
int rc;
- LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
- if (fpo == NULL)
+#ifndef HAVE_IB_DEVICE_ATTRS
+ dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL);
+ if (!dev_attr)
return -ENOMEM;
+#endif
+
+ LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
+ if (!fpo) {
+ rc = -ENOMEM;
+ goto out_dev_attr;
+ }
fpo->fpo_hdev = kiblnd_current_hdev(dev);
- fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, ¶m);
- if (IS_ERR(fpo->fpo_fmr_pool)) {
- rc = PTR_ERR(fpo->fpo_fmr_pool);
- CERROR("Failed to create FMR pool: %d\n", rc);
+#ifdef HAVE_IB_DEVICE_ATTRS
+ dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs;
+#else
+ rc = ib_query_device(fpo->fpo_hdev->ibh_ibdev, dev_attr);
+ if (rc) {
+ CERROR("Query device failed for %s: %d\n",
+ fpo->fpo_hdev->ibh_ibdev->name, rc);
+ goto out_dev_attr;
+ }
+#endif
- kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
- return rc;
- }
+ /* Check for FMR or FastReg support */
+ fpo->fpo_is_fmr = 0;
+ if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
+ LCONSOLE_INFO("Using FMR for registration\n");
+ fpo->fpo_is_fmr = 1;
+ } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ LCONSOLE_INFO("Using FastReg for registration\n");
+ } else {
+ rc = -ENOSYS;
+ LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs or FastRegs, can't register memory\n");
+ goto out_dev_attr;
+ }
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
- *pp_fpo = fpo;
+ if (fpo->fpo_is_fmr)
+ rc = kiblnd_alloc_fmr_pool(fps, fpo);
+ else
+ rc = kiblnd_alloc_freg_pool(fps, fpo);
+ if (rc)
+ goto out_fpo;
- return 0;
+#ifndef HAVE_IB_DEVICE_ATTRS
+ kfree(dev_attr);
+#endif
+ fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_owner = fps;
+ *pp_fpo = fpo;
+
+ return 0;
+
+out_fpo:
+ kiblnd_hdev_decref(fpo->fpo_hdev);
+ LIBCFS_FREE(fpo, sizeof(*fpo));
+
+out_dev_attr:
+#ifndef HAVE_IB_DEVICE_ATTRS
+ kfree(dev_attr);
+#endif
+
+ return rc;
}
static void
}
static int
-kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net,
- int pool_size, int flush_trigger)
+kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
+ kib_net_t *net,
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
kib_fmr_pool_t *fpo;
int rc;
fps->fps_net = net;
fps->fps_cpt = cpt;
- fps->fps_pool_size = pool_size;
- fps->fps_flush_trigger = flush_trigger;
+
+ fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
+ fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
+ fps->fps_cache = tunables->lnd_fmr_cache;
+
spin_lock_init(&fps->fps_lock);
INIT_LIST_HEAD(&fps->fps_pool_list);
INIT_LIST_HEAD(&fps->fps_failed_pool_list);
return cfs_time_aftereq(now, fpo->fpo_deadline);
}
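+/* Break the RDMA fragments of @rd into HCA page-sized pieces and record
+ * their addresses in tx->tx_pages; returns the number of pages filled in. */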
+static int
+kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+{
+ kib_hca_dev_t *hdev;
+ __u64 *pages = tx->tx_pages;
+ int npages;
+ int size;
+ int i;
+
+ hdev = tx->tx_pool->tpo_hdev;
+
+ for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+ for (size = 0; size < rd->rd_frags[i].rf_nob;
+ size += hdev->ibh_page_size) {
+ pages[npages++] = (rd->rd_frags[i].rf_addr &
+ hdev->ibh_page_mask) + size;
+ }
+ }
+
+ return npages;
+}
+
void
kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
struct list_head zombies = LIST_HEAD_INIT(zombies);
kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps = fpo->fpo_owner;
+ kib_fmr_poolset_t *fps;
cfs_time_t now = cfs_time_current();
kib_fmr_pool_t *tmp;
int rc;
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(rc == 0);
+ if (!fpo)
+ return;
- if (status != 0) {
- rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT(rc == 0);
- }
+ fps = fpo->fpo_owner;
+ if (fpo->fpo_is_fmr) {
+ if (fmr->fmr_pfmr) {
+ rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
+ LASSERT(!rc);
+ fmr->fmr_pfmr = NULL;
+ }
+ if (status) {
+ rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ LASSERT(!rc);
+ }
+ } else {
+ struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
+
+ if (frd) {
+ frd->frd_valid = false;
+ spin_lock(&fps->fps_lock);
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ spin_unlock(&fps->fps_lock);
+ fmr->fmr_frd = NULL;
+ }
+ }
fmr->fmr_pool = NULL;
- fmr->fmr_pfmr = NULL;
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--; /* decref the pool */
}
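+/* Map the memory described by @rd for @tx with either an FMR or a FastReg
+ * descriptor from the pool-set; on success *fmr records the pool, key and
+ * descriptor used so the mapping can be released later. */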
int
-kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u64 iov, kib_fmr_t *fmr)
+kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx, kib_rdma_desc_t *rd,
+ __u32 nob, __u64 iov, kib_fmr_t *fmr)
{
- struct ib_pool_fmr *pfmr;
- kib_fmr_pool_t *fpo;
- __u64 version;
- int rc;
+ kib_fmr_pool_t *fpo;
+ __u64 *pages = tx->tx_pages;
+ __u64 version;
+ bool is_rx = (rd != tx->tx_rd);
+ bool tx_pages_mapped = 0;
+ int npages = 0;
+ int rc;
again:
spin_lock(&fps->fps_lock);
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_map_count++;
- spin_unlock(&fps->fps_lock);
- pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
- pages, npages, iov);
- if (likely(!IS_ERR(pfmr))) {
- fmr->fmr_pool = fpo;
- fmr->fmr_pfmr = pfmr;
- return 0;
- }
+ if (fpo->fpo_is_fmr) {
+ struct ib_pool_fmr *pfmr;
+
+ spin_unlock(&fps->fps_lock);
+
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = 1;
+ }
+
+ pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
+ pages, npages, iov);
+ if (likely(!IS_ERR(pfmr))) {
+ fmr->fmr_key = is_rx ? pfmr->fmr->rkey
+ : pfmr->fmr->lkey;
+ fmr->fmr_frd = NULL;
+ fmr->fmr_pfmr = pfmr;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ rc = PTR_ERR(pfmr);
+ } else {
+ if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
+ struct kib_fast_reg_descriptor *frd;
+#ifdef HAVE_IB_MAP_MR_SG
+ struct ib_reg_wr *wr;
+ int n;
+#else
+ struct ib_rdma_wr *wr;
+ struct ib_fast_reg_page_list *frpl;
+#endif
+ struct ib_mr *mr;
+
+ frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
+ struct kib_fast_reg_descriptor,
+ frd_list);
+ list_del(&frd->frd_list);
+ spin_unlock(&fps->fps_lock);
+
+#ifndef HAVE_IB_MAP_MR_SG
+ frpl = frd->frd_frpl;
+#endif
+ mr = frd->frd_mr;
+
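+ /* This descriptor's key has already been used (or was never
+ * invalidated); build a LOCAL_INV work request for it and bump
+ * the key before the MR is registered again. */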
+ if (!frd->frd_valid) {
+ struct ib_rdma_wr *inv_wr;
+ __u32 key = is_rx ? mr->rkey : mr->lkey;
+
+ inv_wr = &frd->frd_inv_wr;
+ memset(inv_wr, 0, sizeof(*inv_wr));
+
+ inv_wr->wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr.wr_id = IBLND_WID_MR;
+ inv_wr->wr.ex.invalidate_rkey = key;
+
+ /* Bump the key */
+ key = ib_inc_rkey(key);
+ ib_update_fast_reg_key(mr, key);
+ }
+
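+ /* Newer kernels: map the fragment scatterlist into the MR with
+ * ib_map_mr_sg() and build an IB_WR_REG_MR registration request. */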
+#ifdef HAVE_IB_MAP_MR_SG
+#ifdef HAVE_IB_MAP_MR_SG_5ARGS
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, NULL, PAGE_SIZE);
+#else
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, PAGE_SIZE);
+#endif
+ if (unlikely(n != tx->tx_nfrags)) {
+ CERROR("Failed to map mr %d/%d "
+ "elements\n", n, tx->tx_nfrags);
+ return n < 0 ? n : -EINVAL;
+ }
+
+ mr->iova = iov;
+
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = is_rx ? mr->rkey : mr->lkey;
+ wr->access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+#else
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = 1;
+ }
+
+ LASSERT(npages <= frpl->max_page_list_len);
+ memcpy(frpl->page_list, pages,
+ sizeof(*pages) * npages);
+
+ /* Prepare FastReg WR */
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+
+ wr->wr.opcode = IB_WR_FAST_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+
+ wr->wr.wr.fast_reg.iova_start = iov;
+ wr->wr.wr.fast_reg.page_list = frpl;
+ wr->wr.wr.fast_reg.page_list_len = npages;
+ wr->wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+ wr->wr.wr.fast_reg.length = nob;
+ wr->wr.wr.fast_reg.rkey =
+ is_rx ? mr->rkey : mr->lkey;
+ wr->wr.wr.fast_reg.access_flags =
+ (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+#endif
+
+ fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
+ fmr->fmr_frd = frd;
+ fmr->fmr_pfmr = NULL;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ spin_unlock(&fps->fps_lock);
+ rc = -EBUSY;
+ }
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--;
- if (PTR_ERR(pfmr) != -EAGAIN) {
+ if (rc != -EAGAIN) {
spin_unlock(&fps->fps_lock);
- return PTR_ERR(pfmr);
+ return rc;
}
/* EAGAIN and ... */
struct list_head *node;
kib_pool_t *pool;
int rc;
+ unsigned int interval = 1;
+ cfs_time_t time_before;
+ unsigned int trips = 0;
again:
spin_lock(&ps->ps_lock);
if (ps->ps_increasing) {
/* another thread is allocating a new pool */
spin_unlock(&ps->ps_lock);
+ trips++;
CDEBUG(D_NET, "Another thread is allocating new "
- "%s pool, waiting for her to complete\n",
- ps->ps_name);
- schedule();
+ "%s pool, waiting %d HZs for her to complete."
+ "trips = %d\n",
+ ps->ps_name, interval, trips);
+
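+ /* Sleep with exponential backoff, capped at one second, while the
+ * other thread finishes creating the pool. */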
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(interval);
+ if (interval < cfs_time_seconds(1))
+ interval *= 2;
+
goto again;
}
spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
-
+ time_before = cfs_time_current();
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
+ CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
+ cfs_time_current() - time_before);
spin_lock(&ps->ps_lock);
ps->ps_increasing = 0;
sizeof(*tx->tx_pages));
if (tx->tx_frags != NULL)
LIBCFS_FREE(tx->tx_frags,
- IBLND_MAX_RDMA_FRAGS *
- sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (tx->tx_wrq != NULL)
LIBCFS_FREE(tx->tx_wrq,
(1 + IBLND_MAX_RDMA_FRAGS) *
}
LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (tx->tx_frags == NULL)
break;
- sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
+ sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
}
static int
-kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
+kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts, int ncpts)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
unsigned long flags;
int cpt;
int rc;
int i;
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_map_on_demand == 0) {
+ if (tunables->lnd_map_on_demand == 0) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
goto create_tx_pool;
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_fmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
+ if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_fmr_pool_size,
+ tunables->lnd_fmr_pool_size,
*kiblnd_tunables.kib_ntx / 4);
rc = -EINVAL;
goto failed;
for (i = 0; i < ncpts; i++) {
cpt = (cpts == NULL) ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
- kiblnd_fmr_pool_size(ncpts),
- kiblnd_fmr_flush_trigger(ncpts));
+ rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
+ net, tunables);
if (rc != 0) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
static int
kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
{
- struct ib_device_attr *attr;
- int rc;
+#ifndef HAVE_IB_DEVICE_ATTRS
+ struct ib_device_attr *attr;
+ int rc;
+#endif
/* It's safe to assume a HCA can handle a page size
* matching that of the native system */
hdev->ibh_page_size = 1 << PAGE_SHIFT;
hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
+#ifdef HAVE_IB_DEVICE_ATTRS
+ hdev->ibh_mr_size = hdev->ibh_ibdev->attrs.max_mr_size;
+#else
LIBCFS_ALLOC(attr, sizeof(*attr));
if (attr == NULL) {
CERROR("Out of memory\n");
CERROR("Failed to query IB device: %d\n", rc);
return rc;
}
+#endif
if (hdev->ibh_mr_size == ~0ULL) {
hdev->ibh_mr_shift = 64;
return 0;
}
- CERROR("Invalid mr size: "LPX64"\n", hdev->ibh_mr_size);
+ CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
return -EINVAL;
}
if (dev == NULL)
return NULL;
- memset(dev, 0, sizeof(*dev));
netdev = dev_get_by_name(&init_net, ifname);
if (netdev == NULL) {
dev->ibd_can_failover = 0;
/* nuke all existing peers within this net */
kiblnd_del_peer(ni, LNET_NID_ANY);
- /* Wait for all peer state to clean up */
+ /* Wait for all peer_ni state to clean up */
i = 2;
while (atomic_read(&net->ibn_npeers) != 0) {
i++;
unsigned long flags;
int rc;
int newdev;
+ int node_id;
- LASSERT (ni->ni_lnd == &the_o2iblnd);
+ LASSERT (ni->ni_net->net_lnd == &the_o2iblnd);
if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
rc = kiblnd_base_startup();
if (net == NULL)
goto failed;
- memset(net, 0, sizeof(*net));
-
do_gettimeofday(&tv);
net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
- ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
- ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
- ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
- ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
+ kiblnd_tunables_setup(ni);
if (ni->ni_interfaces[0] != NULL) {
/* Use the IPoIB interface specified in 'networks=' */
newdev = ibdev == NULL;
/* hmm...create kib_dev even for alias */
if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
- ibdev = kiblnd_create_dev(ifname);
+ ibdev = kiblnd_create_dev(ifname);
- if (ibdev == NULL)
- goto failed;
+ if (ibdev == NULL)
+ goto failed;
+
+ node_id = dev_to_node(ibdev->ibd_hdev->ibh_ibdev->dma_device);
+ ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
- net->ibn_dev = ibdev;
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
+ net->ibn_dev = ibdev;
+ ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
rc = kiblnd_dev_start_threads(ibdev, newdev,
ni->ni_cpts, ni->ni_ncpts);
if (rc != 0)
goto failed;
- rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
+ rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
if (rc != 0) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
static void __exit ko2iblnd_exit(void)
{
lnet_unregister_lnd(&the_o2iblnd);
- kiblnd_tunables_fini();
}
static int __init ko2iblnd_init(void)