* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2015, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Author: Eric Barton <eric@bartonsoftware.com>
*/
+#include <asm/page.h>
#include "o2iblnd.h"
-lnd_t the_o2iblnd = {
- .lnd_type = O2IBLND,
- .lnd_startup = kiblnd_startup,
- .lnd_shutdown = kiblnd_shutdown,
- .lnd_ctl = kiblnd_ctl,
- .lnd_query = kiblnd_query,
- .lnd_send = kiblnd_send,
- .lnd_recv = kiblnd_recv,
-};
+static lnd_t the_o2iblnd;
kib_data_t kiblnd_data;
-__u32
+static __u32
kiblnd_cksum (void *ptr, int nob)
{
	char *c = ptr;
	__u32 sum = 0;

	while (nob-- > 0)
		sum = ((sum << 1) | (sum >> 31)) + *c++;
	return (sum == 0) ? 1 : sum; /* never return 0: 0 == no checksum */
}
- memset(peer, 0, sizeof(*peer)); /* zero flags etc */
-
- peer->ibp_ni = ni;
- peer->ibp_nid = nid;
- peer->ibp_error = 0;
- peer->ibp_last_alive = 0;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+ peer->ibp_ni = ni;
+ peer->ibp_nid = nid;
+ peer->ibp_error = 0;
+ peer->ibp_last_alive = 0;
+ peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
+ peer->ibp_queue_depth = ni->ni_peertxcredits;
+ atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
- CFS_INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- CFS_INIT_LIST_HEAD(&peer->ibp_conns);
- CFS_INIT_LIST_HEAD(&peer->ibp_tx_queue);
+ INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
+ INIT_LIST_HEAD(&peer->ibp_conns);
+ INIT_LIST_HEAD(&peer->ibp_tx_queue);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- /* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT (net->ibn_shutdown == 0);
+ /* always called with a ref on ni, which prevents ni being shutdown */
+ LASSERT(net->ibn_shutdown == 0);
- /* npeers only grows with the global lock held */
+ /* npeers only grows with the global lock held */
atomic_inc(&net->ibn_npeers);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- *peerp = peer;
- return 0;
+ *peerp = peer;
+ return 0;
}
void
kiblnd_destroy_peer (kib_peer_t *peer)
{
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer->ibp_ni->ni_data;
- LASSERT (net != NULL);
+ LASSERT(net != NULL);
LASSERT (atomic_read(&peer->ibp_refcount) == 0);
- LASSERT (!kiblnd_peer_active(peer));
- LASSERT (peer->ibp_connecting == 0);
- LASSERT (peer->ibp_accepting == 0);
- LASSERT (cfs_list_empty(&peer->ibp_conns));
- LASSERT (cfs_list_empty(&peer->ibp_tx_queue));
-
- LIBCFS_FREE(peer, sizeof(*peer));
-
- /* NB a peer's connections keep a reference on their peer until
- * they are destroyed, so we can be assured that _all_ state to do
- * with this peer has been cleaned up when its refcount drops to
- * zero. */
+ LASSERT(!kiblnd_peer_active(peer));
+ LASSERT(kiblnd_peer_idle(peer));
+ LASSERT(list_empty(&peer->ibp_tx_queue));
+
+ LIBCFS_FREE(peer, sizeof(*peer));
+
+ /* NB a peer's connections keep a reference on their peer until
+ * they are destroyed, so we can be assured that _all_ state to do
+ * with this peer has been cleaned up when its refcount drops to
+ * zero. */
atomic_dec(&net->ibn_npeers);
}
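/*
 * For context: the refcounting contract described above is enforced by the
 * addref/decref helpers in o2iblnd.h. A minimal sketch of the decref side
 * (paraphrased, not the verbatim header):
 *
 *	#define kiblnd_peer_decref(peer)                                \
 *	do {                                                            \
 *		if (atomic_dec_and_test(&(peer)->ibp_refcount))         \
 *			kiblnd_destroy_peer(peer);                      \
 *	} while (0)
 *
 * so kiblnd_destroy_peer() runs exactly once, after the peer table and
 * every connection have dropped their references.
 */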
kib_peer_t *
kiblnd_find_peer_locked (lnet_nid_t nid)
{
-	/* the caller is responsible for accounting the additional reference
-	 * that this creates */
-	cfs_list_t *peer_list = kiblnd_nid2peerlist(nid);
-	cfs_list_t *tmp;
-	kib_peer_t *peer;
-
-	cfs_list_for_each (tmp, peer_list) {
-		peer = cfs_list_entry(tmp, kib_peer_t, ibp_list);
-
-		LASSERT (peer->ibp_connecting > 0 || /* creating conns */
-			 peer->ibp_accepting > 0 ||
-			 !cfs_list_empty(&peer->ibp_conns)); /* active conn */
+	/* the caller is responsible for accounting the additional reference
+	 * that this creates */
+	struct list_head *peer_list = kiblnd_nid2peerlist(nid);
+	struct list_head *tmp;
+	kib_peer_t *peer;
+
+	list_for_each(tmp, peer_list) {
+		peer = list_entry(tmp, kib_peer_t, ibp_list);
+		LASSERT(!kiblnd_peer_idle(peer));
- if (peer->ibp_nid != nid)
- continue;
+ if (peer->ibp_nid != nid)
+ continue;
- CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
- peer, libcfs_nid2str(nid),
+ CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
+ peer, libcfs_nid2str(nid),
atomic_read(&peer->ibp_refcount),
- peer->ibp_version);
- return peer;
- }
- return NULL;
+ peer->ibp_version);
+ return peer;
+ }
+ return NULL;
}
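/*
 * For context: kiblnd_nid2peerlist() used above is a plain modulo hash over
 * the peer table. A minimal sketch consistent with the inline helper in
 * o2iblnd.h (paraphrased, not verbatim):
 *
 *	static inline struct list_head *
 *	kiblnd_nid2peerlist(lnet_nid_t nid)
 *	{
 *		unsigned int hash =
 *			((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;
 *
 *		return &kiblnd_data.kib_peers[hash];
 *	}
 *
 * so a lookup only walks one hash chain, under kib_global_lock.
 */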
void
kiblnd_unlink_peer_locked (kib_peer_t *peer)
{
- LASSERT (cfs_list_empty(&peer->ibp_conns));
+ LASSERT(list_empty(&peer->ibp_conns));
LASSERT (kiblnd_peer_active(peer));
- cfs_list_del_init(&peer->ibp_list);
+ list_del_init(&peer->ibp_list);
/* lose peerlist's ref */
kiblnd_peer_decref(peer);
}
-int
-kiblnd_get_peer_info (lnet_ni_t *ni, int index,
- lnet_nid_t *nidp, int *count)
+static int
+kiblnd_get_peer_info(lnet_ni_t *ni, int index,
+ lnet_nid_t *nidp, int *count)
{
- kib_peer_t *peer;
- cfs_list_t *ptmp;
- int i;
- unsigned long flags;
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ int i;
+ unsigned long flags;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+ list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns));
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer));
- if (peer->ibp_ni != ni)
- continue;
+ if (peer->ibp_ni != ni)
+ continue;
- if (index-- > 0)
- continue;
+ if (index-- > 0)
+ continue;
- *nidp = peer->ibp_nid;
+ *nidp = peer->ibp_nid;
			*count = atomic_read(&peer->ibp_refcount);
			read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
					       flags);
			return 0;
		}
	}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- return -ENOENT;
+ return -ENOENT;
}
-void
+static void
kiblnd_del_peer_locked (kib_peer_t *peer)
{
- cfs_list_t *ctmp;
- cfs_list_t *cnxt;
- kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ kib_conn_t *conn;
- if (cfs_list_empty(&peer->ibp_conns)) {
- kiblnd_unlink_peer_locked(peer);
- } else {
- cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
+ if (list_empty(&peer->ibp_conns)) {
+ kiblnd_unlink_peer_locked(peer);
+ } else {
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
- kiblnd_close_conn_locked(conn, 0);
- }
- /* NB closing peer's last conn unlinked it. */
- }
- /* NB peer now unlinked; might even be freed if the peer table had the
- * last ref on it. */
+ kiblnd_close_conn_locked(conn, 0);
+ }
+ /* NB closing peer's last conn unlinked it. */
+ }
+ /* NB peer now unlinked; might even be freed if the peer table had the
+ * last ref on it. */
}
-int
+static int
kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
{
- CFS_LIST_HEAD (zombies);
- cfs_list_t *ptmp;
- cfs_list_t *pnxt;
- kib_peer_t *peer;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int rc = -ENOENT;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ kib_peer_t *peer;
+ int lo;
+ int hi;
+ int i;
+ unsigned long flags;
+ int rc = -ENOENT;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
hi = kiblnd_data.kib_peer_hash_size - 1;
}
- for (i = lo; i <= hi; i++) {
- cfs_list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns));
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer));
- if (peer->ibp_ni != ni)
- continue;
+ if (peer->ibp_ni != ni)
+ continue;
- if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
- continue;
+ if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
+ continue;
- if (!cfs_list_empty(&peer->ibp_tx_queue)) {
- LASSERT (cfs_list_empty(&peer->ibp_conns));
+ if (!list_empty(&peer->ibp_tx_queue)) {
+ LASSERT(list_empty(&peer->ibp_conns));
- cfs_list_splice_init(&peer->ibp_tx_queue,
- &zombies);
- }
+ list_splice_init(&peer->ibp_tx_queue,
+ &zombies);
+ }
- kiblnd_del_peer_locked(peer);
- rc = 0; /* matched something */
- }
- }
+ kiblnd_del_peer_locked(peer);
+ rc = 0; /* matched something */
+ }
+ }
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_txlist_done(ni, &zombies, -EIO);
+ kiblnd_txlist_done(ni, &zombies, -EIO);
- return rc;
+ return rc;
}
-kib_conn_t *
-kiblnd_get_conn_by_idx (lnet_ni_t *ni, int index)
+static kib_conn_t *
+kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
{
- kib_peer_t *peer;
- cfs_list_t *ptmp;
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- int i;
- unsigned long flags;
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ int i;
+ unsigned long flags;
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- cfs_list_for_each (ptmp, &kiblnd_data.kib_peers[i]) {
+ for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
+ list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns));
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer));
- if (peer->ibp_ni != ni)
- continue;
+ if (peer->ibp_ni != ni)
+ continue;
- cfs_list_for_each (ctmp, &peer->ibp_conns) {
- if (index-- > 0)
- continue;
+ list_for_each(ctmp, &peer->ibp_conns) {
+ if (index-- > 0)
+ continue;
- conn = cfs_list_entry(ctmp, kib_conn_t,
- ibc_list);
- kiblnd_conn_addref(conn);
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ kiblnd_conn_addref(conn);
				read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
						       flags);
				return conn;
			}
		}
	}

	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
	return NULL;
}
-void
+static void
kiblnd_debug_rx (kib_rx_t *rx)
{
	CDEBUG(D_CONSOLE, " %p status %d msg_type %x cred %d\n",
	       rx, rx->rx_status, rx->rx_msg->ibm_type,
	       rx->rx_msg->ibm_credits);
}
-void
+static void
kiblnd_debug_tx (kib_tx_t *tx)
{
CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx "
void
kiblnd_debug_conn (kib_conn_t *conn)
{
- cfs_list_t *tmp;
- int i;
+ struct list_head *tmp;
+ int i;
spin_lock(&conn->ibc_lock);
- CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s: \n",
+ CDEBUG(D_CONSOLE, "conn[%d] %p [version %x] -> %s:\n",
atomic_read(&conn->ibc_refcount), conn,
- conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d r_cred %d\n",
- conn->ibc_state, conn->ibc_noops_posted,
- conn->ibc_nsends_posted, conn->ibc_credits,
- conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
- CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
+ conn->ibc_version, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+	CDEBUG(D_CONSOLE, " state %d nposted %d/%d cred %d o_cred %d"
+	       " r_cred %d\n", conn->ibc_state, conn->ibc_noops_posted,
+ conn->ibc_nsends_posted, conn->ibc_credits,
+ conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
+ CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
- CDEBUG(D_CONSOLE, " early_rxs:\n");
- cfs_list_for_each(tmp, &conn->ibc_early_rxs)
- kiblnd_debug_rx(cfs_list_entry(tmp, kib_rx_t, rx_list));
+ CDEBUG(D_CONSOLE, " early_rxs:\n");
+ list_for_each(tmp, &conn->ibc_early_rxs)
+ kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
- CDEBUG(D_CONSOLE, " tx_noops:\n");
- cfs_list_for_each(tmp, &conn->ibc_tx_noops)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " tx_noops:\n");
+ list_for_each(tmp, &conn->ibc_tx_noops)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
- cfs_list_for_each(tmp, &conn->ibc_tx_queue_nocred)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue_nocred)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
- cfs_list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " tx_queue:\n");
- cfs_list_for_each(tmp, &conn->ibc_tx_queue)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " tx_queue:\n");
+ list_for_each(tmp, &conn->ibc_tx_queue)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " active_txs:\n");
- cfs_list_for_each(tmp, &conn->ibc_active_txs)
- kiblnd_debug_tx(cfs_list_entry(tmp, kib_tx_t, tx_list));
+ CDEBUG(D_CONSOLE, " active_txs:\n");
+ list_for_each(tmp, &conn->ibc_active_txs)
+ kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
- CDEBUG(D_CONSOLE, " rxs:\n");
- for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++)
- kiblnd_debug_rx(&conn->ibc_rxs[i]);
+ CDEBUG(D_CONSOLE, " rxs:\n");
+ for (i = 0; i < IBLND_RX_MSGS(conn); i++)
+ kiblnd_debug_rx(&conn->ibc_rxs[i]);
spin_unlock(&conn->ibc_lock);
}
cmid->route.path_rec->mtu = mtu;
}
-#ifdef HAVE_OFED_IB_COMP_VECTOR
static int
kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
{
int vectors;
int off;
int i;
+ lnet_nid_t ibp_nid;
vectors = conn->ibc_cmid->device->num_comp_vectors;
	if (vectors <= 1)
		return 0;

	mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
/* hash NID to CPU id in this partition... */
- off = conn->ibc_peer->ibp_nid % cpus_weight(*mask);
- for_each_cpu_mask(i, *mask) {
+ ibp_nid = conn->ibc_peer->ibp_nid;
+ off = do_div(ibp_nid, cpumask_weight(mask));
+ for_each_cpu(i, mask) {
if (off-- == 0)
return i % vectors;
}
LBUG();
return 1;
}
-#endif
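/*
 * Note on the do_div() change above: 'ibp_nid % cpus_weight(*mask)' is a
 * 64-bit modulo, which does not link on 32-bit kernels (no __umoddi3), so
 * the NID is copied into a local and reduced with do_div(), which divides
 * the 64-bit value in place and returns the 32-bit remainder:
 *
 *	__u64 n = nid;		(local copy -- do_div() modifies it)
 *	__u32 rem = do_div(n, cpumask_weight(mask));
 *	(now n == nid / weight and rem == nid % weight)
 */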
kib_conn_t *
kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
- int state, int version)
-{
- /* CAVEAT EMPTOR:
- * If the new conn is created successfully it takes over the caller's
- * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
- * is destroyed. On failure, the caller's ref on 'peer' remains and
- * she must dispose of 'cmid'. (Actually I'd block forever if I tried
- * to destroy 'cmid' here since I'm called from the CM which still has
- * its ref on 'cmid'). */
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer->ibp_ni->ni_data;
+ int state, int version)
+{
+ /* CAVEAT EMPTOR:
+ * If the new conn is created successfully it takes over the caller's
+ * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
+ * is destroyed. On failure, the caller's ref on 'peer' remains and
+ * she must dispose of 'cmid'. (Actually I'd block forever if I tried
+ * to destroy 'cmid' here since I'm called from the CM which still has
+ * its ref on 'cmid'). */
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ kib_net_t *net = peer->ibp_ni->ni_data;
kib_dev_t *dev;
- struct ib_qp_init_attr *init_qp_attr;
+ struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched;
+#ifdef HAVE_IB_CQ_INIT_ATTR
+ struct ib_cq_init_attr cq_attr = {};
+#endif
kib_conn_t *conn;
struct ib_cq *cq;
unsigned long flags;
}
LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
- if (conn == NULL) {
- CERROR("Can't allocate connection for %s\n",
- libcfs_nid2str(peer->ibp_nid));
- goto failed_1;
- }
+ if (conn == NULL) {
+ CERROR("Can't allocate connection for %s\n",
+ libcfs_nid2str(peer->ibp_nid));
+ goto failed_1;
+ }
- conn->ibc_state = IBLND_CONN_INIT;
- conn->ibc_version = version;
- conn->ibc_peer = peer; /* I take the caller's ref */
- cmid->context = conn; /* for future CM callbacks */
- conn->ibc_cmid = cmid;
-
- CFS_INIT_LIST_HEAD(&conn->ibc_early_rxs);
- CFS_INIT_LIST_HEAD(&conn->ibc_tx_noops);
- CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue);
- CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
- CFS_INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
- CFS_INIT_LIST_HEAD(&conn->ibc_active_txs);
+ conn->ibc_state = IBLND_CONN_INIT;
+ conn->ibc_version = version;
+ conn->ibc_peer = peer; /* I take the caller's ref */
+ cmid->context = conn; /* for future CM callbacks */
+ conn->ibc_cmid = cmid;
+ conn->ibc_max_frags = peer->ibp_max_frags;
+ conn->ibc_queue_depth = peer->ibp_queue_depth;
+
+ INIT_LIST_HEAD(&conn->ibc_early_rxs);
+ INIT_LIST_HEAD(&conn->ibc_tx_noops);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
+ INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
+ INIT_LIST_HEAD(&conn->ibc_active_txs);
spin_lock_init(&conn->ibc_lock);
LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
if (dev->ibd_hdev->ibh_ibdev != cmid->device) {
/* wakeup failover thread and teardown connection */
if (kiblnd_dev_can_failover(dev)) {
- cfs_list_add_tail(&dev->ibd_fail_list,
+ list_add_tail(&dev->ibd_fail_list,
&kiblnd_data.kib_failed_devs);
wake_up(&kiblnd_data.kib_failover_waitq);
}
write_unlock_irqrestore(glock, flags);
LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(version) * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
if (conn->ibc_rxs == NULL) {
CERROR("Cannot allocate RX buffers\n");
goto failed_2;
}
rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
- IBLND_RX_MSG_PAGES(version));
+ IBLND_RX_MSG_PAGES(conn));
if (rc != 0)
goto failed_2;
kiblnd_map_rx_descs(conn);
-#ifdef HAVE_OFED_IB_COMP_VECTOR
+#ifdef HAVE_IB_CQ_INIT_ATTR
+ cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
+ cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
cq = ib_create_cq(cmid->device,
kiblnd_cq_completion, kiblnd_cq_event, conn,
- IBLND_CQ_ENTRIES(version),
- kiblnd_get_completion_vector(conn, cpt));
+ &cq_attr);
#else
- cq = ib_create_cq(cmid->device,
- kiblnd_cq_completion, kiblnd_cq_event, conn,
- IBLND_CQ_ENTRIES(version));
+ cq = ib_create_cq(cmid->device,
+ kiblnd_cq_completion, kiblnd_cq_event, conn,
+ IBLND_CQ_ENTRIES(conn),
+ kiblnd_get_completion_vector(conn, cpt));
#endif
- if (IS_ERR(cq)) {
- CERROR("Can't create CQ: %ld, cqe: %d\n",
- PTR_ERR(cq), IBLND_CQ_ENTRIES(version));
- goto failed_2;
- }
+ if (IS_ERR(cq)) {
+ CERROR("Failed to create CQ with %d CQEs: %ld\n",
+ IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
+ goto failed_2;
+ }
conn->ibc_cq = cq;
- rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
- if (rc != 0) {
- CERROR("Can't request completion notificiation: %d\n", rc);
- goto failed_2;
- }
+ rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ if (rc != 0) {
+ CERROR("Can't request completion notification: %d\n", rc);
+ goto failed_2;
+ }
init_qp_attr->event_handler = kiblnd_qp_event;
init_qp_attr->qp_context = conn;
- init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(version);
- init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(version);
+ init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn);
+ init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
init_qp_attr->cap.max_send_sge = 1;
init_qp_attr->cap.max_recv_sge = 1;
init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
conn->ibc_sched = sched;
- rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (rc != 0) {
- CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
- rc, init_qp_attr->cap.max_send_wr,
- init_qp_attr->cap.max_recv_wr);
- goto failed_2;
- }
+	do {
+		rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
+		if (!rc || init_qp_attr->cap.max_send_wr < 16)
+			break;
+
+		init_qp_attr->cap.max_send_wr -= init_qp_attr->cap.max_send_wr / 4;
+	} while (rc);
-	LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+
+ if (rc) {
+ CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
+ rc, init_qp_attr->cap.max_send_wr,
+ init_qp_attr->cap.max_recv_wr);
+ goto failed_2;
+ }
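/*
 * The retry loop above trades queue depth for a usable QP: every failed
 * rdma_create_qp() shrinks max_send_wr by a quarter and retries, giving up
 * once fewer than 16 send WRs remain. E.g., assuming a hypothetical HCA
 * that caps send WRs at 2048, a request for 4096 would retry as
 * 4096 -> 3072 -> 2304 -> 1728 and succeed on the fourth attempt; the
 * CDEBUG below reports the depth actually granted.
 */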
- /* 1 ref for caller and each rxmsg */
- atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(version));
- conn->ibc_nrx = IBLND_RX_MSGS(version);
+ if (init_qp_attr->cap.max_send_wr != IBLND_SEND_WRS(conn))
+ CDEBUG(D_NET, "original send wr %d, created with %d\n",
+ IBLND_SEND_WRS(conn), init_qp_attr->cap.max_send_wr);
- /* post receives */
- for (i = 0; i < IBLND_RX_MSGS(version); i++) {
- rc = kiblnd_post_rx(&conn->ibc_rxs[i],
- IBLND_POSTRX_NO_CREDIT);
- if (rc != 0) {
- CERROR("Can't post rxmsg: %d\n", rc);
+ LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
- /* Make posted receives complete */
- kiblnd_abort_receives(conn);
+ /* 1 ref for caller and each rxmsg */
+ atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
+ conn->ibc_nrx = IBLND_RX_MSGS(conn);
- /* correct # of posted buffers
- * NB locking needed now I'm racing with completion */
+ /* post receives */
+ for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
+ rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT);
+ if (rc != 0) {
+ CERROR("Can't post rxmsg: %d\n", rc);
+
+ /* Make posted receives complete */
+ kiblnd_abort_receives(conn);
+
+ /* correct # of posted buffers
+ * NB locking needed now I'm racing with completion */
spin_lock_irqsave(&sched->ibs_lock, flags);
- conn->ibc_nrx -= IBLND_RX_MSGS(version) - i;
+ conn->ibc_nrx -= IBLND_RX_MSGS(conn) - i;
spin_unlock_irqrestore(&sched->ibs_lock, flags);
		/* cmid will be destroyed by CM(ofed) after cm_callback
		 * returned, so we can't refer it anymore
		 * (by kiblnd_connd()->kiblnd_destroy_conn) */
rdma_destroy_qp(conn->ibc_cmid);
conn->ibc_cmid = NULL;
- /* Drop my own and unused rxbuffer refcounts */
- while (i++ <= IBLND_RX_MSGS(version))
- kiblnd_conn_decref(conn);
+ /* Drop my own and unused rxbuffer refcounts */
+ while (i++ <= IBLND_RX_MSGS(conn))
+ kiblnd_conn_decref(conn);
return NULL;
}
return conn;
failed_2:
- kiblnd_destroy_conn(conn);
+ kiblnd_destroy_conn(conn, true);
failed_1:
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
failed_0:
}
void
-kiblnd_destroy_conn (kib_conn_t *conn)
+kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
kib_peer_t *peer = conn->ibc_peer;
LASSERT (!in_interrupt());
LASSERT (atomic_read(&conn->ibc_refcount) == 0);
- LASSERT (cfs_list_empty(&conn->ibc_early_rxs));
- LASSERT (cfs_list_empty(&conn->ibc_tx_noops));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue_rsrvd));
- LASSERT (cfs_list_empty(&conn->ibc_tx_queue_nocred));
- LASSERT (cfs_list_empty(&conn->ibc_active_txs));
+ LASSERT(list_empty(&conn->ibc_early_rxs));
+ LASSERT(list_empty(&conn->ibc_tx_noops));
+ LASSERT(list_empty(&conn->ibc_tx_queue));
+ LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
+ LASSERT(list_empty(&conn->ibc_tx_queue_nocred));
+ LASSERT(list_empty(&conn->ibc_active_txs));
LASSERT (conn->ibc_noops_posted == 0);
LASSERT (conn->ibc_nsends_posted == 0);
if (conn->ibc_rxs != NULL) {
LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn->ibc_version) * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
}
if (conn->ibc_connvars != NULL)
atomic_dec(&net->ibn_nconns);
}
- LIBCFS_FREE(conn, sizeof(*conn));
+ if (free_conn)
+ LIBCFS_FREE(conn, sizeof(*conn));
}
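/*
 * The new 'free_conn' flag decouples releasing the IB resources from
 * freeing the kib_conn_t itself, so a caller that wants to recycle the
 * structure (e.g. across a reconnect attempt) can pass false and dispose
 * of it later. Passing true preserves the old one-shot behaviour:
 *
 *	kiblnd_destroy_conn(conn, true);	(== old kiblnd_destroy_conn(conn))
 */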
int
-kiblnd_close_peer_conns_locked (kib_peer_t *peer, int why)
+kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
{
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- cfs_list_t *cnxt;
- int count = 0;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
- cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
- CDEBUG(D_NET, "Closing conn -> %s, "
- "version: %x, reason: %d\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_version, why);
+ CDEBUG(D_NET, "Closing conn -> %s, "
+ "version: %x, reason: %d\n",
+ libcfs_nid2str(peer->ibp_nid),
+ conn->ibc_version, why);
- kiblnd_close_conn_locked(conn, why);
- count++;
- }
+ kiblnd_close_conn_locked(conn, why);
+ count++;
+ }
- return count;
+ return count;
}
int
-kiblnd_close_stale_conns_locked (kib_peer_t *peer,
- int version, __u64 incarnation)
+kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+ int version, __u64 incarnation)
{
- kib_conn_t *conn;
- cfs_list_t *ctmp;
- cfs_list_t *cnxt;
- int count = 0;
+ kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ int count = 0;
- cfs_list_for_each_safe (ctmp, cnxt, &peer->ibp_conns) {
- conn = cfs_list_entry(ctmp, kib_conn_t, ibc_list);
+ list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ conn = list_entry(ctmp, kib_conn_t, ibc_list);
- if (conn->ibc_version == version &&
- conn->ibc_incarnation == incarnation)
- continue;
+ if (conn->ibc_version == version &&
+ conn->ibc_incarnation == incarnation)
+ continue;
- CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
- "incarnation:"LPX64"(%x, "LPX64")\n",
- libcfs_nid2str(peer->ibp_nid),
- conn->ibc_version, conn->ibc_incarnation,
- version, incarnation);
+ CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
+ "incarnation:"LPX64"(%x, "LPX64")\n",
+ libcfs_nid2str(peer->ibp_nid),
+ conn->ibc_version, conn->ibc_incarnation,
+ version, incarnation);
- kiblnd_close_conn_locked(conn, -ESTALE);
- count++;
- }
+ kiblnd_close_conn_locked(conn, -ESTALE);
+ count++;
+ }
- return count;
+ return count;
}
-int
-kiblnd_close_matching_conns (lnet_ni_t *ni, lnet_nid_t nid)
+static int
+kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
{
- kib_peer_t *peer;
- cfs_list_t *ptmp;
- cfs_list_t *pnxt;
- int lo;
- int hi;
- int i;
- unsigned long flags;
- int count = 0;
+ kib_peer_t *peer;
+ struct list_head *ptmp;
+ struct list_head *pnxt;
+ int lo;
+ int hi;
+ int i;
+ unsigned long flags;
+ int count = 0;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (nid != LNET_NID_ANY)
- lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
- else {
- lo = 0;
- hi = kiblnd_data.kib_peer_hash_size - 1;
- }
+ if (nid != LNET_NID_ANY)
+ lo = hi = kiblnd_nid2peerlist(nid) - kiblnd_data.kib_peers;
+ else {
+ lo = 0;
+ hi = kiblnd_data.kib_peer_hash_size - 1;
+ }
- for (i = lo; i <= hi; i++) {
- cfs_list_for_each_safe (ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
+ for (i = lo; i <= hi; i++) {
+ list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = cfs_list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT (peer->ibp_connecting > 0 ||
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns));
+ peer = list_entry(ptmp, kib_peer_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer));
- if (peer->ibp_ni != ni)
- continue;
+ if (peer->ibp_ni != ni)
+ continue;
- if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
- continue;
+ if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
+ continue;
- count += kiblnd_close_peer_conns_locked(peer, 0);
- }
- }
+ count += kiblnd_close_peer_conns_locked(peer, 0);
+ }
+ }
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- /* wildcards always succeed */
- if (nid == LNET_NID_ANY)
- return 0;
+ /* wildcards always succeed */
+ if (nid == LNET_NID_ANY)
+ return 0;
- return (count == 0) ? -ENOENT : 0;
+ return (count == 0) ? -ENOENT : 0;
}
-int
+static int
kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
return rc;
}
-void
-kiblnd_query (lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
+static void
+kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
{
cfs_time_t last_alive = 0;
cfs_time_t now = cfs_time_current();
read_lock_irqsave(glock, flags);
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL) {
- LASSERT (peer->ibp_connecting > 0 || /* creating conns */
- peer->ibp_accepting > 0 ||
- !cfs_list_empty(&peer->ibp_conns)); /* active conn */
- last_alive = peer->ibp_last_alive;
- }
+ peer = kiblnd_find_peer_locked(nid);
+ if (peer != NULL)
+ last_alive = peer->ibp_last_alive;
read_unlock_irqrestore(glock, flags);
- if (last_alive != 0)
- *when = last_alive;
+ if (last_alive != 0)
+ *when = last_alive;
- /* peer is not persistent in hash, trigger peer creation
- * and connection establishment with a NULL tx */
- if (peer == NULL)
- kiblnd_launch_tx(ni, NULL, nid);
+ /* peer is not persistent in hash, trigger peer creation
+ * and connection establishment with a NULL tx */
+ if (peer == NULL)
+ kiblnd_launch_tx(ni, NULL, nid);
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
- libcfs_nid2str(nid), peer,
- last_alive ? cfs_duration_sec(now - last_alive) : -1);
- return;
+ CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
+ libcfs_nid2str(nid), peer,
+ last_alive ? cfs_duration_sec(now - last_alive) : -1);
+ return;
}
-void
+static void
kiblnd_free_pages(kib_pages_t *p)
{
int npages = p->ibp_npages;
LASSERT (conn->ibc_rxs != NULL);
LASSERT (conn->ibc_hdev != NULL);
- for (i = 0; i < IBLND_RX_MSGS(conn->ibc_version); i++) {
- rx = &conn->ibc_rxs[i];
+ for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
+ rx = &conn->ibc_rxs[i];
- LASSERT (rx->rx_nob >= 0); /* not posted */
+ LASSERT(rx->rx_nob >= 0); /* not posted */
- kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
- KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
- rx->rx_msgaddr),
- IBLND_MSG_SIZE, DMA_FROM_DEVICE);
- }
+ kiblnd_dma_unmap_single(conn->ibc_hdev->ibh_ibdev,
+ KIBLND_UNMAP_ADDR(rx, rx_msgunmap,
+ rx->rx_msgaddr),
+ IBLND_MSG_SIZE, DMA_FROM_DEVICE);
+ }
kiblnd_free_pages(conn->ibc_rx_pages);
int ipg;
int i;
- for (pg_off = ipg = i = 0;
- i < IBLND_RX_MSGS(conn->ibc_version); i++) {
- pg = conn->ibc_rx_pages->ibp_pages[ipg];
- rx = &conn->ibc_rxs[i];
+ for (pg_off = ipg = i = 0; i < IBLND_RX_MSGS(conn); i++) {
+ pg = conn->ibc_rx_pages->ibp_pages[ipg];
+ rx = &conn->ibc_rxs[i];
- rx->rx_conn = conn;
- rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
+ rx->rx_conn = conn;
+ rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
- rx->rx_msgaddr = kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
- rx->rx_msg, IBLND_MSG_SIZE,
- DMA_FROM_DEVICE);
- LASSERT (!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
- rx->rx_msgaddr));
- KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
+ rx->rx_msgaddr =
+ kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
+ rx->rx_msg, IBLND_MSG_SIZE,
+ DMA_FROM_DEVICE);
+ LASSERT(!kiblnd_dma_mapping_error(conn->ibc_hdev->ibh_ibdev,
+ rx->rx_msgaddr));
+ KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
- CDEBUG(D_NET,"rx %d: %p "LPX64"("LPX64")\n",
- i, rx->rx_msg, rx->rx_msgaddr,
- lnet_page2phys(pg) + pg_off);
+ CDEBUG(D_NET, "rx %d: %p "LPX64"("LPX64")\n",
+ i, rx->rx_msg, rx->rx_msgaddr,
+ (__u64)(page_to_phys(pg) + pg_off));
- pg_off += IBLND_MSG_SIZE;
- LASSERT (pg_off <= PAGE_SIZE);
+ pg_off += IBLND_MSG_SIZE;
+ LASSERT(pg_off <= PAGE_SIZE);
- if (pg_off == PAGE_SIZE) {
- pg_off = 0;
- ipg++;
- LASSERT (ipg <= IBLND_RX_MSG_PAGES(conn->ibc_version));
- }
- }
+ if (pg_off == PAGE_SIZE) {
+ pg_off = 0;
+ ipg++;
+ LASSERT(ipg <= IBLND_RX_MSG_PAGES(conn));
+ }
+ }
}
static void
if (i++ % 50 == 0)
CDEBUG(D_NET, "%s: Wait for failover\n",
dev->ibd_ifname);
+ set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(cfs_time_seconds(1) / 100);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
tpo->tpo_hdev = kiblnd_current_hdev(dev);
- for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
- page = txpgs->ibp_pages[ipage];
- tx = &tpo->tpo_tx_descs[i];
+ for (ipage = page_offset = i = 0; i < pool->po_size; i++) {
+ page = txpgs->ibp_pages[ipage];
+ tx = &tpo->tpo_tx_descs[i];
- tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
- page_offset);
+ tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
+ page_offset);
- tx->tx_msgaddr = kiblnd_dma_map_single(
- tpo->tpo_hdev->ibh_ibdev, tx->tx_msg,
- IBLND_MSG_SIZE, DMA_TO_DEVICE);
- LASSERT (!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
- tx->tx_msgaddr));
- KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
+ tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev,
+ tx->tx_msg,
+ IBLND_MSG_SIZE,
+ DMA_TO_DEVICE);
+ LASSERT(!kiblnd_dma_mapping_error(tpo->tpo_hdev->ibh_ibdev,
+ tx->tx_msgaddr));
+ KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr);
- cfs_list_add(&tx->tx_list, &pool->po_free_list);
+ list_add(&tx->tx_list, &pool->po_free_list);
- page_offset += IBLND_MSG_SIZE;
- LASSERT (page_offset <= PAGE_SIZE);
+ page_offset += IBLND_MSG_SIZE;
+ LASSERT(page_offset <= PAGE_SIZE);
- if (page_offset == PAGE_SIZE) {
- page_offset = 0;
- ipage++;
- LASSERT (ipage <= txpgs->ibp_npages);
- }
- }
+ if (page_offset == PAGE_SIZE) {
+ page_offset = 0;
+ ipage++;
+ LASSERT(ipage <= txpgs->ibp_npages);
+ }
+ }
}
struct ib_mr *
-kiblnd_find_dma_mr(kib_hca_dev_t *hdev, __u64 addr, __u64 size)
+kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
+ int negotiated_nfrags)
{
- __u64 index;
-
- LASSERT (hdev->ibh_mrs[0] != NULL);
+ kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int mod;
+ __u16 nfrags;
- if (hdev->ibh_nmrs == 1)
- return hdev->ibh_mrs[0];
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
- index = addr >> hdev->ibh_mr_shift;
+ LASSERT(hdev->ibh_mrs != NULL);
- if (index < hdev->ibh_nmrs &&
- index == ((addr + size - 1) >> hdev->ibh_mr_shift))
- return hdev->ibh_mrs[index];
+ if (mod > 0 && nfrags <= rd->rd_nfrags)
+ return NULL;
- return NULL;
+ return hdev->ibh_mrs;
}
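/*
 * The map-on-demand policy above, worked through: assuming
 * lnd_map_on_demand = 32 and no negotiated override (negotiated_nfrags
 * == -1), a 40-fragment RDMA descriptor returns NULL, forcing an
 * FMR/FastReg mapping, while an 8-fragment descriptor is covered by the
 * single global DMA MR:
 *
 *	nfrags = 32;
 *	32 <= 40  -> return NULL	(map on demand)
 *	32 <= 8   -> false		(return hdev->ibh_mrs)
 */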
-struct ib_mr *
-kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd)
+static void
+kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
{
- struct ib_mr *prev_mr;
- struct ib_mr *mr;
- int i;
-
- LASSERT (hdev->ibh_mrs[0] != NULL);
-
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- *kiblnd_tunables.kib_map_on_demand <= rd->rd_nfrags)
- return NULL;
+ LASSERT(fpo->fpo_map_count == 0);
- if (hdev->ibh_nmrs == 1)
- return hdev->ibh_mrs[0];
-
- for (i = 0, mr = prev_mr = NULL;
- i < rd->rd_nfrags; i++) {
- mr = kiblnd_find_dma_mr(hdev,
- rd->rd_frags[i].rf_addr,
- rd->rd_frags[i].rf_nob);
- if (prev_mr == NULL)
- prev_mr = mr;
+ if (fpo->fpo_is_fmr) {
+ if (fpo->fmr.fpo_fmr_pool)
+ ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ } else {
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i = 0;
+
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ i++;
+ }
+ if (i < fpo->fast_reg.fpo_pool_size)
+ CERROR("FastReg pool still has %d regions registered\n",
+ fpo->fast_reg.fpo_pool_size - i);
+ }
- if (mr == NULL || prev_mr != mr) {
- /* Can't covered by one single MR */
- mr = NULL;
- break;
- }
- }
+ if (fpo->fpo_hdev)
+ kiblnd_hdev_decref(fpo->fpo_hdev);
- return mr;
+ LIBCFS_FREE(fpo, sizeof(*fpo));
}
-void
-kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
+static void
+kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- LASSERT (pool->fpo_map_count == 0);
+ kib_fmr_pool_t *fpo, *tmp;
- if (pool->fpo_fmr_pool != NULL)
- ib_destroy_fmr_pool(pool->fpo_fmr_pool);
+ list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
+ list_del(&fpo->fpo_list);
+ kiblnd_destroy_fmr_pool(fpo);
+ }
+}
- if (pool->fpo_hdev != NULL)
- kiblnd_hdev_decref(pool->fpo_hdev);
+static int
+kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
+{
+ int size = tunables->lnd_fmr_pool_size / ncpts;
- LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
+ return max(IBLND_FMR_POOL, size);
}
-void
-kiblnd_destroy_fmr_pool_list(cfs_list_t *head)
+static int
+kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- kib_fmr_pool_t *pool;
+ int size = tunables->lnd_fmr_flush_trigger / ncpts;
- while (!cfs_list_empty(head)) {
- pool = cfs_list_entry(head->next, kib_fmr_pool_t, fpo_list);
- cfs_list_del(&pool->fpo_list);
- kiblnd_destroy_fmr_pool(pool);
- }
+ return max(IBLND_FMR_POOL_FLUSH, size);
}
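/*
 * Both helpers above divide a global tunable evenly across the CPU
 * partitions, with a floor. E.g., assuming lnd_fmr_pool_size = 2048 and
 * lnd_fmr_flush_trigger = 1024 on a node with 4 CPTs:
 *
 *	per-CPT pool size     = max(IBLND_FMR_POOL, 2048 / 4)
 *	per-CPT flush trigger = max(IBLND_FMR_POOL_FLUSH, 1024 / 4)
 *
 * so small configurations are clamped up to the IBLND_* floors rather
 * than ending up with unusably tiny per-CPT pools.
 */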
-static int kiblnd_fmr_pool_size(int ncpts)
+static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
{
- int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
+ struct ib_fmr_pool_param param = {
+ .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
+ .page_shift = PAGE_SHIFT,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
+ .pool_size = fps->fps_pool_size,
+ .dirty_watermark = fps->fps_flush_trigger,
+ .flush_function = NULL,
+ .flush_arg = NULL,
+ .cache = !!fps->fps_cache };
+ int rc = 0;
+
+ fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
+ ¶m);
+ if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
+ rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
+ if (rc != -ENOSYS)
+ CERROR("Failed to create FMR pool: %d\n", rc);
+ else
+ CERROR("FMRs are not supported\n");
+ }
- return max(IBLND_FMR_POOL, size);
+ return rc;
}
-static int kiblnd_fmr_flush_trigger(int ncpts)
+static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
{
- int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i, rc;
- return max(IBLND_FMR_POOL_FLUSH, size);
+ INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size = 0;
+ for (i = 0; i < fps->fps_pool_size; i++) {
+ LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
+ sizeof(*frd));
+ if (!frd) {
+ CERROR("Failed to allocate a new fast_reg descriptor\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ frd->frd_mr = NULL;
+
+ frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
+ LNET_MAX_PAYLOAD/PAGE_SIZE);
+ if (IS_ERR(frd->frd_frpl)) {
+ rc = PTR_ERR(frd->frd_frpl);
+ CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
+ rc);
+ frd->frd_frpl = NULL;
+ goto out_middle;
+ }
+
+#ifdef HAVE_IB_ALLOC_FAST_REG_MR
+ frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
+ LNET_MAX_PAYLOAD/PAGE_SIZE);
+#else
+ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
+ IB_MR_TYPE_MEM_REG,
+ LNET_MAX_PAYLOAD/PAGE_SIZE);
+#endif
+ if (IS_ERR(frd->frd_mr)) {
+ rc = PTR_ERR(frd->frd_mr);
+ CERROR("Failed to allocate ib_fast_reg_mr: %d\n", rc);
+ frd->frd_mr = NULL;
+ goto out_middle;
+ }
+
+ frd->frd_valid = true;
+
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size++;
+ }
+
+ return 0;
+
+out_middle:
+ if (frd->frd_mr)
+ ib_dereg_mr(frd->frd_mr);
+ if (frd->frd_frpl)
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+ LIBCFS_FREE(frd, sizeof(*frd));
+
+out:
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ }
+
+ return rc;
}
-int
+static int
kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
{
- /* FMR pool for RDMA */
- kib_dev_t *dev = fps->fps_net->ibn_dev;
- kib_fmr_pool_t *fpo;
- struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
- .page_shift = PAGE_SHIFT,
- .access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE),
- .pool_size = fps->fps_pool_size,
- .dirty_watermark = fps->fps_flush_trigger,
- .flush_function = NULL,
- .flush_arg = NULL,
- .cache = !!*kiblnd_tunables.kib_fmr_cache};
+ struct ib_device_attr *dev_attr;
+ kib_dev_t *dev = fps->fps_net->ibn_dev;
+ kib_fmr_pool_t *fpo;
int rc;
- LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
- if (fpo == NULL)
+ dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL);
+ if (!dev_attr)
return -ENOMEM;
+ LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
+ if (!fpo) {
+ rc = -ENOMEM;
+ goto out_dev_attr;
+ }
+
fpo->fpo_hdev = kiblnd_current_hdev(dev);
- fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, ¶m);
- if (IS_ERR(fpo->fpo_fmr_pool)) {
- rc = PTR_ERR(fpo->fpo_fmr_pool);
- CERROR("Failed to create FMR pool: %d\n", rc);
+ rc = ib_query_device(fpo->fpo_hdev->ibh_ibdev, dev_attr);
+ if (rc) {
+ CERROR("Query device failed for %s: %d\n",
+ fpo->fpo_hdev->ibh_ibdev->name, rc);
+ goto out_dev_attr;
+ }
- kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
- return rc;
- }
+ /* Check for FMR or FastReg support */
+ fpo->fpo_is_fmr = 0;
+ if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
+ LCONSOLE_INFO("Using FMR for registration\n");
+ fpo->fpo_is_fmr = 1;
+ } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ LCONSOLE_INFO("Using FastReg for registration\n");
+ } else {
+ rc = -ENOSYS;
+ LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs nor FastRegs, can't register memory\n");
+ goto out_dev_attr;
+ }
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
- *pp_fpo = fpo;
+ if (fpo->fpo_is_fmr)
+ rc = kiblnd_alloc_fmr_pool(fps, fpo);
+ else
+ rc = kiblnd_alloc_freg_pool(fps, fpo);
+ if (rc)
+ goto out_fpo;
- return 0;
+ kfree(dev_attr);
+ fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_owner = fps;
+ *pp_fpo = fpo;
+
+ return 0;
+
+out_fpo:
+ kiblnd_hdev_decref(fpo->fpo_hdev);
+ LIBCFS_FREE(fpo, sizeof(*fpo));
+
+out_dev_attr:
+ kfree(dev_attr);
+
+ return rc;
}
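/*
 * Registration-mode selection above, as a decision table:
 *
 *	device wires up alloc/dealloc/map_phys/unmap FMR ops
 *		-> classic FMR pool (kiblnd_alloc_fmr_pool)
 *	else device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS
 *		-> FastReg descriptor pool (kiblnd_alloc_freg_pool)
 *	else
 *		-> -ENOSYS, memory cannot be registered for RDMA
 */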
static void
-kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, cfs_list_t *zombies)
+kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies)
{
	if (fps->fps_net == NULL) /* initialized? */
return;
spin_lock(&fps->fps_lock);
- while (!cfs_list_empty(&fps->fps_pool_list)) {
- kib_fmr_pool_t *fpo = cfs_list_entry(fps->fps_pool_list.next,
+ while (!list_empty(&fps->fps_pool_list)) {
+ kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
kib_fmr_pool_t, fpo_list);
- fpo->fpo_failed = 1;
- cfs_list_del(&fpo->fpo_list);
- if (fpo->fpo_map_count == 0)
- cfs_list_add(&fpo->fpo_list, zombies);
- else
- cfs_list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
- }
+ fpo->fpo_failed = 1;
+ list_del(&fpo->fpo_list);
+ if (fpo->fpo_map_count == 0)
+ list_add(&fpo->fpo_list, zombies);
+ else
+ list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
+ }
spin_unlock(&fps->fps_lock);
}
}
static int
-kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net,
- int pool_size, int flush_trigger)
+kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
+ kib_net_t *net,
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
- kib_fmr_pool_t *fpo;
- int rc;
+ kib_fmr_pool_t *fpo;
+ int rc;
- memset(fps, 0, sizeof(kib_fmr_poolset_t));
+ memset(fps, 0, sizeof(kib_fmr_poolset_t));
- fps->fps_net = net;
+ fps->fps_net = net;
fps->fps_cpt = cpt;
- fps->fps_pool_size = pool_size;
- fps->fps_flush_trigger = flush_trigger;
+
+ fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
+ fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
+ fps->fps_cache = tunables->lnd_fmr_cache;
+
spin_lock_init(&fps->fps_lock);
- CFS_INIT_LIST_HEAD(&fps->fps_pool_list);
- CFS_INIT_LIST_HEAD(&fps->fps_failed_pool_list);
+ INIT_LIST_HEAD(&fps->fps_pool_list);
+ INIT_LIST_HEAD(&fps->fps_failed_pool_list);
- rc = kiblnd_create_fmr_pool(fps, &fpo);
- if (rc == 0)
- cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+ rc = kiblnd_create_fmr_pool(fps, &fpo);
+ if (rc == 0)
+ list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
- return rc;
+ return rc;
}
static int
void
kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
- CFS_LIST_HEAD (zombies);
- kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps = fpo->fpo_owner;
- cfs_time_t now = cfs_time_current();
- kib_fmr_pool_t *tmp;
- int rc;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ kib_fmr_pool_t *fpo = fmr->fmr_pool;
+ kib_fmr_poolset_t *fps;
+ cfs_time_t now = cfs_time_current();
+ kib_fmr_pool_t *tmp;
+ int rc;
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT (rc == 0);
+ if (!fpo)
+ return;
- if (status != 0) {
- rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT (rc == 0);
- }
+ fps = fpo->fpo_owner;
+ if (fpo->fpo_is_fmr) {
+ if (fmr->fmr_pfmr) {
+ rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
+ LASSERT(!rc);
+ fmr->fmr_pfmr = NULL;
+ }
- fmr->fmr_pool = NULL;
- fmr->fmr_pfmr = NULL;
+ if (status) {
+ rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ LASSERT(!rc);
+ }
+ } else {
+ struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
+
+ if (frd) {
+ frd->frd_valid = false;
+ spin_lock(&fps->fps_lock);
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ spin_unlock(&fps->fps_lock);
+ fmr->fmr_frd = NULL;
+ }
+ }
+ fmr->fmr_pool = NULL;
spin_lock(&fps->fps_lock);
- fpo->fpo_map_count --; /* decref the pool */
+ fpo->fpo_map_count--; /* decref the pool */
- cfs_list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
- /* the first pool is persistent */
- if (fps->fps_pool_list.next == &fpo->fpo_list)
- continue;
+ list_for_each_entry_safe(fpo, tmp, &fps->fps_pool_list, fpo_list) {
+ /* the first pool is persistent */
+ if (fps->fps_pool_list.next == &fpo->fpo_list)
+ continue;
- if (kiblnd_fmr_pool_is_idle(fpo, now)) {
- cfs_list_move(&fpo->fpo_list, &zombies);
- fps->fps_version ++;
- }
- }
+ if (kiblnd_fmr_pool_is_idle(fpo, now)) {
+ list_move(&fpo->fpo_list, &zombies);
+ fps->fps_version++;
+ }
+ }
spin_unlock(&fps->fps_lock);
- if (!cfs_list_empty(&zombies))
- kiblnd_destroy_fmr_pool_list(&zombies);
+ if (!list_empty(&zombies))
+ kiblnd_destroy_fmr_pool_list(&zombies);
}
int
kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u64 iov, kib_fmr_t *fmr)
+ __u32 nob, __u64 iov, bool is_rx, kib_fmr_t *fmr)
{
- struct ib_pool_fmr *pfmr;
- kib_fmr_pool_t *fpo;
- __u64 version;
- int rc;
+ kib_fmr_pool_t *fpo;
+ __u64 version;
+ int rc;
- again:
+again:
spin_lock(&fps->fps_lock);
version = fps->fps_version;
- cfs_list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
+ list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_map_count++;
- spin_unlock(&fps->fps_lock);
- pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
- pages, npages, iov);
- if (likely(!IS_ERR(pfmr))) {
- fmr->fmr_pool = fpo;
- fmr->fmr_pfmr = pfmr;
- return 0;
- }
+ if (fpo->fpo_is_fmr) {
+ struct ib_pool_fmr *pfmr;
+
+ spin_unlock(&fps->fps_lock);
+ pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
+ pages, npages, iov);
+ if (likely(!IS_ERR(pfmr))) {
+ fmr->fmr_key = is_rx ? pfmr->fmr->rkey
+ : pfmr->fmr->lkey;
+ fmr->fmr_frd = NULL;
+ fmr->fmr_pfmr = pfmr;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ rc = PTR_ERR(pfmr);
+ } else {
+ if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
+ struct ib_send_wr *wr;
+ struct kib_fast_reg_descriptor *frd;
+ struct ib_fast_reg_page_list *frpl;
+ struct ib_mr *mr;
+
+ frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
+ struct kib_fast_reg_descriptor,
+ frd_list);
+ list_del(&frd->frd_list);
+ spin_unlock(&fps->fps_lock);
+
+ frpl = frd->frd_frpl;
+ mr = frd->frd_mr;
+
+ if (!frd->frd_valid) {
+ struct ib_send_wr *inv_wr;
+ __u32 key = is_rx ? mr->rkey : mr->lkey;
+
+ inv_wr = &frd->frd_inv_wr;
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr_id = IBLND_WID_MR;
+ inv_wr->ex.invalidate_rkey = key;
+
+ /* Bump the key */
+ key = ib_inc_rkey(key);
+ ib_update_fast_reg_key(mr, key);
+ }
+
+ LASSERT(npages <= frpl->max_page_list_len);
+ memcpy(frpl->page_list, pages,
+ sizeof(*pages) * npages);
+
+ /* Prepare FastReg WR */
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+ wr->opcode = IB_WR_FAST_REG_MR;
+ wr->wr_id = IBLND_WID_MR;
+ wr->wr.fast_reg.iova_start = iov;
+ wr->wr.fast_reg.page_list = frpl;
+ wr->wr.fast_reg.page_list_len = npages;
+ wr->wr.fast_reg.page_shift = PAGE_SHIFT;
+ wr->wr.fast_reg.length = nob;
+ wr->wr.fast_reg.rkey = is_rx ? mr->rkey
+ : mr->lkey;
+ wr->wr.fast_reg.access_flags =
+ (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+
+ fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
+ fmr->fmr_frd = frd;
+ fmr->fmr_pfmr = NULL;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ spin_unlock(&fps->fps_lock);
+ rc = -EBUSY;
+ }
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--;
- if (PTR_ERR(pfmr) != -EAGAIN) {
+ if (rc != -EAGAIN) {
spin_unlock(&fps->fps_lock);
- return PTR_ERR(pfmr);
+ return rc;
}
/* EAGAIN and ... */
fps->fps_increasing = 0;
if (rc == 0) {
fps->fps_version++;
- cfs_list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
+ list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
}
static void
kiblnd_fini_pool(kib_pool_t *pool)
{
- LASSERT (cfs_list_empty(&pool->po_free_list));
- LASSERT (pool->po_allocated == 0);
+ LASSERT(list_empty(&pool->po_free_list));
+ LASSERT(pool->po_allocated == 0);
- CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
+ CDEBUG(D_NET, "Finalize %s pool\n", pool->po_owner->ps_name);
}
static void
kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
{
- CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
+ CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
- memset(pool, 0, sizeof(kib_pool_t));
- CFS_INIT_LIST_HEAD(&pool->po_free_list);
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- pool->po_owner = ps;
- pool->po_size = size;
+ memset(pool, 0, sizeof(kib_pool_t));
+ INIT_LIST_HEAD(&pool->po_free_list);
+ pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_owner = ps;
+ pool->po_size = size;
}
-void
-kiblnd_destroy_pool_list(cfs_list_t *head)
+static void
+kiblnd_destroy_pool_list(struct list_head *head)
{
- kib_pool_t *pool;
+ kib_pool_t *pool;
- while (!cfs_list_empty(head)) {
- pool = cfs_list_entry(head->next, kib_pool_t, po_list);
- cfs_list_del(&pool->po_list);
+ while (!list_empty(head)) {
+ pool = list_entry(head->next, kib_pool_t, po_list);
+ list_del(&pool->po_list);
- LASSERT (pool->po_owner != NULL);
- pool->po_owner->ps_pool_destroy(pool);
- }
+ LASSERT(pool->po_owner != NULL);
+ pool->po_owner->ps_pool_destroy(pool);
+ }
}
static void
-kiblnd_fail_poolset(kib_poolset_t *ps, cfs_list_t *zombies)
+kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
{
	if (ps->ps_net == NULL) /* initialized? */
return;
spin_lock(&ps->ps_lock);
- while (!cfs_list_empty(&ps->ps_pool_list)) {
- kib_pool_t *po = cfs_list_entry(ps->ps_pool_list.next,
+ while (!list_empty(&ps->ps_pool_list)) {
+ kib_pool_t *po = list_entry(ps->ps_pool_list.next,
kib_pool_t, po_list);
- po->po_failed = 1;
- cfs_list_del(&po->po_list);
- if (po->po_allocated == 0)
- cfs_list_add(&po->po_list, zombies);
- else
- cfs_list_add(&po->po_list, &ps->ps_failed_pool_list);
- }
+ po->po_failed = 1;
+ list_del(&po->po_list);
+ if (po->po_allocated == 0)
+ list_add(&po->po_list, zombies);
+ else
+ list_add(&po->po_list, &ps->ps_failed_pool_list);
+ }
spin_unlock(&ps->ps_lock);
}
>= sizeof(ps->ps_name))
return -E2BIG;
spin_lock_init(&ps->ps_lock);
- CFS_INIT_LIST_HEAD(&ps->ps_pool_list);
- CFS_INIT_LIST_HEAD(&ps->ps_failed_pool_list);
+ INIT_LIST_HEAD(&ps->ps_pool_list);
+ INIT_LIST_HEAD(&ps->ps_failed_pool_list);
- rc = ps->ps_pool_create(ps, size, &pool);
- if (rc == 0)
- cfs_list_add(&pool->po_list, &ps->ps_pool_list);
- else
- CERROR("Failed to create the first pool for %s\n", ps->ps_name);
+ rc = ps->ps_pool_create(ps, size, &pool);
+ if (rc == 0)
+ list_add(&pool->po_list, &ps->ps_pool_list);
+ else
+ CERROR("Failed to create the first pool for %s\n", ps->ps_name);
- return rc;
+ return rc;
}
static int
}
void
-kiblnd_pool_free_node(kib_pool_t *pool, cfs_list_t *node)
+kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
{
- CFS_LIST_HEAD (zombies);
- kib_poolset_t *ps = pool->po_owner;
- kib_pool_t *tmp;
- cfs_time_t now = cfs_time_current();
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ kib_poolset_t *ps = pool->po_owner;
+ kib_pool_t *tmp;
+ cfs_time_t now = cfs_time_current();
spin_lock(&ps->ps_lock);
- if (ps->ps_node_fini != NULL)
- ps->ps_node_fini(pool, node);
+ if (ps->ps_node_fini != NULL)
+ ps->ps_node_fini(pool, node);
- LASSERT (pool->po_allocated > 0);
- cfs_list_add(node, &pool->po_free_list);
- pool->po_allocated --;
+ LASSERT(pool->po_allocated > 0);
+ list_add(node, &pool->po_free_list);
+ pool->po_allocated--;
- cfs_list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
- /* the first pool is persistent */
- if (ps->ps_pool_list.next == &pool->po_list)
- continue;
+ list_for_each_entry_safe(pool, tmp, &ps->ps_pool_list, po_list) {
+ /* the first pool is persistent */
+ if (ps->ps_pool_list.next == &pool->po_list)
+ continue;
- if (kiblnd_pool_is_idle(pool, now))
- cfs_list_move(&pool->po_list, &zombies);
- }
+ if (kiblnd_pool_is_idle(pool, now))
+ list_move(&pool->po_list, &zombies);
+ }
spin_unlock(&ps->ps_lock);
- if (!cfs_list_empty(&zombies))
+ if (!list_empty(&zombies))
kiblnd_destroy_pool_list(&zombies);
}
-cfs_list_t *
+struct list_head *
kiblnd_pool_alloc_node(kib_poolset_t *ps)
{
- cfs_list_t *node;
- kib_pool_t *pool;
- int rc;
+ struct list_head *node;
+ kib_pool_t *pool;
+ int rc;
+ unsigned int interval = 1;
+ cfs_time_t time_before;
+ unsigned int trips = 0;
- again:
+again:
spin_lock(&ps->ps_lock);
- cfs_list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
- if (cfs_list_empty(&pool->po_free_list))
- continue;
-
- pool->po_allocated ++;
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- node = pool->po_free_list.next;
- cfs_list_del(node);
-
- if (ps->ps_node_init != NULL) {
- /* still hold the lock */
- ps->ps_node_init(pool, node);
- }
+ list_for_each_entry(pool, &ps->ps_pool_list, po_list) {
+ if (list_empty(&pool->po_free_list))
+ continue;
+
+ pool->po_allocated++;
+ pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ node = pool->po_free_list.next;
+ list_del(node);
+
+ if (ps->ps_node_init != NULL) {
+ /* still hold the lock */
+ ps->ps_node_init(pool, node);
+ }
spin_unlock(&ps->ps_lock);
return node;
}
if (ps->ps_increasing) {
/* another thread is allocating a new pool */
spin_unlock(&ps->ps_lock);
+ trips++;
CDEBUG(D_NET, "Another thread is allocating new "
- "%s pool, waiting for her to complete\n",
- ps->ps_name);
- schedule();
+	       "%s pool, waiting %d HZs for her to complete. "
+	       "trips = %d\n",
+ ps->ps_name, interval, trips);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(interval);
+ if (interval < cfs_time_seconds(1))
+ interval *= 2;
+
goto again;
}
spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
-
+ time_before = cfs_time_current();
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
+	CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete\n",
+ cfs_time_current() - time_before);
spin_lock(&ps->ps_lock);
- ps->ps_increasing = 0;
- if (rc == 0) {
- cfs_list_add_tail(&pool->po_list, &ps->ps_pool_list);
- } else {
- ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
- CERROR("Can't allocate new %s pool because out of memory\n",
- ps->ps_name);
- }
+ ps->ps_increasing = 0;
+ if (rc == 0) {
+ list_add_tail(&pool->po_list, &ps->ps_pool_list);
+ } else {
+ ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ CERROR("Can't allocate new %s pool because out of memory\n",
+ ps->ps_name);
+ }
spin_unlock(&ps->ps_lock);
goto again;
}
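/*
 * The wait above backs off exponentially instead of busy-rescheduling:
 * the sleeping thread waits 1 jiffy, then 2, 4, 8, ... and stops doubling
 * once the interval reaches one second. Assuming HZ = 1000, that is ten
 * doublings (1, 2, ..., 1024 jiffies) before the interval pins at its
 * ceiling, with 'trips' counting how often the thread had to re-check
 * the pool list.
 */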
-void
-kiblnd_pmr_pool_unmap(kib_phys_mr_t *pmr)
-{
- kib_pmr_pool_t *ppo = pmr->pmr_pool;
- struct ib_mr *mr = pmr->pmr_mr;
-
- pmr->pmr_mr = NULL;
- kiblnd_pool_free_node(&ppo->ppo_pool, &pmr->pmr_list);
- if (mr != NULL)
- ib_dereg_mr(mr);
-}
-
-int
-kiblnd_pmr_pool_map(kib_pmr_poolset_t *pps, kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr)
-{
- kib_phys_mr_t *pmr;
- cfs_list_t *node;
- int rc;
- int i;
-
- node = kiblnd_pool_alloc_node(&pps->pps_poolset);
- if (node == NULL) {
- CERROR("Failed to allocate PMR descriptor\n");
- return -ENOMEM;
- }
-
- pmr = container_of(node, kib_phys_mr_t, pmr_list);
- if (pmr->pmr_pool->ppo_hdev != hdev) {
- kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
- return -EAGAIN;
- }
-
- for (i = 0; i < rd->rd_nfrags; i ++) {
- pmr->pmr_ipb[i].addr = rd->rd_frags[i].rf_addr;
- pmr->pmr_ipb[i].size = rd->rd_frags[i].rf_nob;
- }
-
- pmr->pmr_mr = ib_reg_phys_mr(hdev->ibh_pd,
- pmr->pmr_ipb, rd->rd_nfrags,
- IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE,
- iova);
- if (!IS_ERR(pmr->pmr_mr)) {
- pmr->pmr_iova = *iova;
- *pp_pmr = pmr;
- return 0;
- }
-
- rc = PTR_ERR(pmr->pmr_mr);
- CERROR("Failed ib_reg_phys_mr: %d\n", rc);
-
- pmr->pmr_mr = NULL;
- kiblnd_pool_free_node(&pmr->pmr_pool->ppo_pool, node);
-
- return rc;
-}
-
-static void
-kiblnd_destroy_pmr_pool(kib_pool_t *pool)
-{
- kib_pmr_pool_t *ppo = container_of(pool, kib_pmr_pool_t, ppo_pool);
- kib_phys_mr_t *pmr;
-
- LASSERT (pool->po_allocated == 0);
-
- while (!cfs_list_empty(&pool->po_free_list)) {
- pmr = cfs_list_entry(pool->po_free_list.next,
- kib_phys_mr_t, pmr_list);
-
- LASSERT (pmr->pmr_mr == NULL);
- cfs_list_del(&pmr->pmr_list);
-
- if (pmr->pmr_ipb != NULL) {
- LIBCFS_FREE(pmr->pmr_ipb,
- IBLND_MAX_RDMA_FRAGS *
- sizeof(struct ib_phys_buf));
- }
-
- LIBCFS_FREE(pmr, sizeof(kib_phys_mr_t));
- }
-
- kiblnd_fini_pool(pool);
- if (ppo->ppo_hdev != NULL)
- kiblnd_hdev_decref(ppo->ppo_hdev);
-
- LIBCFS_FREE(ppo, sizeof(kib_pmr_pool_t));
-}
-
-static inline int kiblnd_pmr_pool_size(int ncpts)
-{
- int size = *kiblnd_tunables.kib_pmr_pool_size / ncpts;
-
- return max(IBLND_PMR_POOL, size);
-}
-
-static int
-kiblnd_create_pmr_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po)
-{
- struct kib_pmr_pool *ppo;
- struct kib_pool *pool;
- kib_phys_mr_t *pmr;
- int i;
-
- LIBCFS_CPT_ALLOC(ppo, lnet_cpt_table(),
- ps->ps_cpt, sizeof(kib_pmr_pool_t));
- if (ppo == NULL) {
- CERROR("Failed to allocate PMR pool\n");
- return -ENOMEM;
- }
-
- pool = &ppo->ppo_pool;
- kiblnd_init_pool(ps, pool, size);
-
- for (i = 0; i < size; i++) {
- LIBCFS_CPT_ALLOC(pmr, lnet_cpt_table(),
- ps->ps_cpt, sizeof(kib_phys_mr_t));
- if (pmr == NULL)
- break;
-
- pmr->pmr_pool = ppo;
- LIBCFS_CPT_ALLOC(pmr->pmr_ipb, lnet_cpt_table(), ps->ps_cpt,
- IBLND_MAX_RDMA_FRAGS * sizeof(*pmr->pmr_ipb));
- if (pmr->pmr_ipb == NULL)
- break;
-
- cfs_list_add(&pmr->pmr_list, &pool->po_free_list);
- }
-
- if (i < size) {
- ps->ps_pool_destroy(pool);
- return -ENOMEM;
- }
-
- ppo->ppo_hdev = kiblnd_current_hdev(ps->ps_net->ibn_dev);
- *pp_po = pool;
- return 0;
-}
-
static void
kiblnd_destroy_tx_pool(kib_pool_t *pool)
{
for (i = 0; i < pool->po_size; i++) {
kib_tx_t *tx = &tpo->tpo_tx_descs[i];
- cfs_list_del(&tx->tx_list);
+ list_del(&tx->tx_list);
if (tx->tx_pages != NULL)
LIBCFS_FREE(tx->tx_pages,
LNET_MAX_IOV *
}
static void
-kiblnd_tx_init(kib_pool_t *pool, cfs_list_t *node)
+kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
{
- kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
- tps_poolset);
- kib_tx_t *tx = cfs_list_entry(node, kib_tx_t, tx_list);
+ kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
+ tps_poolset);
+ kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
- tx->tx_cookie = tps->tps_next_tx_cookie ++;
+ tx->tx_cookie = tps->tps_next_tx_cookie++;
}
-void
+static void
kiblnd_net_fini_pools(kib_net_t *net)
{
int i;
cfs_cpt_for_each(i, lnet_cpt_table()) {
kib_tx_poolset_t *tps;
kib_fmr_poolset_t *fps;
- kib_pmr_poolset_t *pps;
if (net->ibn_tx_ps != NULL) {
tps = net->ibn_tx_ps[i];
fps = net->ibn_fmr_ps[i];
kiblnd_fini_fmr_poolset(fps);
}
-
- if (net->ibn_pmr_ps != NULL) {
- pps = net->ibn_pmr_ps[i];
- kiblnd_fini_poolset(&pps->pps_poolset);
- }
}
if (net->ibn_tx_ps != NULL) {
cfs_percpt_free(net->ibn_fmr_ps);
net->ibn_fmr_ps = NULL;
}
-
- if (net->ibn_pmr_ps != NULL) {
- cfs_percpt_free(net->ibn_pmr_ps);
- net->ibn_pmr_ps = NULL;
- }
}
-int
-kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
+static int
+kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts, int ncpts)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
unsigned long flags;
int cpt;
int rc;
int i;
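+
+ /* per-NI tunables were filled in by kiblnd_tunables_setup()
+ * during startup */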
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_map_on_demand == 0 &&
- net->ibn_dev->ibd_hdev->ibh_nmrs == 1) {
+ if (tunables->lnd_map_on_demand == 0) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
goto create_tx_pool;
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_fmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
+ if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_fmr_pool_size,
+ tunables->lnd_fmr_pool_size,
*kiblnd_tunables.kib_ntx / 4);
rc = -EINVAL;
goto failed;
}
- /* TX pool must be created later than FMR/PMR, see LU-2268
+ /* TX pool must be created later than FMR, see LU-2268
* for details */
LASSERT(net->ibn_tx_ps == NULL);
- /* premapping can fail if ibd_nmr > 1, so we always create
- * FMR/PMR pool and map-on-demand if premapping failed */
+ /* premapping can fail, so we always create FMR pool and
+ * map-on-demand if premapping failed */
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_fmr_poolset_t));
for (i = 0; i < ncpts; i++) {
cpt = (cpts == NULL) ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
- kiblnd_fmr_pool_size(ncpts),
- kiblnd_fmr_flush_trigger(ncpts));
- if (rc == -ENOSYS && i == 0) /* no FMR */
- break; /* create PMR pool */
-
- if (rc != 0) { /* a real error */
+ rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
+ net, tunables);
+ if (rc != 0) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
goto failed;
}
}
- if (i > 0) {
+ if (i > 0)
LASSERT(i == ncpts);
- goto create_tx_pool;
- }
-
- cfs_percpt_free(net->ibn_fmr_ps);
- net->ibn_fmr_ps = NULL;
-
- CWARN("Device does not support FMR, failing back to PMR\n");
-
- if (*kiblnd_tunables.kib_pmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
- CERROR("Can't set pmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_pmr_pool_size,
- *kiblnd_tunables.kib_ntx / 4);
- rc = -EINVAL;
- goto failed;
- }
-
- net->ibn_pmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_pmr_poolset_t));
- if (net->ibn_pmr_ps == NULL) {
- CERROR("Failed to allocate PMR pool array\n");
- rc = -ENOMEM;
- goto failed;
- }
-
- for (i = 0; i < ncpts; i++) {
- cpt = (cpts == NULL) ? i : cpts[i];
- rc = kiblnd_init_poolset(&net->ibn_pmr_ps[cpt]->pps_poolset,
- cpt, net, "PMR",
- kiblnd_pmr_pool_size(ncpts),
- kiblnd_create_pmr_pool,
- kiblnd_destroy_pmr_pool, NULL, NULL);
- if (rc != 0) {
- CERROR("Can't initialize PMR pool for CPT %d: %d\n",
- cpt, rc);
- goto failed;
- }
- }
create_tx_pool:
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
return 0;
}
- for (hdev->ibh_mr_shift = 0;
- hdev->ibh_mr_shift < 64; hdev->ibh_mr_shift ++) {
- if (hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) ||
- hdev->ibh_mr_size == (1ULL << hdev->ibh_mr_shift) - 1)
- return 0;
- }
-
CERROR("Invalid mr size: "LPX64"\n", hdev->ibh_mr_size);
return -EINVAL;
}
-void
+static void
kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
{
- int i;
-
- if (hdev->ibh_nmrs == 0 || hdev->ibh_mrs == NULL)
- return;
-
- for (i = 0; i < hdev->ibh_nmrs; i++) {
- if (hdev->ibh_mrs[i] == NULL)
- break;
+ if (hdev->ibh_mrs == NULL)
+ return;
- ib_dereg_mr(hdev->ibh_mrs[i]);
- }
+ ib_dereg_mr(hdev->ibh_mrs);
- LIBCFS_FREE(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
- hdev->ibh_mrs = NULL;
- hdev->ibh_nmrs = 0;
+ hdev->ibh_mrs = NULL;
}
void
LIBCFS_FREE(hdev, sizeof(*hdev));
}
-int
+static int
kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
{
- struct ib_mr *mr;
- int i;
- int rc;
- __u64 mm_size;
- __u64 mr_size;
- int acflags = IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE;
-
- rc = kiblnd_hdev_get_attr(hdev);
- if (rc != 0)
- return rc;
-
- if (hdev->ibh_mr_shift == 64) {
- LIBCFS_ALLOC(hdev->ibh_mrs, 1 * sizeof(*hdev->ibh_mrs));
- if (hdev->ibh_mrs == NULL) {
- CERROR("Failed to allocate MRs table\n");
- return -ENOMEM;
- }
+ struct ib_mr *mr;
+ int rc;
+ int acflags = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE;
- hdev->ibh_mrs[0] = NULL;
- hdev->ibh_nmrs = 1;
-
- mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
- if (IS_ERR(mr)) {
- CERROR("Failed ib_get_dma_mr : %ld\n", PTR_ERR(mr));
- kiblnd_hdev_cleanup_mrs(hdev);
- return PTR_ERR(mr);
- }
-
- hdev->ibh_mrs[0] = mr;
-
- goto out;
- }
-
- mr_size = (1ULL << hdev->ibh_mr_shift);
- mm_size = (unsigned long)high_memory - PAGE_OFFSET;
-
- hdev->ibh_nmrs = (int)((mm_size + mr_size - 1) >> hdev->ibh_mr_shift);
-
- if (hdev->ibh_mr_shift < 32 || hdev->ibh_nmrs > 1024) {
- /* it's 4T..., assume we will re-code at that time */
- CERROR("Can't support memory size: x"LPX64
- " with MR size: x"LPX64"\n", mm_size, mr_size);
- return -EINVAL;
- }
-
- /* create an array of MRs to cover all memory */
- LIBCFS_ALLOC(hdev->ibh_mrs, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
- if (hdev->ibh_mrs == NULL) {
- CERROR("Failed to allocate MRs' table\n");
- return -ENOMEM;
- }
-
- memset(hdev->ibh_mrs, 0, sizeof(*hdev->ibh_mrs) * hdev->ibh_nmrs);
-
- for (i = 0; i < hdev->ibh_nmrs; i++) {
- struct ib_phys_buf ipb;
- __u64 iova;
-
- ipb.size = hdev->ibh_mr_size;
- ipb.addr = i * mr_size;
- iova = ipb.addr;
-
- mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova);
- if (IS_ERR(mr)) {
- CERROR("Failed ib_reg_phys_mr addr "LPX64
- " size "LPX64" : %ld\n",
- ipb.addr, ipb.size, PTR_ERR(mr));
- kiblnd_hdev_cleanup_mrs(hdev);
- return PTR_ERR(mr);
- }
+ rc = kiblnd_hdev_get_attr(hdev);
+ if (rc != 0)
+ return rc;
- LASSERT (iova == ipb.addr);
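+ /* a single DMA MR now covers all of memory; the per-chunk
+ * phys-MR array is gone */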
+ mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
+ if (IS_ERR(mr)) {
+ CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr));
+ kiblnd_hdev_cleanup_mrs(hdev);
+ return PTR_ERR(mr);
+ }
- hdev->ibh_mrs[i] = mr;
- }
+ hdev->ibh_mrs = mr;
-out:
- if (hdev->ibh_mr_size != ~0ULL || hdev->ibh_nmrs != 1)
- LCONSOLE_INFO("Register global MR array, MR size: "
- LPX64", array size: %d\n",
- hdev->ibh_mr_size, hdev->ibh_nmrs);
- return 0;
+ return 0;
}
static int
rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
(struct sockaddr *)&dstaddr, 1);
if (rc != 0 || cmid->device == NULL) {
- CERROR("Failed to bind %s:%u.%u.%u.%u to device(%p): %d\n",
- dev->ibd_ifname, HIPQUAD(dev->ibd_ifip),
+ CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
+ dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
rdma_destroy_id(cmid);
return rc;
}
- if (dev->ibd_hdev->ibh_ibdev == cmid->device) {
- /* don't need device failover */
- rdma_destroy_id(cmid);
- return 0;
- }
-
- return 1;
+ rc = dev->ibd_hdev->ibh_ibdev != cmid->device; /* true for failover */
+ rdma_destroy_id(cmid);
+ return rc;
}
int
kiblnd_dev_failover(kib_dev_t *dev)
{
- CFS_LIST_HEAD (zombie_tpo);
- CFS_LIST_HEAD (zombie_ppo);
- CFS_LIST_HEAD (zombie_fpo);
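+ /* pools detached from the failed device are parked on these
+ * zombie lists under the lock and destroyed at 'out' below */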
+ struct list_head zombie_tpo = LIST_HEAD_INIT(zombie_tpo);
+ struct list_head zombie_ppo = LIST_HEAD_INIT(zombie_ppo);
+ struct list_head zombie_fpo = LIST_HEAD_INIT(zombie_fpo);
struct rdma_cm_id *cmid = NULL;
kib_hca_dev_t *hdev = NULL;
kib_hca_dev_t *old;
/* Bind to failover device or port */
rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
if (rc != 0 || cmid->device == NULL) {
- CERROR("Failed to bind %s:%u.%u.%u.%u to device(%p): %d\n",
- dev->ibd_ifname, HIPQUAD(dev->ibd_ifip),
+ CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
+ dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
rdma_destroy_id(cmid);
goto out;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- old = dev->ibd_hdev;
- dev->ibd_hdev = hdev; /* take over the refcount */
- hdev = old;
+ old = dev->ibd_hdev;
+ dev->ibd_hdev = hdev; /* take over the refcount */
+ hdev = old;
- cfs_list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
+ list_for_each_entry(net, &dev->ibd_nets, ibn_list) {
cfs_cpt_for_each(i, lnet_cpt_table()) {
kiblnd_fail_poolset(&net->ibn_tx_ps[i]->tps_poolset,
&zombie_tpo);
- if (net->ibn_fmr_ps != NULL) {
+ if (net->ibn_fmr_ps != NULL)
kiblnd_fail_fmr_poolset(net->ibn_fmr_ps[i],
&zombie_fpo);
-
- } else if (net->ibn_pmr_ps != NULL) {
- kiblnd_fail_poolset(&net->ibn_pmr_ps[i]->
- pps_poolset, &zombie_ppo);
- }
}
}
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
out:
- if (!cfs_list_empty(&zombie_tpo))
- kiblnd_destroy_pool_list(&zombie_tpo);
- if (!cfs_list_empty(&zombie_ppo))
- kiblnd_destroy_pool_list(&zombie_ppo);
- if (!cfs_list_empty(&zombie_fpo))
- kiblnd_destroy_fmr_pool_list(&zombie_fpo);
- if (hdev != NULL)
- kiblnd_hdev_decref(hdev);
-
- if (rc != 0)
- dev->ibd_failed_failover++;
- else
- dev->ibd_failed_failover = 0;
+ if (!list_empty(&zombie_tpo))
+ kiblnd_destroy_pool_list(&zombie_tpo);
+ if (!list_empty(&zombie_ppo))
+ kiblnd_destroy_pool_list(&zombie_ppo);
+ if (!list_empty(&zombie_fpo))
+ kiblnd_destroy_fmr_pool_list(&zombie_fpo);
+ if (hdev != NULL)
+ kiblnd_hdev_decref(hdev);
- return rc;
+ if (rc != 0)
+ dev->ibd_failed_failover++;
+ else
+ dev->ibd_failed_failover = 0;
+
+ return rc;
}
void
kiblnd_destroy_dev (kib_dev_t *dev)
{
LASSERT (dev->ibd_nnets == 0);
- LASSERT (cfs_list_empty(&dev->ibd_nets));
+ LASSERT(list_empty(&dev->ibd_nets));
- cfs_list_del(&dev->ibd_fail_list);
- cfs_list_del(&dev->ibd_list);
+ list_del(&dev->ibd_fail_list);
+ list_del(&dev->ibd_list);
if (dev->ibd_hdev != NULL)
kiblnd_hdev_decref(dev->ibd_hdev);
LIBCFS_FREE(dev, sizeof(*dev));
}
-kib_dev_t *
+static kib_dev_t *
kiblnd_create_dev(char *ifname)
{
struct net_device *netdev;
int up;
int rc;
- rc = libcfs_ipif_query(ifname, &up, &ip, &netmask);
+ rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
if (rc != 0) {
CERROR("Can't query IPoIB interface %s: %d\n",
ifname, rc);
if (dev == NULL)
return NULL;
- memset(dev, 0, sizeof(*dev));
-#ifdef HAVE_DEV_GET_BY_NAME_2ARG
netdev = dev_get_by_name(&init_net, ifname);
-#else
- netdev = dev_get_by_name(ifname);
-#endif
if (netdev == NULL) {
dev->ibd_can_failover = 0;
} else {
dev_put(netdev);
}
- CFS_INIT_LIST_HEAD(&dev->ibd_nets);
- CFS_INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
- CFS_INIT_LIST_HEAD(&dev->ibd_fail_list);
+ INIT_LIST_HEAD(&dev->ibd_nets);
+ INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
+ INIT_LIST_HEAD(&dev->ibd_fail_list);
dev->ibd_ifip = ip;
strcpy(&dev->ibd_ifname[0], ifname);
return NULL;
}
- cfs_list_add_tail(&dev->ibd_list,
+ list_add_tail(&dev->ibd_list,
&kiblnd_data.kib_devs);
return dev;
}
-void
+static void
kiblnd_base_shutdown(void)
{
struct kib_sched_info *sched;
int i;
- LASSERT (cfs_list_empty(&kiblnd_data.kib_devs));
+ LASSERT(list_empty(&kiblnd_data.kib_devs));
CDEBUG(D_MALLOC, "before LND base cleanup: kmem %d\n",
atomic_read(&libcfs_kmemory));
case IBLND_INIT_DATA:
LASSERT (kiblnd_data.kib_peers != NULL);
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
- LASSERT (cfs_list_empty(&kiblnd_data.kib_peers[i]));
+ LASSERT(list_empty(&kiblnd_data.kib_peers[i]));
}
- LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_zombies));
- LASSERT (cfs_list_empty(&kiblnd_data.kib_connd_conns));
+ LASSERT(list_empty(&kiblnd_data.kib_connd_zombies));
+ LASSERT(list_empty(&kiblnd_data.kib_connd_conns));
+ LASSERT(list_empty(&kiblnd_data.kib_reconn_list));
+ LASSERT(list_empty(&kiblnd_data.kib_reconn_wait));
/* flag threads to terminate; wake and wait for them to die */
kiblnd_data.kib_shutdown = 1;
i = 2;
while (atomic_read(&kiblnd_data.kib_nthreads) != 0) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
- "Waiting for %d threads to terminate\n",
+ i++;
+ /* power of 2? */
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+ "Waiting for %d threads to terminate\n",
atomic_read(&kiblnd_data.kib_nthreads));
- cfs_pause(cfs_time_seconds(1));
- }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
/* fall through */
if (kiblnd_data.kib_peers != NULL) {
LIBCFS_FREE(kiblnd_data.kib_peers,
- sizeof(cfs_list_t) *
+ sizeof(struct list_head) *
kiblnd_data.kib_peer_hash_size);
}
module_put(THIS_MODULE);
}
-void
+static void
kiblnd_shutdown (lnet_ni_t *ni)
{
kib_net_t *net = ni->ni_data;
/* nuke all existing peers within this net */
kiblnd_del_peer(ni, LNET_NID_ANY);
- /* Wait for all peer state to clean up */
- i = 2;
+ /* Wait for all peer state to clean up */
+ i = 2;
while (atomic_read(&net->ibn_npeers) != 0) {
- i++;
- CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* 2**n? */
- "%s: waiting for %d peers to disconnect\n",
- libcfs_nid2str(ni->ni_nid),
+ i++;
+ /* power of 2? */
+ CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
+ "%s: waiting for %d peers to disconnect\n",
+ libcfs_nid2str(ni->ni_nid),
atomic_read(&net->ibn_npeers));
- cfs_pause(cfs_time_seconds(1));
- }
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(cfs_time_seconds(1));
+ }
kiblnd_net_fini_pools(net);
write_lock_irqsave(g_lock, flags);
LASSERT(net->ibn_dev->ibd_nnets > 0);
net->ibn_dev->ibd_nnets--;
- cfs_list_del(&net->ibn_list);
+ list_del(&net->ibn_list);
write_unlock_irqrestore(g_lock, flags);
/* fall through */
LIBCFS_FREE(net, sizeof(*net));
out:
- if (cfs_list_empty(&kiblnd_data.kib_devs))
+ if (list_empty(&kiblnd_data.kib_devs))
kiblnd_base_shutdown();
return;
}
-int
+static int
kiblnd_base_startup(void)
{
struct kib_sched_info *sched;
int rc;
int i;
- LASSERT (kiblnd_data.kib_init == IBLND_INIT_NOTHING);
+ LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
try_module_get(THIS_MODULE);
memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
rwlock_init(&kiblnd_data.kib_global_lock);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_devs);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
+ INIT_LIST_HEAD(&kiblnd_data.kib_devs);
+ INIT_LIST_HEAD(&kiblnd_data.kib_failed_devs);
- kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
- LIBCFS_ALLOC(kiblnd_data.kib_peers,
- sizeof(cfs_list_t) *
- kiblnd_data.kib_peer_hash_size);
- if (kiblnd_data.kib_peers == NULL) {
- goto failed;
- }
- for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
+ kiblnd_data.kib_peer_hash_size = IBLND_PEER_HASH_SIZE;
+ LIBCFS_ALLOC(kiblnd_data.kib_peers,
+ sizeof(struct list_head) *
+ kiblnd_data.kib_peer_hash_size);
+ if (kiblnd_data.kib_peers == NULL)
+ goto failed;
+
+ for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++)
+ INIT_LIST_HEAD(&kiblnd_data.kib_peers[i]);
spin_lock_init(&kiblnd_data.kib_connd_lock);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
- CFS_INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ INIT_LIST_HEAD(&kiblnd_data.kib_connd_conns);
+ INIT_LIST_HEAD(&kiblnd_data.kib_connd_zombies);
+ INIT_LIST_HEAD(&kiblnd_data.kib_reconn_list);
+ INIT_LIST_HEAD(&kiblnd_data.kib_reconn_wait);
+
init_waitqueue_head(&kiblnd_data.kib_connd_waitq);
init_waitqueue_head(&kiblnd_data.kib_failover_waitq);
int nthrs;
spin_lock_init(&sched->ibs_lock);
- CFS_INIT_LIST_HEAD(&sched->ibs_conns);
+ INIT_LIST_HEAD(&sched->ibs_conns);
init_waitqueue_head(&sched->ibs_waitq);
nthrs = cfs_cpt_weight(lnet_cpt_table(), i);
return -ENETDOWN;
}
-int
+static int
kiblnd_start_schedulers(struct kib_sched_info *sched)
{
int rc = 0;
return rc;
}
-int
+static int
kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
{
int cpt;
return 0;
}
-kib_dev_t *
+static kib_dev_t *
kiblnd_dev_search(char *ifname)
{
kib_dev_t *alias = NULL;
char *colon2;
colon = strchr(ifname, ':');
- cfs_list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
+ list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
return dev;
return alias;
}
-int
+static int
kiblnd_startup (lnet_ni_t *ni)
{
char *ifname;
if (net == NULL)
goto failed;
- memset(net, 0, sizeof(*net));
-
do_gettimeofday(&tv);
net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
- ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
- ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
- ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
- ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
+ kiblnd_tunables_setup(ni);
if (ni->ni_interfaces[0] != NULL) {
/* Use the IPoIB interface specified in 'networks=' */
if (rc != 0)
goto failed;
- rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
+ rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
if (rc != 0) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
ibdev->ibd_nnets++;
- cfs_list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
+ list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
net->ibn_init = IBLND_INIT_ALL;
return -ENETDOWN;
}
-void __exit
-kiblnd_module_fini (void)
+static lnd_t the_o2iblnd = {
+ .lnd_type = O2IBLND,
+ .lnd_startup = kiblnd_startup,
+ .lnd_shutdown = kiblnd_shutdown,
+ .lnd_ctl = kiblnd_ctl,
+ .lnd_query = kiblnd_query,
+ .lnd_send = kiblnd_send,
+ .lnd_recv = kiblnd_recv,
+};
+
+static void __exit ko2iblnd_exit(void)
{
- lnet_unregister_lnd(&the_o2iblnd);
- kiblnd_tunables_fini();
+ lnet_unregister_lnd(&the_o2iblnd);
+ kiblnd_tunables_fini();
}
-int __init
-kiblnd_module_init (void)
+static int __init ko2iblnd_init(void)
{
- int rc;
+ int rc;
- CLASSERT (sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
- CLASSERT (offsetof(kib_msg_t, ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
- CLASSERT (offsetof(kib_msg_t, ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
+ CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
+ CLASSERT(offsetof(kib_msg_t,
+ ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <=
+ IBLND_MSG_SIZE);
+ CLASSERT(offsetof(kib_msg_t,
+ ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <=
+ IBLND_MSG_SIZE);
- rc = kiblnd_tunables_init();
- if (rc != 0)
- return rc;
+ rc = kiblnd_tunables_init();
+ if (rc != 0)
+ return rc;
- lnet_register_lnd(&the_o2iblnd);
+ lnet_register_lnd(&the_o2iblnd);
- return 0;
+ return 0;
}
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Kernel OpenIB gen2 LND v2.00");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("OpenIB gen2 LNet Network Driver");
+MODULE_VERSION("2.8.0");
MODULE_LICENSE("GPL");
-module_init(kiblnd_module_init);
-module_exit(kiblnd_module_fini);
+module_init(ko2iblnd_init);
+module_exit(ko2iblnd_exit);