*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#include <asm/page.h>
+#include <linux/inetdevice.h>
+
#include "o2iblnd.h"
-static lnd_t the_o2iblnd;
+static const struct lnet_lnd the_o2iblnd;
-kib_data_t kiblnd_data;
+struct kib_data kiblnd_data;
static __u32
kiblnd_cksum (void *ptr, int nob)
static int
kiblnd_msgtype2size(int type)
{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ const int hdr_size = offsetof(struct kib_msg, ibm_u);
switch (type) {
case IBLND_MSG_CONNREQ:
case IBLND_MSG_CONNACK:
- return hdr_size + sizeof(kib_connparams_t);
+ return hdr_size + sizeof(struct kib_connparams);
case IBLND_MSG_NOOP:
return hdr_size;
case IBLND_MSG_IMMEDIATE:
- return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+ return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);
case IBLND_MSG_PUT_REQ:
- return hdr_size + sizeof(kib_putreq_msg_t);
+ return hdr_size + sizeof(struct kib_putreq_msg);
case IBLND_MSG_PUT_ACK:
- return hdr_size + sizeof(kib_putack_msg_t);
+ return hdr_size + sizeof(struct kib_putack_msg);
case IBLND_MSG_GET_REQ:
- return hdr_size + sizeof(kib_get_msg_t);
+ return hdr_size + sizeof(struct kib_get_msg);
case IBLND_MSG_PUT_NAK:
case IBLND_MSG_PUT_DONE:
case IBLND_MSG_GET_DONE:
- return hdr_size + sizeof(kib_completion_msg_t);
+ return hdr_size + sizeof(struct kib_completion_msg);
default:
return -1;
}
}
-static int
-kiblnd_unpack_rd(kib_msg_t *msg, int flip)
+static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
{
- kib_rdma_desc_t *rd;
+ struct kib_rdma_desc *rd;
int nob;
int n;
int i;
return 1;
}
- nob = offsetof (kib_msg_t, ibm_u) +
+ nob = offsetof(struct kib_msg, ibm_u) +
kiblnd_rd_msg_size(rd, msg->ibm_type, n);
if (msg->ibm_nob < nob) {
return 0;
}
-void
-kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp)
+void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
+ int credits, lnet_nid_t dstnid, __u64 dststamp)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
/* CAVEAT EMPTOR! all message fields not set here should have been
* initialised previously. */
}
}
-int
-kiblnd_unpack_msg(kib_msg_t *msg, int nob)
+int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ const int hdr_size = offsetof(struct kib_msg, ibm_u);
__u32 msg_cksum;
__u16 version;
int msg_nob;
msg->ibm_cksum = msg_cksum;
if (flip) {
- /* leave magic unflipped as a clue to peer endianness */
+ /* leave magic unflipped as a clue to peer_ni endianness */
msg->ibm_version = version;
- CLASSERT (sizeof(msg->ibm_type) == 1);
- CLASSERT (sizeof(msg->ibm_credits) == 1);
+ BUILD_BUG_ON(sizeof(msg->ibm_type) != 1);
+ BUILD_BUG_ON(sizeof(msg->ibm_credits) != 1);
msg->ibm_nob = msg_nob;
__swab64s(&msg->ibm_srcnid);
__swab64s(&msg->ibm_srcstamp);
}
int
-kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
+kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
+ lnet_nid_t nid)
{
- kib_peer_t *peer;
- kib_net_t *net = ni->ni_data;
- int cpt = lnet_cpt_of_nid(nid);
- unsigned long flags;
+ struct kib_peer_ni *peer_ni;
+ struct kib_net *net = ni->ni_data;
+ int cpt = lnet_cpt_of_nid(nid, ni);
+ unsigned long flags;
LASSERT(net != NULL);
LASSERT(nid != LNET_NID_ANY);
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
- if (peer == NULL) {
- CERROR("Cannot allocate peer\n");
+ LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
+ if (peer_ni == NULL) {
+ CERROR("Cannot allocate peer_ni\n");
return -ENOMEM;
}
- peer->ibp_ni = ni;
- peer->ibp_nid = nid;
- peer->ibp_error = 0;
- peer->ibp_last_alive = 0;
- peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
- peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+ peer_ni->ibp_ni = ni;
+ peer_ni->ibp_nid = nid;
+ peer_ni->ibp_error = 0;
+ peer_ni->ibp_last_alive = 0;
+ peer_ni->ibp_max_frags = IBLND_MAX_RDMA_FRAGS;
+ peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
+ atomic_set(&peer_ni->ibp_refcount, 1); /* 1 ref for caller */
- INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- INIT_LIST_HEAD(&peer->ibp_conns);
- INIT_LIST_HEAD(&peer->ibp_tx_queue);
+ INIT_LIST_HEAD(&peer_ni->ibp_list); /* not in the peer_ni table yet */
+ INIT_LIST_HEAD(&peer_ni->ibp_conns);
+ INIT_LIST_HEAD(&peer_ni->ibp_tx_queue);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- *peerp = peer;
+ *peerp = peer_ni;
return 0;
}
void
-kiblnd_destroy_peer (kib_peer_t *peer)
+kiblnd_destroy_peer(struct kib_peer_ni *peer_ni)
{
- kib_net_t *net = peer->ibp_ni->ni_data;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
LASSERT(net != NULL);
- LASSERT (atomic_read(&peer->ibp_refcount) == 0);
- LASSERT(!kiblnd_peer_active(peer));
- LASSERT(kiblnd_peer_idle(peer));
- LASSERT(list_empty(&peer->ibp_tx_queue));
+ LASSERT(atomic_read(&peer_ni->ibp_refcount) == 0);
+ LASSERT(!kiblnd_peer_active(peer_ni));
+ LASSERT(kiblnd_peer_idle(peer_ni));
+ LASSERT(list_empty(&peer_ni->ibp_tx_queue));
- LIBCFS_FREE(peer, sizeof(*peer));
+ LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
- /* NB a peer's connections keep a reference on their peer until
+ /* NB a peer_ni's connections keep a reference on their peer_ni until
* they are destroyed, so we can be assured that _all_ state to do
- * with this peer has been cleaned up when its refcount drops to
+ * with this peer_ni has been cleaned up when its refcount drops to
* zero. */
atomic_dec(&net->ibn_npeers);
}
-kib_peer_t *
-kiblnd_find_peer_locked (lnet_nid_t nid)
+struct kib_peer_ni *
+kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid)
{
/* the caller is responsible for accounting the additional reference
* that this creates */
struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct list_head *tmp;
- kib_peer_t *peer;
+ struct kib_peer_ni *peer_ni;
list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
-
- if (peer->ibp_nid != nid)
+ peer_ni = list_entry(tmp, struct kib_peer_ni, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
+
+ /*
+ * Match a peer only if both its NID and the NID of the
+ * local NI it communicates over are the same; otherwise
+ * skip it, which results in a new lnd peer being created.
+ */
+ if (peer_ni->ibp_nid != nid ||
+ peer_ni->ibp_ni->ni_nid != ni->ni_nid)
continue;
- CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
- peer, libcfs_nid2str(nid),
- atomic_read(&peer->ibp_refcount),
- peer->ibp_version);
- return peer;
+ CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n",
+ peer_ni, libcfs_nid2str(nid),
+ atomic_read(&peer_ni->ibp_refcount),
+ peer_ni->ibp_version);
+ return peer_ni;
}
return NULL;
}
void
-kiblnd_unlink_peer_locked (kib_peer_t *peer)
+kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni)
{
- LASSERT(list_empty(&peer->ibp_conns));
+ LASSERT(list_empty(&peer_ni->ibp_conns));
- LASSERT (kiblnd_peer_active(peer));
- list_del_init(&peer->ibp_list);
+ LASSERT(kiblnd_peer_active(peer_ni));
+ list_del_init(&peer_ni->ibp_list);
/* lose peerlist's ref */
- kiblnd_peer_decref(peer);
+ kiblnd_peer_decref(peer_ni);
}
static int
-kiblnd_get_peer_info(lnet_ni_t *ni, int index,
+kiblnd_get_peer_info(struct lnet_ni *ni, int index,
lnet_nid_t *nidp, int *count)
{
- kib_peer_t *peer;
+ struct kib_peer_ni *peer_ni;
struct list_head *ptmp;
int i;
unsigned long flags;
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
if (index-- > 0)
continue;
- *nidp = peer->ibp_nid;
- *count = atomic_read(&peer->ibp_refcount);
+ *nidp = peer_ni->ibp_nid;
+ *count = atomic_read(&peer_ni->ibp_refcount);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
}
static void
-kiblnd_del_peer_locked (kib_peer_t *peer)
+kiblnd_del_peer_locked(struct kib_peer_ni *peer_ni)
{
- struct list_head *ctmp;
- struct list_head *cnxt;
- kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ struct kib_conn *conn;
- if (list_empty(&peer->ibp_conns)) {
- kiblnd_unlink_peer_locked(peer);
+ if (list_empty(&peer_ni->ibp_conns)) {
+ kiblnd_unlink_peer_locked(peer_ni);
} else {
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
kiblnd_close_conn_locked(conn, 0);
}
- /* NB closing peer's last conn unlinked it. */
+ /* NB closing peer_ni's last conn unlinked it. */
}
- /* NB peer now unlinked; might even be freed if the peer table had the
+ /* NB peer_ni now unlinked; might even be freed if the peer_ni table had the
* last ref on it. */
}
static int
-kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
+kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
+ LIST_HEAD(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- kib_peer_t *peer;
+ struct kib_peer_ni *peer_ni;
int lo;
int hi;
int i;
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
+ if (!(nid == LNET_NID_ANY || peer_ni->ibp_nid == nid))
continue;
- if (!list_empty(&peer->ibp_tx_queue)) {
- LASSERT(list_empty(&peer->ibp_conns));
+ if (!list_empty(&peer_ni->ibp_tx_queue)) {
+ LASSERT(list_empty(&peer_ni->ibp_conns));
- list_splice_init(&peer->ibp_tx_queue,
+ list_splice_init(&peer_ni->ibp_tx_queue,
&zombies);
}
- kiblnd_del_peer_locked(peer);
+ kiblnd_del_peer_locked(peer_ni);
rc = 0; /* matched something */
}
}
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_txlist_done(ni, &zombies, -EIO);
+ kiblnd_txlist_done(&zombies, -EIO, LNET_MSG_STATUS_LOCAL_ERROR);
return rc;
}
-static kib_conn_t *
-kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
+static struct kib_conn *
+kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
{
- kib_peer_t *peer;
+ struct kib_peer_ni *peer_ni;
struct list_head *ptmp;
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
int i;
unsigned long flags;
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- list_for_each(ctmp, &peer->ibp_conns) {
+ list_for_each(ctmp, &peer_ni->ibp_conns) {
if (index-- > 0)
continue;
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
kiblnd_conn_addref(conn);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
}
static void
-kiblnd_debug_rx (kib_rx_t *rx)
+kiblnd_debug_rx(struct kib_rx *rx)
{
- CDEBUG(D_CONSOLE, " %p status %d msg_type %x cred %d\n",
- rx, rx->rx_status, rx->rx_msg->ibm_type,
- rx->rx_msg->ibm_credits);
+ CDEBUG(D_CONSOLE, " %p msg_type %x cred %d\n",
+ rx, rx->rx_msg->ibm_type,
+ rx->rx_msg->ibm_credits);
}
static void
-kiblnd_debug_tx (kib_tx_t *tx)
+kiblnd_debug_tx(struct kib_tx *tx)
{
- CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx "
- "cookie "LPX64" msg %s%s type %x cred %d\n",
+ CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lld "
+ "cookie %#llx msg %s%s type %x cred %d\n",
tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
- tx->tx_status, tx->tx_deadline, tx->tx_cookie,
+ tx->tx_status, ktime_to_ns(tx->tx_deadline), tx->tx_cookie,
tx->tx_lntmsg[0] == NULL ? "-" : "!",
tx->tx_lntmsg[1] == NULL ? "-" : "!",
tx->tx_msg->ibm_type, tx->tx_msg->ibm_credits);
}
void
-kiblnd_debug_conn (kib_conn_t *conn)
+kiblnd_debug_conn(struct kib_conn *conn)
{
struct list_head *tmp;
int i;
CDEBUG(D_CONSOLE, " early_rxs:\n");
list_for_each(tmp, &conn->ibc_early_rxs)
- kiblnd_debug_rx(list_entry(tmp, kib_rx_t, rx_list));
+ kiblnd_debug_rx(list_entry(tmp, struct kib_rx, rx_list));
CDEBUG(D_CONSOLE, " tx_noops:\n");
list_for_each(tmp, &conn->ibc_tx_noops)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
list_for_each(tmp, &conn->ibc_tx_queue_nocred)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " tx_queue:\n");
list_for_each(tmp, &conn->ibc_tx_queue)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " active_txs:\n");
list_for_each(tmp, &conn->ibc_active_txs)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " rxs:\n");
for (i = 0; i < IBLND_RX_MSGS(conn); i++)
}
static int
-kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
+kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
{
- cpumask_t *mask;
+ cpumask_var_t *mask;
int vectors;
int off;
int i;
+ lnet_nid_t ibp_nid;
vectors = conn->ibc_cmid->device->num_comp_vectors;
if (vectors <= 1)
mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
/* hash NID to CPU id in this partition... */
- off = conn->ibc_peer->ibp_nid % cpumask_weight(mask);
- for_each_cpu(i, mask) {
+ ibp_nid = conn->ibc_peer->ibp_nid;
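+ /* do_div() divides the 64-bit NID in place and returns the
+ * remainder, so the modulo is correct on 32-bit kernels too */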
+ off = do_div(ibp_nid, cpumask_weight(*mask));
+ for_each_cpu(i, *mask) {
if (off-- == 0)
return i % vectors;
}
return 1;
}
-kib_conn_t *
-kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+/*
+ * Get the scheduler bound to this CPT. If the scheduler has no
+ * threads, which means that the CPT has no CPUs, then grab the
+ * next scheduler that we can use.
+ *
+ * This case would be triggered if a NUMA node is configured with
+ * no associated CPUs.
+ */
+static struct kib_sched_info *
+kiblnd_get_scheduler(int cpt)
+{
+ struct kib_sched_info *sched;
+ int i;
+
+ sched = kiblnd_data.kib_scheds[cpt];
+
+ if (sched->ibs_nthreads > 0)
+ return sched;
+
+ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
+ if (sched->ibs_nthreads > 0) {
+ CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
+ cpt, sched->ibs_cpt);
+ return sched;
+ }
+ }
+
+ return NULL;
+}
+
+static unsigned int kiblnd_send_wrs(struct kib_conn *conn)
+{
+ /*
+ * One WR for the LNet message, plus ibc_max_frags WRs
+ * for the transfer itself
+ */
+ int ret;
+ int multiplier = 1 + conn->ibc_max_frags;
+ enum kib_dev_caps dev_caps = conn->ibc_hdev->ibh_dev->ibd_dev_caps;
+
+ /* FastReg needs two extra WRs for map and invalidate */
+ if (dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)
+ multiplier += 2;
+
+ /* account for a maximum of ibc_queue_depth in-flight transfers */
+ ret = multiplier * conn->ibc_queue_depth;
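+ /* e.g. with FastReg enabled, ibc_max_frags = 256 and a queue
+ * depth of 8, this asks for (1 + 256 + 2) * 8 = 2072 WRs */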
+
+ if (ret > conn->ibc_hdev->ibh_max_qp_wr) {
+ CDEBUG(D_NET, "peer_credits %u will result in send work "
+ "request size %d larger than maximum %d device "
+ "can handle\n", conn->ibc_queue_depth, ret,
+ conn->ibc_hdev->ibh_max_qp_wr);
+ conn->ibc_queue_depth =
+ conn->ibc_hdev->ibh_max_qp_wr / multiplier;
+ }
+
+ /* don't go beyond the maximum the device can handle */
+ return min(ret, conn->ibc_hdev->ibh_max_qp_wr);
+}
+
+struct kib_conn *
+kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid,
int state, int version)
{
/* CAVEAT EMPTOR:
* If the new conn is created successfully it takes over the caller's
- * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
- * is destroyed. On failure, the caller's ref on 'peer' remains and
+ * ref on 'peer_ni'. It also "owns" 'cmid' and destroys it when it itself
+ * is destroyed. On failure, the caller's ref on 'peer_ni' remains and
* she must dispose of 'cmid'. (Actually I'd block forever if I tried
* to destroy 'cmid' here since I'm called from the CM which still has
* its ref on 'cmid'). */
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer->ibp_ni->ni_data;
- kib_dev_t *dev;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
+ struct kib_dev *dev;
struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched;
#ifdef HAVE_IB_CQ_INIT_ATTR
struct ib_cq_init_attr cq_attr = {};
#endif
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct ib_cq *cq;
unsigned long flags;
int cpt;
dev = net->ibn_dev;
- cpt = lnet_cpt_of_nid(peer->ibp_nid);
- sched = kiblnd_data.kib_scheds[cpt];
+ cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni);
+ sched = kiblnd_get_scheduler(cpt);
+
+ if (sched == NULL) {
+ CERROR("no schedulers available. node is unhealthy\n");
+ goto failed_0;
+ }
- LASSERT(sched->ibs_nthreads > 0);
+ /*
+ * The cpt might have changed if we ended up selecting a
+ * scheduler that is not native to this cpt, so use the
+ * scheduler's cpt instead.
+ */
+ cpt = sched->ibs_cpt;
LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
sizeof(*init_qp_attr));
if (init_qp_attr == NULL) {
CERROR("Can't allocate qp_attr for %s\n",
- libcfs_nid2str(peer->ibp_nid));
+ libcfs_nid2str(peer_ni->ibp_nid));
goto failed_0;
}
LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
if (conn == NULL) {
CERROR("Can't allocate connection for %s\n",
- libcfs_nid2str(peer->ibp_nid));
+ libcfs_nid2str(peer_ni->ibp_nid));
goto failed_1;
}
conn->ibc_state = IBLND_CONN_INIT;
conn->ibc_version = version;
- conn->ibc_peer = peer; /* I take the caller's ref */
+ conn->ibc_peer = peer_ni; /* I take the caller's ref */
cmid->context = conn; /* for future CM callbacks */
conn->ibc_cmid = cmid;
- conn->ibc_max_frags = peer->ibp_max_frags;
- conn->ibc_queue_depth = peer->ibp_queue_depth;
+ conn->ibc_max_frags = peer_ni->ibp_max_frags;
+ conn->ibc_queue_depth = peer_ni->ibp_queue_depth;
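+ /* RX buffers are allocated later, once the QP exists; NULL these
+ * now so the cleanup path can tell whether they were created */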
+ conn->ibc_rxs = NULL;
+ conn->ibc_rx_pages = NULL;
INIT_LIST_HEAD(&conn->ibc_early_rxs);
INIT_LIST_HEAD(&conn->ibc_tx_noops);
INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
INIT_LIST_HEAD(&conn->ibc_active_txs);
+ INIT_LIST_HEAD(&conn->ibc_zombie_txs);
spin_lock_init(&conn->ibc_lock);
LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
write_unlock_irqrestore(glock, flags);
- LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
- if (conn->ibc_rxs == NULL) {
- CERROR("Cannot allocate RX buffers\n");
- goto failed_2;
- }
-
- rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
- IBLND_RX_MSG_PAGES(conn));
- if (rc != 0)
- goto failed_2;
-
- kiblnd_map_rx_descs(conn);
-
#ifdef HAVE_IB_CQ_INIT_ATTR
cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
kiblnd_get_completion_vector(conn, cpt));
#endif
if (IS_ERR(cq)) {
+ /*
+ * on MLX-5 (possibly MLX-4 as well) this error can be
+ * hit if concurrent_sends and/or peer_tx_credits is set
+ * too high, or due to an MLX-5 bug which tries to kmalloc
+ * a 256KB WR cookie array
+ */
CERROR("Failed to create CQ with %d CQEs: %ld\n",
IBLND_CQ_ENTRIES(conn), PTR_ERR(cq));
goto failed_2;
goto failed_2;
}
- init_qp_attr->event_handler = kiblnd_qp_event;
- init_qp_attr->qp_context = conn;
- init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn);
+ init_qp_attr->event_handler = kiblnd_qp_event;
+ init_qp_attr->qp_context = conn;
+ init_qp_attr->cap.max_send_sge = *kiblnd_tunables.kib_wrq_sge;
+ init_qp_attr->cap.max_recv_sge = 1;
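+ /* with IB_SIGNAL_REQ_WR only work requests flagged
+ * IB_SEND_SIGNALED generate completions */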
+ init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_qp_attr->qp_type = IB_QPT_RC;
+ init_qp_attr->send_cq = cq;
+ init_qp_attr->recv_cq = cq;
+ /*
+ * kiblnd_send_wrs() can reduce the connection's queue depth
+ * if it would exceed the maximum number of work requests the
+ * device can handle
+ */
+ init_qp_attr->cap.max_send_wr = kiblnd_send_wrs(conn);
init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
- init_qp_attr->cap.max_send_sge = 1;
- init_qp_attr->cap.max_recv_sge = 1;
- init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- init_qp_attr->qp_type = IB_QPT_RC;
- init_qp_attr->send_cq = cq;
- init_qp_attr->recv_cq = cq;
+
+ rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
+ if (rc) {
+ CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d, "
+ "send_sge: %d, recv_sge: %d\n",
+ rc, init_qp_attr->cap.max_send_wr,
+ init_qp_attr->cap.max_recv_wr,
+ init_qp_attr->cap.max_send_sge,
+ init_qp_attr->cap.max_recv_sge);
+ goto failed_2;
+ }
conn->ibc_sched = sched;
- rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (rc != 0) {
- CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
- rc, init_qp_attr->cap.max_send_wr,
- init_qp_attr->cap.max_recv_wr);
- goto failed_2;
- }
+ if (conn->ibc_queue_depth != peer_ni->ibp_queue_depth)
+ CWARN("peer %s - queue depth reduced from %u to %u"
+ " to allow for qp creation\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ peer_ni->ibp_queue_depth,
+ conn->ibc_queue_depth);
- LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+ LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
+ IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
+ if (conn->ibc_rxs == NULL) {
+ CERROR("Cannot allocate RX buffers\n");
+ goto failed_2;
+ }
- /* 1 ref for caller and each rxmsg */
+ rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
+ IBLND_RX_MSG_PAGES(conn));
+ if (rc != 0)
+ goto failed_2;
+
+ kiblnd_map_rx_descs(conn);
+
+ LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
+
+ /* 1 ref for caller and each rxmsg */
atomic_set(&conn->ibc_refcount, 1 + IBLND_RX_MSGS(conn));
conn->ibc_nrx = IBLND_RX_MSGS(conn);
- /* post receives */
+ /* post receives */
for (i = 0; i < IBLND_RX_MSGS(conn); i++) {
- rc = kiblnd_post_rx(&conn->ibc_rxs[i],
- IBLND_POSTRX_NO_CREDIT);
+ rc = kiblnd_post_rx(&conn->ibc_rxs[i], IBLND_POSTRX_NO_CREDIT);
if (rc != 0) {
CERROR("Can't post rxmsg: %d\n", rc);
return conn;
failed_2:
- kiblnd_destroy_conn(conn, true);
+ kiblnd_destroy_conn(conn);
+ LIBCFS_FREE(conn, sizeof(*conn));
failed_1:
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
failed_0:
}
void
-kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
+kiblnd_destroy_conn(struct kib_conn *conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
- kib_peer_t *peer = conn->ibc_peer;
- int rc;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
LASSERT (!in_interrupt());
LASSERT (atomic_read(&conn->ibc_refcount) == 0);
if (cmid != NULL && cmid->qp != NULL)
rdma_destroy_qp(cmid);
- if (conn->ibc_cq != NULL) {
- rc = ib_destroy_cq(conn->ibc_cq);
- if (rc != 0)
- CWARN("Error destroying CQ: %d\n", rc);
- }
+ if (conn->ibc_cq)
+ ib_destroy_cq(conn->ibc_cq);
+
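+ /* finalize any txs parked on the zombie list; with the QP and
+ * CQ gone they can never complete normally */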
+ kiblnd_txlist_done(&conn->ibc_zombie_txs, -ECONNABORTED,
+ LNET_MSG_STATUS_OK);
if (conn->ibc_rx_pages != NULL)
kiblnd_unmap_rx_descs(conn);
if (conn->ibc_rxs != NULL) {
LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
}
if (conn->ibc_connvars != NULL)
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
if (conn->ibc_state != IBLND_CONN_INIT) {
- kib_net_t *net = peer->ibp_ni->ni_data;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
- kiblnd_peer_decref(peer);
+ kiblnd_peer_decref(peer_ni);
rdma_destroy_id(cmid);
atomic_dec(&net->ibn_nconns);
}
-
- if (free_conn)
- LIBCFS_FREE(conn, sizeof(*conn));
}
int
-kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
+kiblnd_close_peer_conns_locked(struct kib_peer_ni *peer_ni, int why)
{
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
CDEBUG(D_NET, "Closing conn -> %s, "
"version: %x, reason: %d\n",
- libcfs_nid2str(peer->ibp_nid),
+ libcfs_nid2str(peer_ni->ibp_nid),
conn->ibc_version, why);
kiblnd_close_conn_locked(conn, why);
}
int
-kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni,
int version, __u64 incarnation)
{
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
if (conn->ibc_version == version &&
conn->ibc_incarnation == incarnation)
continue;
CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
- "incarnation:"LPX64"(%x, "LPX64")\n",
- libcfs_nid2str(peer->ibp_nid),
+ "incarnation:%#llx(%x, %#llx)\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
conn->ibc_version, conn->ibc_incarnation,
version, incarnation);
}
static int
-kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
+kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
{
- kib_peer_t *peer;
+ struct kib_peer_ni *peer_ni;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
+ if (!(nid == LNET_NID_ANY || nid == peer_ni->ibp_nid))
continue;
- count += kiblnd_close_peer_conns_locked(peer, 0);
+ count += kiblnd_close_peer_conns_locked(peer_ni, 0);
}
}
}
static int
-kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
int rc = -EINVAL;
break;
}
case IOC_LIBCFS_GET_CONN: {
- kib_conn_t *conn;
+ struct kib_conn *conn;
rc = 0;
conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
break;
}
- LASSERT (conn->ibc_cmid != NULL);
- data->ioc_nid = conn->ibc_peer->ibp_nid;
- if (conn->ibc_cmid->route.path_rec == NULL)
- data->ioc_u32[0] = 0; /* iWarp has no path MTU */
- else
- data->ioc_u32[0] =
- ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
- kiblnd_conn_decref(conn);
- break;
+ LASSERT(conn->ibc_cmid != NULL);
+ data->ioc_nid = conn->ibc_peer->ibp_nid;
+ if (conn->ibc_cmid->route.path_rec == NULL)
+ data->ioc_u32[0] = 0; /* iWarp has no path MTU */
+ else
+ data->ioc_u32[0] =
+ ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
+ kiblnd_conn_decref(conn);
+ break;
}
case IOC_LIBCFS_CLOSE_CONNECTION: {
rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
}
static void
-kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
-{
- cfs_time_t last_alive = 0;
- cfs_time_t now = cfs_time_current();
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer;
- unsigned long flags;
-
- read_lock_irqsave(glock, flags);
-
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL)
- last_alive = peer->ibp_last_alive;
-
- read_unlock_irqrestore(glock, flags);
-
- if (last_alive != 0)
- *when = last_alive;
-
- /* peer is not persistent in hash, trigger peer creation
- * and connection establishment with a NULL tx */
- if (peer == NULL)
- kiblnd_launch_tx(ni, NULL, nid);
-
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
- libcfs_nid2str(nid), peer,
- last_alive ? cfs_duration_sec(now - last_alive) : -1);
- return;
-}
-
-static void
-kiblnd_free_pages(kib_pages_t *p)
+kiblnd_free_pages(struct kib_pages *p)
{
int npages = p->ibp_npages;
int i;
__free_page(p->ibp_pages[i]);
}
- LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
+ LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
}
int
-kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
+kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
{
- kib_pages_t *p;
- int i;
+ struct kib_pages *p;
+ int i;
LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
- offsetof(kib_pages_t, ibp_pages[npages]));
+ offsetof(struct kib_pages, ibp_pages[npages]));
if (p == NULL) {
CERROR("Can't allocate descriptor for %d pages\n", npages);
return -ENOMEM;
}
- memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
+ memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
p->ibp_npages = npages;
for (i = 0; i < npages; i++) {
}
void
-kiblnd_unmap_rx_descs(kib_conn_t *conn)
+kiblnd_unmap_rx_descs(struct kib_conn *conn)
{
- kib_rx_t *rx;
+ struct kib_rx *rx;
int i;
LASSERT (conn->ibc_rxs != NULL);
}
void
-kiblnd_map_rx_descs(kib_conn_t *conn)
+kiblnd_map_rx_descs(struct kib_conn *conn)
{
- kib_rx_t *rx;
+ struct kib_rx *rx;
struct page *pg;
int pg_off;
int ipg;
rx = &conn->ibc_rxs[i];
rx->rx_conn = conn;
- rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
+ rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) + pg_off);
rx->rx_msgaddr =
kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
rx->rx_msgaddr));
KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
- CDEBUG(D_NET, "rx %d: %p "LPX64"("LPX64")\n",
+ CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
i, rx->rx_msg, rx->rx_msgaddr,
(__u64)(page_to_phys(pg) + pg_off));
}
static void
-kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
+kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
{
- kib_hca_dev_t *hdev = tpo->tpo_hdev;
- kib_tx_t *tx;
- int i;
+ struct kib_hca_dev *hdev = tpo->tpo_hdev;
+ struct kib_tx *tx;
+ int i;
LASSERT (tpo->tpo_pool.po_allocated == 0);
tpo->tpo_hdev = NULL;
}
-static kib_hca_dev_t *
-kiblnd_current_hdev(kib_dev_t *dev)
+static struct kib_hca_dev *
+kiblnd_current_hdev(struct kib_dev *dev)
{
- kib_hca_dev_t *hdev;
+ struct kib_hca_dev *hdev;
unsigned long flags;
int i = 0;
if (i++ % 50 == 0)
CDEBUG(D_NET, "%s: Wait for failover\n",
dev->ibd_ifname);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 100);
+ schedule_timeout_interruptible(cfs_time_seconds(1) / 100);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
}
static void
-kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
-{
- kib_pages_t *txpgs = tpo->tpo_tx_pages;
- kib_pool_t *pool = &tpo->tpo_pool;
- kib_net_t *net = pool->po_owner->ps_net;
- kib_dev_t *dev;
- struct page *page;
- kib_tx_t *tx;
+kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
+{
+ struct kib_pages *txpgs = tpo->tpo_tx_pages;
+ struct kib_pool *pool = &tpo->tpo_pool;
+ struct kib_net *net = pool->po_owner->ps_net;
+ struct kib_dev *dev;
+ struct page *page;
+ struct kib_tx *tx;
int page_offset;
int ipage;
int i;
dev = net->ibn_dev;
- /* pre-mapped messages are not bigger than 1 page */
- CLASSERT (IBLND_MSG_SIZE <= PAGE_SIZE);
+ /* pre-mapped messages are not bigger than 1 page */
+ BUILD_BUG_ON(IBLND_MSG_SIZE > PAGE_SIZE);
- /* No fancy arithmetic when we do the buffer calculations */
- CLASSERT (PAGE_SIZE % IBLND_MSG_SIZE == 0);
+ /* No fancy arithmetic when we do the buffer calculations */
+ BUILD_BUG_ON(PAGE_SIZE % IBLND_MSG_SIZE != 0);
tpo->tpo_hdev = kiblnd_current_hdev(dev);
page = txpgs->ibp_pages[ipage];
tx = &tpo->tpo_tx_descs[i];
- tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
- page_offset);
+ tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
+ page_offset);
tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev,
tx->tx_msg,
}
}
-struct ib_mr *
-kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd,
- int negotiated_nfrags)
-{
- __u16 nfrags = (negotiated_nfrags != -1) ?
- negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand;
-
- LASSERT(hdev->ibh_mrs != NULL);
-
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- nfrags <= rd->rd_nfrags)
- return NULL;
-
- return hdev->ibh_mrs;
-}
-
static void
-kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
+kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
{
- LASSERT (pool->fpo_map_count == 0);
+ LASSERT(fpo->fpo_map_count == 0);
- if (pool->fpo_fmr_pool != NULL)
- ib_destroy_fmr_pool(pool->fpo_fmr_pool);
+ if (fpo->fpo_is_fmr && fpo->fmr.fpo_fmr_pool) {
+ ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ } else {
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i = 0;
+
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+#ifndef HAVE_IB_MAP_MR_SG
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ i++;
+ }
+ if (i < fpo->fast_reg.fpo_pool_size)
+ CERROR("FastReg pool still has %d regions registered\n",
+ fpo->fast_reg.fpo_pool_size - i);
+ }
- if (pool->fpo_hdev != NULL)
- kiblnd_hdev_decref(pool->fpo_hdev);
+ if (fpo->fpo_hdev)
+ kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(pool, sizeof(kib_fmr_pool_t));
+ LIBCFS_FREE(fpo, sizeof(*fpo));
}
static void
kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *pool;
+ struct kib_fmr_pool *fpo, *tmp;
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
- list_del(&pool->fpo_list);
- kiblnd_destroy_fmr_pool(pool);
+ list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
+ list_del(&fpo->fpo_list);
+ kiblnd_destroy_fmr_pool(fpo);
}
}
-static int kiblnd_fmr_pool_size(int ncpts)
+static int
+kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
+ int size = tunables->lnd_fmr_pool_size / ncpts;
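+ /* split the configured pool size across CPTs, but never drop
+ * below IBLND_FMR_POOL per CPT */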
return max(IBLND_FMR_POOL, size);
}
-static int kiblnd_fmr_flush_trigger(int ncpts)
+static int
+kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
+ int size = tunables->lnd_fmr_flush_trigger / ncpts;
return max(IBLND_FMR_POOL_FLUSH, size);
}
-static int
-kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
-{
- /* FMR pool for RDMA */
- kib_dev_t *dev = fps->fps_net->ibn_dev;
- kib_fmr_pool_t *fpo;
- struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
- .page_shift = PAGE_SHIFT,
- .access = (IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE),
+static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps,
+ struct kib_fmr_pool *fpo)
+{
+ struct ib_fmr_pool_param param = {
+ .max_pages_per_fmr = LNET_MAX_IOV,
+ .page_shift = PAGE_SHIFT,
+ .access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE),
.pool_size = fps->fps_pool_size,
.dirty_watermark = fps->fps_flush_trigger,
.flush_function = NULL,
.flush_arg = NULL,
- .cache = !!*kiblnd_tunables.kib_fmr_cache};
+ .cache = !!fps->fps_cache };
+ int rc = 0;
+
+ fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
+ ¶m);
+ if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
+ rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
+ if (rc != -ENOSYS)
+ CERROR("Failed to create FMR pool: %d\n", rc);
+ else
+ CERROR("FMRs are not supported\n");
+ }
+ fpo->fpo_is_fmr = true;
+
+ return rc;
+}
+
+static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps,
+ struct kib_fmr_pool *fpo,
+ enum kib_dev_caps dev_caps)
+{
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i, rc;
+
+ fpo->fpo_is_fmr = false;
+
+ INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size = 0;
+ for (i = 0; i < fps->fps_pool_size; i++) {
+ LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
+ sizeof(*frd));
+ if (!frd) {
+ CERROR("Failed to allocate a new fast_reg descriptor\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+ frd->frd_mr = NULL;
+
+#ifndef HAVE_IB_MAP_MR_SG
+ frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
+ LNET_MAX_IOV);
+ if (IS_ERR(frd->frd_frpl)) {
+ rc = PTR_ERR(frd->frd_frpl);
+ CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
+ rc);
+ frd->frd_frpl = NULL;
+ goto out_middle;
+ }
+#endif
+
+#ifdef HAVE_IB_ALLOC_FAST_REG_MR
+ frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
+ LNET_MAX_IOV);
+#else
+ /*
+ * we expect to get here only on MLX-5 cards: MLX-4 cards
+ * will always use FMR and MLX-5 cards will always use
+ * fast_reg. It turns out that some MLX-5 cards (possibly
+ * due to older FW versions) do not natively support gaps,
+ * so we need to track that capability here.
+ */
+ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
+#ifdef IB_MR_TYPE_SG_GAPS
+ ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
+ (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT)) ?
+ IB_MR_TYPE_SG_GAPS :
+ IB_MR_TYPE_MEM_REG,
+#else
+ IB_MR_TYPE_MEM_REG,
+#endif
+ LNET_MAX_IOV);
+ if ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
+ (dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT))
+ CWARN("using IB_MR_TYPE_SG_GAPS, expect a performance drop\n");
+#endif
+ if (IS_ERR(frd->frd_mr)) {
+ rc = PTR_ERR(frd->frd_mr);
+ CERROR("Failed to allocate ib_fast_reg_mr: %d\n", rc);
+ frd->frd_mr = NULL;
+ goto out_middle;
+ }
+
+ /* There appears to be a bug in MLX5 code where you must
+ * invalidate the rkey of a new FastReg pool before first
+ * using it. Thus, I am marking the FRD invalid here. */
+ frd->frd_valid = false;
+
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size++;
+ }
+
+ return 0;
+
+out_middle:
+ if (frd->frd_mr)
+ ib_dereg_mr(frd->frd_mr);
+#ifndef HAVE_IB_MAP_MR_SG
+ if (frd->frd_frpl)
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
+ LIBCFS_FREE(frd, sizeof(*frd));
+
+out:
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+#ifndef HAVE_IB_MAP_MR_SG
+ ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ }
+
+ return rc;
+}
+
+static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
+ struct kib_fmr_pool **pp_fpo)
+{
+ struct kib_dev *dev = fps->fps_net->ibn_dev;
+ struct kib_fmr_pool *fpo;
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
- if (fpo == NULL)
+ if (!fpo) {
return -ENOMEM;
+ }
+ memset(fpo, 0, sizeof(*fpo));
fpo->fpo_hdev = kiblnd_current_hdev(dev);
- fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, ¶m);
- if (IS_ERR(fpo->fpo_fmr_pool)) {
- rc = PTR_ERR(fpo->fpo_fmr_pool);
- CERROR("Failed to create FMR pool: %d\n", rc);
+ if (dev->ibd_dev_caps & IBLND_DEV_CAPS_FMR_ENABLED)
+ rc = kiblnd_alloc_fmr_pool(fps, fpo);
+ else
+ rc = kiblnd_alloc_freg_pool(fps, fpo, dev->ibd_dev_caps);
+ if (rc)
+ goto out_fpo;
- kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(kib_fmr_pool_t));
- return rc;
- }
+ fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
+ fpo->fpo_owner = fps;
+ *pp_fpo = fpo;
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
- *pp_fpo = fpo;
+ return 0;
- return 0;
+out_fpo:
+ kiblnd_hdev_decref(fpo->fpo_hdev);
+ LIBCFS_FREE(fpo, sizeof(*fpo));
+ return rc;
}
static void
-kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies)
+kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies)
{
if (fps->fps_net == NULL) /* initialized? */
return;
spin_lock(&fps->fps_lock);
while (!list_empty(&fps->fps_pool_list)) {
- kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
- kib_fmr_pool_t, fpo_list);
+ struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
+ struct kib_fmr_pool,
+ fpo_list);
+
fpo->fpo_failed = 1;
- list_del(&fpo->fpo_list);
if (fpo->fpo_map_count == 0)
- list_add(&fpo->fpo_list, zombies);
+ list_move(&fpo->fpo_list, zombies);
else
- list_add(&fpo->fpo_list, &fps->fps_failed_pool_list);
+ list_move(&fpo->fpo_list, &fps->fps_failed_pool_list);
}
spin_unlock(&fps->fps_lock);
}
static void
-kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
+kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
{
if (fps->fps_net != NULL) { /* initialized? */
kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
}
static int
-kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net,
- int pool_size, int flush_trigger)
+kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
+ struct kib_net *net,
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
- kib_fmr_pool_t *fpo;
- int rc;
+ struct kib_fmr_pool *fpo;
+ int rc;
- memset(fps, 0, sizeof(kib_fmr_poolset_t));
+ memset(fps, 0, sizeof(struct kib_fmr_poolset));
fps->fps_net = net;
fps->fps_cpt = cpt;
- fps->fps_pool_size = pool_size;
- fps->fps_flush_trigger = flush_trigger;
+
+ fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
+ fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
+ fps->fps_cache = tunables->lnd_fmr_cache;
+
spin_lock_init(&fps->fps_lock);
INIT_LIST_HEAD(&fps->fps_pool_list);
INIT_LIST_HEAD(&fps->fps_failed_pool_list);
}
static int
-kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, cfs_time_t now)
+kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, time64_t now)
{
if (fpo->fpo_map_count != 0) /* still in use */
return 0;
if (fpo->fpo_failed)
return 1;
- return cfs_time_aftereq(now, fpo->fpo_deadline);
+ return now >= fpo->fpo_deadline;
}
-void
-kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
+static int
+kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
- kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps = fpo->fpo_owner;
- cfs_time_t now = cfs_time_current();
- kib_fmr_pool_t *tmp;
- int rc;
+ struct kib_hca_dev *hdev;
+ __u64 *pages = tx->tx_pages;
+ int npages;
+ int size;
+ int i;
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(rc == 0);
+ hdev = tx->tx_pool->tpo_hdev;
- if (status != 0) {
- rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT(rc == 0);
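+ /* split each RDMA fragment into HCA-page-sized pieces so the
+ * mapping code sees a flat array of page addresses */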
+ for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+ for (size = 0; size < rd->rd_frags[i].rf_nob;
+ size += hdev->ibh_page_size) {
+ pages[npages++] = (rd->rd_frags[i].rf_addr &
+ hdev->ibh_page_mask) + size;
+ }
}
+ return npages;
+}
+
+void
+kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
+{
+ LIST_HEAD(zombies);
+ struct kib_fmr_pool *fpo = fmr->fmr_pool;
+ struct kib_fmr_poolset *fps;
+ time64_t now = ktime_get_seconds();
+ struct kib_fmr_pool *tmp;
+ int rc;
+
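+ /* a tx that never mapped an MR has no pool to release */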
+ if (!fpo)
+ return;
+
+ fps = fpo->fpo_owner;
+ if (fpo->fpo_is_fmr) {
+ if (fmr->fmr_pfmr) {
+ ib_fmr_pool_unmap(fmr->fmr_pfmr);
+ fmr->fmr_pfmr = NULL;
+ }
+
+ if (status) {
+ rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ LASSERT(!rc);
+ }
+ } else {
+ struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
+
+ if (frd) {
+ frd->frd_valid = false;
+ spin_lock(&fps->fps_lock);
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ spin_unlock(&fps->fps_lock);
+ fmr->fmr_frd = NULL;
+ }
+ }
fmr->fmr_pool = NULL;
- fmr->fmr_pfmr = NULL;
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--; /* decref the pool */
kiblnd_destroy_fmr_pool_list(&zombies);
}
-int
-kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u64 iov, kib_fmr_t *fmr)
+int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, u32 nob, u64 iov,
+ struct kib_fmr *fmr)
{
- struct ib_pool_fmr *pfmr;
- kib_fmr_pool_t *fpo;
- __u64 version;
- int rc;
+ struct kib_fmr_pool *fpo;
+ __u64 *pages = tx->tx_pages;
+ __u64 version;
+ bool is_rx = (rd != tx->tx_rd);
+ bool tx_pages_mapped = false;
+ int npages = 0;
+ int rc;
again:
spin_lock(&fps->fps_lock);
version = fps->fps_version;
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
- fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
fpo->fpo_map_count++;
- spin_unlock(&fps->fps_lock);
- pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
- pages, npages, iov);
- if (likely(!IS_ERR(pfmr))) {
- fmr->fmr_pool = fpo;
- fmr->fmr_pfmr = pfmr;
- return 0;
- }
+ if (fpo->fpo_is_fmr) {
+ struct ib_pool_fmr *pfmr;
+
+ spin_unlock(&fps->fps_lock);
+
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = true;
+ }
+
+ pfmr = kib_fmr_pool_map(fpo->fmr.fpo_fmr_pool,
+ pages, npages, iov);
+ if (likely(!IS_ERR(pfmr))) {
+ fmr->fmr_key = is_rx ? pfmr->fmr->rkey
+ : pfmr->fmr->lkey;
+ fmr->fmr_frd = NULL;
+ fmr->fmr_pfmr = pfmr;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ rc = PTR_ERR(pfmr);
+ } else {
+ if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
+ struct kib_fast_reg_descriptor *frd;
+#ifdef HAVE_IB_MAP_MR_SG
+ struct ib_reg_wr *wr;
+ int n;
+#else
+ struct ib_rdma_wr *wr;
+ struct ib_fast_reg_page_list *frpl;
+#endif
+ struct ib_mr *mr;
+
+ frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
+ struct kib_fast_reg_descriptor,
+ frd_list);
+ list_del(&frd->frd_list);
+ spin_unlock(&fps->fps_lock);
+
+#ifndef HAVE_IB_MAP_MR_SG
+ frpl = frd->frd_frpl;
+#endif
+ mr = frd->frd_mr;
+
+ if (!frd->frd_valid) {
+ struct ib_rdma_wr *inv_wr;
+ __u32 key = is_rx ? mr->rkey : mr->lkey;
+
+ inv_wr = &frd->frd_inv_wr;
+ memset(inv_wr, 0, sizeof(*inv_wr));
+
+ inv_wr->wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr.wr_id = IBLND_WID_MR;
+ inv_wr->wr.ex.invalidate_rkey = key;
+
+ /* Bump the key */
+ key = ib_inc_rkey(key);
+ ib_update_fast_reg_key(mr, key);
+ }
+
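+ /* newer kernels add an sg_offset argument to ib_map_mr_sg(),
+ * hence the HAVE_IB_MAP_MR_SG_5ARGS check below */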
+#ifdef HAVE_IB_MAP_MR_SG
+#ifdef HAVE_IB_MAP_MR_SG_5ARGS
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, NULL, PAGE_SIZE);
+#else
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, PAGE_SIZE);
+#endif
+ if (unlikely(n != tx->tx_nfrags)) {
+ CERROR("Failed to map mr %d/%d "
+ "elements\n", n, tx->tx_nfrags);
+ return n < 0 ? n : -EINVAL;
+ }
+
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = is_rx ? mr->rkey : mr->lkey;
+ wr->access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+#else
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = true;
+ }
+
+ LASSERT(npages <= frpl->max_page_list_len);
+ memcpy(frpl->page_list, pages,
+ sizeof(*pages) * npages);
+
+ /* Prepare FastReg WR */
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+
+ wr->wr.opcode = IB_WR_FAST_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+
+ wr->wr.wr.fast_reg.iova_start = iov;
+ wr->wr.wr.fast_reg.page_list = frpl;
+ wr->wr.wr.fast_reg.page_list_len = npages;
+ wr->wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+ wr->wr.wr.fast_reg.length = nob;
+ wr->wr.wr.fast_reg.rkey =
+ is_rx ? mr->rkey : mr->lkey;
+ wr->wr.wr.fast_reg.access_flags =
+ (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+#endif
+
+ fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
+ fmr->fmr_frd = frd;
+ fmr->fmr_pfmr = NULL;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ spin_unlock(&fps->fps_lock);
+ rc = -EAGAIN;
+ }
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--;
- if (PTR_ERR(pfmr) != -EAGAIN) {
+ if (rc != -EAGAIN) {
spin_unlock(&fps->fps_lock);
- return PTR_ERR(pfmr);
+ return rc;
}
/* EAGAIN and ... */
}
- if (cfs_time_before(cfs_time_current(), fps->fps_next_retry)) {
+ if (ktime_get_seconds() < fps->fps_next_retry) {
/* someone failed recently */
spin_unlock(&fps->fps_lock);
return -EAGAIN;
fps->fps_version++;
list_add_tail(&fpo->fpo_list, &fps->fps_pool_list);
} else {
- fps->fps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ fps->fps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
}
spin_unlock(&fps->fps_lock);
}
static void
-kiblnd_fini_pool(kib_pool_t *pool)
+kiblnd_fini_pool(struct kib_pool *pool)
{
LASSERT(list_empty(&pool->po_free_list));
LASSERT(pool->po_allocated == 0);
}
static void
-kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
+kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
{
CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
- memset(pool, 0, sizeof(kib_pool_t));
+ memset(pool, 0, sizeof(struct kib_pool));
INIT_LIST_HEAD(&pool->po_free_list);
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- pool->po_owner = ps;
- pool->po_size = size;
+ pool->po_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
+ pool->po_owner = ps;
+ pool->po_size = size;
}
static void
kiblnd_destroy_pool_list(struct list_head *head)
{
- kib_pool_t *pool;
+ struct kib_pool *pool;
while (!list_empty(head)) {
- pool = list_entry(head->next, kib_pool_t, po_list);
+ pool = list_entry(head->next, struct kib_pool, po_list);
list_del(&pool->po_list);
LASSERT(pool->po_owner != NULL);
}
static void
-kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
+kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
if (ps->ps_net == NULL) /* initialized? */
return;
spin_lock(&ps->ps_lock);
while (!list_empty(&ps->ps_pool_list)) {
- kib_pool_t *po = list_entry(ps->ps_pool_list.next,
- kib_pool_t, po_list);
+ struct kib_pool *po = list_entry(ps->ps_pool_list.next,
+ struct kib_pool, po_list);
+
po->po_failed = 1;
- list_del(&po->po_list);
if (po->po_allocated == 0)
- list_add(&po->po_list, zombies);
+ list_move(&po->po_list, zombies);
else
- list_add(&po->po_list, &ps->ps_failed_pool_list);
+ list_move(&po->po_list, &ps->ps_failed_pool_list);
}
spin_unlock(&ps->ps_lock);
}
static void
-kiblnd_fini_poolset(kib_poolset_t *ps)
+kiblnd_fini_poolset(struct kib_poolset *ps)
{
if (ps->ps_net != NULL) { /* initialized? */
kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
}
static int
-kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
- kib_net_t *net, char *name, int size,
+kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
+ struct kib_net *net, char *name, int size,
kib_ps_pool_create_t po_create,
kib_ps_pool_destroy_t po_destroy,
kib_ps_node_init_t nd_init,
kib_ps_node_fini_t nd_fini)
{
- kib_pool_t *pool;
- int rc;
+ struct kib_pool *pool;
+ int rc;
- memset(ps, 0, sizeof(kib_poolset_t));
+ memset(ps, 0, sizeof(struct kib_poolset));
ps->ps_cpt = cpt;
ps->ps_net = net;
}
static int
-kiblnd_pool_is_idle(kib_pool_t *pool, cfs_time_t now)
+kiblnd_pool_is_idle(struct kib_pool *pool, time64_t now)
{
if (pool->po_allocated != 0) /* still in use */
return 0;
if (pool->po_failed)
return 1;
- return cfs_time_aftereq(now, pool->po_deadline);
+ return now >= pool->po_deadline;
}
void
-kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
+kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
- kib_poolset_t *ps = pool->po_owner;
- kib_pool_t *tmp;
- cfs_time_t now = cfs_time_current();
+ LIST_HEAD(zombies);
+ struct kib_poolset *ps = pool->po_owner;
+ struct kib_pool *tmp;
+ time64_t now = ktime_get_seconds();
spin_lock(&ps->ps_lock);
}
struct list_head *
-kiblnd_pool_alloc_node(kib_poolset_t *ps)
+kiblnd_pool_alloc_node(struct kib_poolset *ps)
{
struct list_head *node;
- kib_pool_t *pool;
+ struct kib_pool *pool;
int rc;
unsigned int interval = 1;
- cfs_time_t time_before;
- unsigned int trips = 0;
+ ktime_t time_before;
+ unsigned int trips = 0;
again:
spin_lock(&ps->ps_lock);
continue;
pool->po_allocated++;
- pool->po_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
+ pool->po_deadline = ktime_get_seconds() +
+ IBLND_POOL_DEADLINE;
node = pool->po_free_list.next;
list_del(node);
"trips = %d\n",
ps->ps_name, interval, trips);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(interval);
+ schedule_timeout_interruptible(interval);
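+ /* back off exponentially, up to roughly a second between
+ * retries */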
if (interval < cfs_time_seconds(1))
interval *= 2;
goto again;
}
- if (cfs_time_before(cfs_time_current(), ps->ps_next_retry)) {
+ if (ktime_get_seconds() < ps->ps_next_retry) {
/* someone failed recently */
spin_unlock(&ps->ps_lock);
return NULL;
spin_unlock(&ps->ps_lock);
CDEBUG(D_NET, "%s pool exhausted, allocate new pool\n", ps->ps_name);
- time_before = cfs_time_current();
+ time_before = ktime_get();
rc = ps->ps_pool_create(ps, ps->ps_pool_size, &pool);
- CDEBUG(D_NET, "ps_pool_create took %lu HZ to complete",
- cfs_time_current() - time_before);
+ CDEBUG(D_NET, "ps_pool_create took %lld ms to complete",
+ ktime_ms_delta(ktime_get(), time_before));
spin_lock(&ps->ps_lock);
ps->ps_increasing = 0;
if (rc == 0) {
list_add_tail(&pool->po_list, &ps->ps_pool_list);
} else {
- ps->ps_next_retry = cfs_time_shift(IBLND_POOL_RETRY);
+ ps->ps_next_retry = ktime_get_seconds() + IBLND_POOL_RETRY;
CERROR("Can't allocate new %s pool because out of memory\n",
ps->ps_name);
}
}
static void
-kiblnd_destroy_tx_pool(kib_pool_t *pool)
+kiblnd_destroy_tx_pool(struct kib_pool *pool)
{
- kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
- int i;
+ struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool,
+ tpo_pool);
+ int i;
LASSERT (pool->po_allocated == 0);
goto out;
for (i = 0; i < pool->po_size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ struct kib_tx *tx = &tpo->tpo_tx_descs[i];
+ int wrq_sge = *kiblnd_tunables.kib_wrq_sge;
list_del(&tx->tx_list);
if (tx->tx_pages != NULL)
sizeof(*tx->tx_pages));
if (tx->tx_frags != NULL)
LIBCFS_FREE(tx->tx_frags,
- IBLND_MAX_RDMA_FRAGS *
- sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (tx->tx_wrq != NULL)
LIBCFS_FREE(tx->tx_wrq,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_wrq));
- if (tx->tx_sge != NULL)
- LIBCFS_FREE(tx->tx_sge,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge));
+ if (tx->tx_sge != NULL)
+ LIBCFS_FREE(tx->tx_sge,
+ (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
+ sizeof(*tx->tx_sge));
if (tx->tx_rd != NULL)
LIBCFS_FREE(tx->tx_rd,
- offsetof(kib_rdma_desc_t,
+ offsetof(struct kib_rdma_desc,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
}
LIBCFS_FREE(tpo->tpo_tx_descs,
- pool->po_size * sizeof(kib_tx_t));
+ pool->po_size * sizeof(struct kib_tx));
out:
kiblnd_fini_pool(pool);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ CFS_FREE_PTR(tpo);
}
-static int kiblnd_tx_pool_size(int ncpts)
+static int kiblnd_tx_pool_size(struct lnet_ni *ni, int ncpts)
{
- int ntx = *kiblnd_tunables.kib_ntx / ncpts;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int ntx;
+
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+ ntx = tunables->lnd_ntx / ncpts;
return max(IBLND_TX_POOL, ntx);
}
static int
-kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po)
+kiblnd_create_tx_pool(struct kib_poolset *ps, int size, struct kib_pool **pp_po)
{
int i;
int npg;
- kib_pool_t *pool;
- kib_tx_pool_t *tpo;
+ struct kib_pool *pool;
+ struct kib_tx_pool *tpo;
LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
if (tpo == NULL) {
npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
CERROR("Can't allocate tx pages: %d\n", npg);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ CFS_FREE_PTR(tpo);
return -ENOMEM;
}
LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
- size * sizeof(kib_tx_t));
+ size * sizeof(struct kib_tx));
if (tpo->tpo_tx_descs == NULL) {
CERROR("Can't allocate %d tx descriptors\n", size);
ps->ps_pool_destroy(pool);
return -ENOMEM;
}
- memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
+ memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));
for (i = 0; i < size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ struct kib_tx *tx = &tpo->tpo_tx_descs[i];
+ int wrq_sge = *kiblnd_tunables.kib_wrq_sge;
tx->tx_pool = tpo;
if (ps->ps_net->ibn_fmr_ps != NULL) {
}
LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (tx->tx_frags == NULL)
break;
- sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
+ sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
break;
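+ /* each of the 1 + IBLND_MAX_RDMA_FRAGS work requests may carry
+ * up to wrq_sge scatter/gather entries */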
LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
+ (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
sizeof(*tx->tx_sge));
if (tx->tx_sge == NULL)
break;
LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
- offsetof(kib_rdma_desc_t,
+ offsetof(struct kib_rdma_desc,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
if (tx->tx_rd == NULL)
break;
}
static void
-kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
+kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
{
- kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
- tps_poolset);
- kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
+ struct kib_tx_poolset *tps = container_of(pool->po_owner,
+ struct kib_tx_poolset,
+ tps_poolset);
+ struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list);
tx->tx_cookie = tps->tps_next_tx_cookie++;
}
static void
-kiblnd_net_fini_pools(kib_net_t *net)
+kiblnd_net_fini_pools(struct kib_net *net)
{
int i;
cfs_cpt_for_each(i, lnet_cpt_table()) {
- kib_tx_poolset_t *tps;
- kib_fmr_poolset_t *fps;
+ struct kib_tx_poolset *tps;
+ struct kib_fmr_poolset *fps;
if (net->ibn_tx_ps != NULL) {
tps = net->ibn_tx_ps[i];
}
static int
-kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
+kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts,
+ int ncpts)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+#ifdef HAVE_IB_GET_DMA_MR
unsigned long flags;
+#endif
int cpt;
int rc;
int i;
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+
+#ifdef HAVE_IB_GET_DMA_MR
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_map_on_demand == 0) {
+ /*
+ * if lnd_map_on_demand is zero then we have effectively disabled
+ * FMR or FastReg and we're using global memory regions
+ * exclusively.
+ */
+ if (!tunables->lnd_map_on_demand) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
goto create_tx_pool;
}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+#endif
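+	/* Every in-flight tx may consume an FMR, so refuse an FMR pool
+	 * small enough to throttle the tx pools sized below. */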
- if (*kiblnd_tunables.kib_fmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
+ if (tunables->lnd_fmr_pool_size < tunables->lnd_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_fmr_pool_size,
- *kiblnd_tunables.kib_ntx / 4);
+ tunables->lnd_fmr_pool_size,
+ tunables->lnd_ntx / 4);
rc = -EINVAL;
goto failed;
}
	/* premapping can fail if ibd_nmr > 1, so we always create
	 * FMR pool and map-on-demand if premapping failed */
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_fmr_poolset_t));
+ sizeof(struct kib_fmr_poolset));
if (net->ibn_fmr_ps == NULL) {
CERROR("Failed to allocate FMR pool array\n");
rc = -ENOMEM;
for (i = 0; i < ncpts; i++) {
cpt = (cpts == NULL) ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
- kiblnd_fmr_pool_size(ncpts),
- kiblnd_fmr_flush_trigger(ncpts));
+ rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
+ net, tunables);
if (rc != 0) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
if (i > 0)
LASSERT(i == ncpts);
+#ifdef HAVE_IB_GET_DMA_MR
create_tx_pool:
+#endif
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_tx_poolset_t));
+ sizeof(struct kib_tx_poolset));
if (net->ibn_tx_ps == NULL) {
CERROR("Failed to allocate tx pool array\n");
rc = -ENOMEM;
cpt = (cpts == NULL) ? i : cpts[i];
rc = kiblnd_init_poolset(&net->ibn_tx_ps[cpt]->tps_poolset,
cpt, net, "TX",
- kiblnd_tx_pool_size(ncpts),
+ kiblnd_tx_pool_size(ni, ncpts),
kiblnd_create_tx_pool,
kiblnd_destroy_tx_pool,
kiblnd_tx_init, NULL);
}
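+/* Query the state of the HCA port backing this device and cache it in
+ * ibh_state under the global lock, so that startup and the async event
+ * handler agree on whether the port is usable. */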
static int
-kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
+kiblnd_port_get_attr(struct kib_hca_dev *hdev)
{
- struct ib_device_attr *attr;
- int rc;
+ struct ib_port_attr *port_attr;
+ int rc;
+ unsigned long flags;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- /* It's safe to assume a HCA can handle a page size
- * matching that of the native system */
- hdev->ibh_page_shift = PAGE_SHIFT;
- hdev->ibh_page_size = 1 << PAGE_SHIFT;
- hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
+ LIBCFS_ALLOC(port_attr, sizeof(*port_attr));
+ if (port_attr == NULL) {
+ CDEBUG(D_NETERROR, "Out of memory\n");
+ return -ENOMEM;
+ }
- LIBCFS_ALLOC(attr, sizeof(*attr));
- if (attr == NULL) {
- CERROR("Out of memory\n");
- return -ENOMEM;
- }
+ rc = ib_query_port(hdev->ibh_ibdev, hdev->ibh_port, port_attr);
- rc = ib_query_device(hdev->ibh_ibdev, attr);
- if (rc == 0)
- hdev->ibh_mr_size = attr->max_mr_size;
+ write_lock_irqsave(g_lock, flags);
- LIBCFS_FREE(attr, sizeof(*attr));
+ if (rc == 0)
+ hdev->ibh_state = port_attr->state == IB_PORT_ACTIVE
+ ? IBLND_DEV_PORT_ACTIVE
+ : IBLND_DEV_PORT_DOWN;
- if (rc != 0) {
- CERROR("Failed to query IB device: %d\n", rc);
- return rc;
- }
+ write_unlock_irqrestore(g_lock, flags);
+ LIBCFS_FREE(port_attr, sizeof(*port_attr));
- if (hdev->ibh_mr_size == ~0ULL) {
- hdev->ibh_mr_shift = 64;
- return 0;
- }
+ if (rc != 0) {
+ CDEBUG(D_NETERROR, "Failed to query IB port: %d\n", rc);
+ return rc;
+ }
+ return 0;
+}
- CERROR("Invalid mr size: "LPX64"\n", hdev->ibh_mr_size);
- return -EINVAL;
+static inline void
+kiblnd_set_ni_fatal_on(struct kib_hca_dev *hdev, int val)
+{
+ struct kib_net *net;
+
+ /* for health check */
+ list_for_each_entry(net, &hdev->ibh_dev->ibd_nets, ibn_list) {
+ if (val)
+ CDEBUG(D_NETERROR, "Fatal device error for NI %s\n",
+ libcfs_nid2str(net->ibn_ni->ni_nid));
+ atomic_set(&net->ibn_ni->ni_fatal_error_on, val);
+ }
+}
+
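+/* Asynchronous IB event callback, registered via
+ * ib_register_event_handler() in kiblnd_dev_failover(); tracks fatal
+ * device and port up/down events and propagates them to LNet's per-NI
+ * health flag. */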
+void
+kiblnd_event_handler(struct ib_event_handler *handler, struct ib_event *event)
+{
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ struct kib_hca_dev *hdev;
+ unsigned long flags;
+
+ hdev = container_of(handler, struct kib_hca_dev, ibh_event_handler);
+
+ write_lock_irqsave(g_lock, flags);
+
+ switch (event->event) {
+ case IB_EVENT_DEVICE_FATAL:
+ CDEBUG(D_NET, "IB device fatal\n");
+ hdev->ibh_state = IBLND_DEV_FATAL;
+ kiblnd_set_ni_fatal_on(hdev, 1);
+ break;
+ case IB_EVENT_PORT_ACTIVE:
+ CDEBUG(D_NET, "IB port active\n");
+ if (event->element.port_num == hdev->ibh_port) {
+ hdev->ibh_state = IBLND_DEV_PORT_ACTIVE;
+ kiblnd_set_ni_fatal_on(hdev, 0);
+ }
+ break;
+ case IB_EVENT_PORT_ERR:
+ CDEBUG(D_NET, "IB port err\n");
+ if (event->element.port_num == hdev->ibh_port) {
+ hdev->ibh_state = IBLND_DEV_PORT_DOWN;
+ kiblnd_set_ni_fatal_on(hdev, 1);
+ }
+ break;
+ default:
+ break;
+ }
+ write_unlock_irqrestore(g_lock, flags);
+}
+
+static int
+kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
+{
+ struct ib_device_attr *dev_attr;
+ int rc = 0;
+ int rc2 = 0;
+
+ /* It's safe to assume a HCA can handle a page size
+ * matching that of the native system */
+ hdev->ibh_page_shift = PAGE_SHIFT;
+ hdev->ibh_page_size = 1 << PAGE_SHIFT;
+ hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
+
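+	/* Kernels with HAVE_IB_DEVICE_ATTRS cache the device attributes
+	 * in ib_device itself; older ones must call ib_query_device(). */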
+#ifndef HAVE_IB_DEVICE_ATTRS
+ LIBCFS_ALLOC(dev_attr, sizeof(*dev_attr));
+ if (dev_attr == NULL) {
+ CERROR("Out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = ib_query_device(hdev->ibh_ibdev, dev_attr);
+ if (rc != 0) {
+ CERROR("Failed to query IB device: %d\n", rc);
+ goto out_clean_attr;
+ }
+#else
+ dev_attr = &hdev->ibh_ibdev->attrs;
+#endif
+
+ hdev->ibh_mr_size = dev_attr->max_mr_size;
+ hdev->ibh_max_qp_wr = dev_attr->max_qp_wr;
+
+ /* Setup device Memory Registration capabilities */
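+	/* With HAVE_IB_DEVICE_OPS the verbs callbacks live in
+	 * ib_device::ops rather than directly in ib_device, hence the
+	 * two variants of the FMR capability check. */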
+#ifdef HAVE_IB_DEVICE_OPS
+ if (hdev->ibh_ibdev->ops.alloc_fmr &&
+ hdev->ibh_ibdev->ops.dealloc_fmr &&
+ hdev->ibh_ibdev->ops.map_phys_fmr &&
+ hdev->ibh_ibdev->ops.unmap_fmr) {
+#else
+ if (hdev->ibh_ibdev->alloc_fmr &&
+ hdev->ibh_ibdev->dealloc_fmr &&
+ hdev->ibh_ibdev->map_phys_fmr &&
+ hdev->ibh_ibdev->unmap_fmr) {
+#endif
+ LCONSOLE_INFO("Using FMR for registration\n");
+ hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
+ } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ LCONSOLE_INFO("Using FastReg for registration\n");
+ hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED;
+#ifndef HAVE_IB_ALLOC_FAST_REG_MR
+#ifdef IB_DEVICE_SG_GAPS_REG
+ if (dev_attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT;
+#endif
+#endif
+ } else {
+ rc = -ENOSYS;
+ }
+
+ rc2 = kiblnd_port_get_attr(hdev);
+ if (rc2 != 0)
+ return rc2;
+
+ if (rc != 0)
+ rc = -EINVAL;
+
+#ifndef HAVE_IB_DEVICE_ATTRS
+out_clean_attr:
+ LIBCFS_FREE(dev_attr, sizeof(*dev_attr));
+#endif
+
+ if (rc == -ENOSYS)
+ CERROR("IB device does not support FMRs nor FastRegs, can't "
+ "register memory: %d\n", rc);
+ else if (rc == -EINVAL)
+ CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
+ return rc;
}
+#ifdef HAVE_IB_GET_DMA_MR
static void
-kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
+kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
{
if (hdev->ibh_mrs == NULL)
return;
hdev->ibh_mrs = NULL;
}
+#endif
void
-kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
+kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
{
+ if (hdev->ibh_event_handler.device != NULL)
+ ib_unregister_event_handler(&hdev->ibh_event_handler);
+
+#ifdef HAVE_IB_GET_DMA_MR
kiblnd_hdev_cleanup_mrs(hdev);
+#endif
if (hdev->ibh_pd != NULL)
ib_dealloc_pd(hdev->ibh_pd);
LIBCFS_FREE(hdev, sizeof(*hdev));
}
+#ifdef HAVE_IB_GET_DMA_MR
static int
-kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
+kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
{
struct ib_mr *mr;
- int rc;
int acflags = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE;
- rc = kiblnd_hdev_get_attr(hdev);
- if (rc != 0)
- return rc;
-
mr = ib_get_dma_mr(hdev->ibh_pd, acflags);
if (IS_ERR(mr)) {
CERROR("Failed ib_get_dma_mr: %ld\n", PTR_ERR(mr));
return 0;
}
+#endif
static int
kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
}
static int
-kiblnd_dev_need_failover(kib_dev_t *dev)
+kiblnd_dev_need_failover(struct kib_dev *dev, struct net *ns)
{
struct rdma_cm_id *cmid;
struct sockaddr_in srcaddr;
	/* we have only two choices at this point:
	 *
* a. rdma_bind_addr(), it will conflict with listener cmid
* b. rdma_resolve_addr() to zero addr */
- cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
+ cmid = kiblnd_rdma_create_id(ns, kiblnd_dummy_callback, dev,
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cmid)) {
rc = PTR_ERR(cmid);
CERROR("Failed to create cmid for failover: %d\n", rc);
}
int
-kiblnd_dev_failover(kib_dev_t *dev)
+kiblnd_dev_failover(struct kib_dev *dev, struct net *ns)
{
- struct list_head zombie_tpo = LIST_HEAD_INIT(zombie_tpo);
- struct list_head zombie_ppo = LIST_HEAD_INIT(zombie_ppo);
- struct list_head zombie_fpo = LIST_HEAD_INIT(zombie_fpo);
+ LIST_HEAD(zombie_tpo);
+ LIST_HEAD(zombie_ppo);
+ LIST_HEAD(zombie_fpo);
struct rdma_cm_id *cmid = NULL;
- kib_hca_dev_t *hdev = NULL;
- kib_hca_dev_t *old;
+ struct kib_hca_dev *hdev = NULL;
+ struct kib_hca_dev *old;
struct ib_pd *pd;
- kib_net_t *net;
+ struct kib_net *net;
struct sockaddr_in addr;
unsigned long flags;
int rc = 0;
dev->ibd_can_failover ||
dev->ibd_hdev == NULL);
- rc = kiblnd_dev_need_failover(dev);
+ rc = kiblnd_dev_need_failover(dev, ns);
if (rc <= 0)
goto out;
rdma_destroy_id(cmid);
}
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
+ cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, dev, RDMA_PS_TCP,
+ IB_QPT_RC);
if (IS_ERR(cmid)) {
rc = PTR_ERR(cmid);
CERROR("Failed to create cmid for failover: %d\n", rc);
hdev->ibh_dev = dev;
hdev->ibh_cmid = cmid;
hdev->ibh_ibdev = cmid->device;
+ hdev->ibh_port = cmid->port_num;
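+	/* ib_alloc_pd() takes an extra flags argument on kernels with
+	 * HAVE_IB_ALLOC_PD_2ARGS */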
- pd = ib_alloc_pd(cmid->device);
- if (IS_ERR(pd)) {
- rc = PTR_ERR(pd);
- CERROR("Can't allocate PD: %d\n", rc);
- goto out;
- }
+#ifdef HAVE_IB_ALLOC_PD_2ARGS
+ pd = ib_alloc_pd(cmid->device, 0);
+#else
+ pd = ib_alloc_pd(cmid->device);
+#endif
+ if (IS_ERR(pd)) {
+ rc = PTR_ERR(pd);
+ CERROR("Can't allocate PD: %d\n", rc);
+ goto out;
+ }
hdev->ibh_pd = pd;
goto out;
}
- rc = kiblnd_hdev_setup_mrs(hdev);
- if (rc != 0) {
- CERROR("Can't setup device: %d\n", rc);
- goto out;
- }
+ rc = kiblnd_hdev_get_attr(hdev);
+ if (rc != 0) {
+ CERROR("Can't get device attributes: %d\n", rc);
+ goto out;
+ }
+
+#ifdef HAVE_IB_GET_DMA_MR
+ rc = kiblnd_hdev_setup_mrs(hdev);
+ if (rc != 0) {
+ CERROR("Can't setup device: %d\n", rc);
+ goto out;
+ }
+#endif
+
+ INIT_IB_EVENT_HANDLER(&hdev->ibh_event_handler,
+ hdev->ibh_ibdev, kiblnd_event_handler);
+ ib_register_event_handler(&hdev->ibh_event_handler);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
void
-kiblnd_destroy_dev (kib_dev_t *dev)
+kiblnd_destroy_dev(struct kib_dev *dev)
{
- LASSERT (dev->ibd_nnets == 0);
+ LASSERT(dev->ibd_nnets == 0);
LASSERT(list_empty(&dev->ibd_nets));
list_del(&dev->ibd_fail_list);
LIBCFS_FREE(dev, sizeof(*dev));
}
-static kib_dev_t *
-kiblnd_create_dev(char *ifname)
-{
- struct net_device *netdev;
- kib_dev_t *dev;
- __u32 netmask;
- __u32 ip;
- int up;
- int rc;
-
- rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
- if (rc != 0) {
- CERROR("Can't query IPoIB interface %s: %d\n",
- ifname, rc);
- return NULL;
- }
-
- if (!up) {
- CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
- return NULL;
- }
-
- LIBCFS_ALLOC(dev, sizeof(*dev));
- if (dev == NULL)
- return NULL;
-
- memset(dev, 0, sizeof(*dev));
- netdev = dev_get_by_name(&init_net, ifname);
- if (netdev == NULL) {
- dev->ibd_can_failover = 0;
- } else {
- dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
- dev_put(netdev);
- }
-
- INIT_LIST_HEAD(&dev->ibd_nets);
- INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
- INIT_LIST_HEAD(&dev->ibd_fail_list);
- dev->ibd_ifip = ip;
- strcpy(&dev->ibd_ifname[0], ifname);
-
- /* initialize the device */
- rc = kiblnd_dev_failover(dev);
- if (rc != 0) {
- CERROR("Can't initialize device: %d\n", rc);
- LIBCFS_FREE(dev, sizeof(*dev));
- return NULL;
- }
-
- list_add_tail(&dev->ibd_list,
- &kiblnd_data.kib_devs);
- return dev;
-}
-
static void
kiblnd_base_shutdown(void)
{
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for %d threads to terminate\n",
atomic_read(&kiblnd_data.kib_nthreads));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* fall through */
}
static void
-kiblnd_shutdown (lnet_ni_t *ni)
+kiblnd_shutdown(struct lnet_ni *ni)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
int i;
unsigned long flags;
/* nuke all existing peers within this net */
kiblnd_del_peer(ni, LNET_NID_ANY);
- /* Wait for all peer state to clean up */
+ /* Wait for all peer_ni state to clean up */
i = 2;
while (atomic_read(&net->ibn_npeers) != 0) {
i++;
"%s: waiting for %d peers to disconnect\n",
libcfs_nid2str(ni->ni_nid),
atomic_read(&net->ibn_npeers));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
kiblnd_net_fini_pools(net);
out:
if (list_empty(&kiblnd_data.kib_devs))
kiblnd_base_shutdown();
- return;
}
static int
-kiblnd_base_startup(void)
+kiblnd_base_startup(struct net *ns)
{
struct kib_sched_info *sched;
int rc;
LASSERT(kiblnd_data.kib_init == IBLND_INIT_NOTHING);
- try_module_get(THIS_MODULE);
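+	/* Pin the module while LND threads are running; unlike the old
+	 * code, treat a failed reference as a startup failure. */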
+ if (!try_module_get(THIS_MODULE))
+ goto failed;
+
memset(&kiblnd_data, 0, sizeof(kiblnd_data)); /* zero pointers, flags etc */
rwlock_init(&kiblnd_data.kib_global_lock);
}
if (*kiblnd_tunables.kib_dev_failover != 0)
- rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
+ rc = kiblnd_thread_start(kiblnd_failover_thread, ns,
"kiblnd_failover");
if (rc != 0) {
return rc;
}
-static int
-kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
+static int kiblnd_dev_start_threads(struct kib_dev *dev, bool newdev, u32 *cpts,
+ int ncpts)
{
int cpt;
int rc;
return 0;
}
-static kib_dev_t *
+static struct kib_dev *
kiblnd_dev_search(char *ifname)
{
- kib_dev_t *alias = NULL;
- kib_dev_t *dev;
- char *colon;
- char *colon2;
+ struct kib_dev *alias = NULL;
+ struct kib_dev *dev;
+ char *colon;
+ char *colon2;
colon = strchr(ifname, ':');
list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
}
static int
-kiblnd_startup (lnet_ni_t *ni)
-{
- char *ifname;
- kib_dev_t *ibdev = NULL;
- kib_net_t *net;
- struct timeval tv;
- unsigned long flags;
- int rc;
- int newdev;
-
- LASSERT (ni->ni_lnd == &the_o2iblnd);
-
- if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
- rc = kiblnd_base_startup();
- if (rc != 0)
- return rc;
- }
+kiblnd_startup(struct lnet_ni *ni)
+{
+ char *ifname = NULL;
+ struct lnet_inetdev *ifaces = NULL;
+ struct kib_dev *ibdev = NULL;
+ struct kib_net *net = NULL;
+ unsigned long flags;
+ int rc;
+ int i;
+ bool newdev;
- LIBCFS_ALLOC(net, sizeof(*net));
- ni->ni_data = net;
- if (net == NULL)
- goto failed;
+ LASSERT(ni->ni_net->net_lnd == &the_o2iblnd);
- memset(net, 0, sizeof(*net));
+ if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
+ rc = kiblnd_base_startup(ni->ni_net_ns);
+ if (rc != 0)
+ return rc;
+ }
- do_gettimeofday(&tv);
- net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ LIBCFS_ALLOC(net, sizeof(*net));
+ ni->ni_data = net;
+ if (net == NULL) {
+ rc = -ENOMEM;
+ goto failed;
+ }
- ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
- ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
- ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
- ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
+ net->ibn_ni = ni;
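+	/* Keep the incarnation in microseconds since the epoch, the same
+	 * units the old do_gettimeofday() code produced. */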
+ net->ibn_incarnation = ktime_get_real_ns() / NSEC_PER_USEC;
+
+ kiblnd_tunables_setup(ni);
+
+ /*
+	 * ni_interfaces exists only to support legacy pre-Multi-Rail
+	 * tcp bonding for ksocklnd. Multi-Rail wants each secondary
+	 * IP to be treated as a unique 'struct ni' interface instead.
+ */
+ if (ni->ni_interfaces[0] != NULL) {
+ /* Use the IPoIB interface specified in 'networks=' */
+ if (ni->ni_interfaces[1] != NULL) {
+ CERROR("ko2iblnd: Multiple interfaces not supported\n");
+ rc = -EINVAL;
+ goto failed;
+ }
- if (ni->ni_interfaces[0] != NULL) {
- /* Use the IPoIB interface specified in 'networks=' */
+ ifname = ni->ni_interfaces[0];
+ } else {
+ ifname = *kiblnd_tunables.kib_default_ipif;
+ }
- CLASSERT (LNET_MAX_INTERFACES > 1);
- if (ni->ni_interfaces[1] != NULL) {
- CERROR("Multiple interfaces not supported\n");
- goto failed;
- }
+ if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
+ CERROR("IPoIB interface name too long: %s\n", ifname);
+ rc = -E2BIG;
+ goto failed;
+ }
- ifname = ni->ni_interfaces[0];
- } else {
- ifname = *kiblnd_tunables.kib_default_ipif;
- }
+ rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
+ if (rc < 0)
+ goto failed;
- if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
- CERROR("IPoIB interface name too long: %s\n", ifname);
- goto failed;
- }
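+	/* lnet_inet_enumerate() returns the number of interfaces found;
+	 * scan them for the IPoIB interface configured for this NI. */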
+ for (i = 0; i < rc; i++) {
+ if (strcmp(ifname, ifaces[i].li_name) == 0)
+ break;
+ }
- ibdev = kiblnd_dev_search(ifname);
+ if (i == rc) {
+ CERROR("ko2iblnd: No matching interfaces\n");
+ rc = -ENOENT;
+ goto failed;
+ }
+ ibdev = kiblnd_dev_search(ifname);
newdev = ibdev == NULL;
/* hmm...create kib_dev even for alias */
- if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
- ibdev = kiblnd_create_dev(ifname);
+ if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0) {
+ LIBCFS_ALLOC(ibdev, sizeof(*ibdev));
+ if (!ibdev) {
+ rc = -ENOMEM;
+ goto failed;
+ }
- if (ibdev == NULL)
- goto failed;
+ ibdev->ibd_ifip = ifaces[i].li_ipaddr;
+ strlcpy(ibdev->ibd_ifname, ifaces[i].li_name,
+ sizeof(ibdev->ibd_ifname));
+ ibdev->ibd_can_failover = !!(ifaces[i].li_flags & IFF_MASTER);
+
+ INIT_LIST_HEAD(&ibdev->ibd_nets);
+ INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */
+ INIT_LIST_HEAD(&ibdev->ibd_fail_list);
+
+ /* initialize the device */
+ rc = kiblnd_dev_failover(ibdev, ni->ni_net_ns);
+ if (rc) {
+ CERROR("ko2iblnd: Can't initialize device: rc = %d\n",
+ rc);
+ goto failed;
+ }
+
+ list_add_tail(&ibdev->ibd_list, &kiblnd_data.kib_devs);
+ }
- net->ibn_dev = ibdev;
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
+ net->ibn_dev = ibdev;
+ ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
- rc = kiblnd_dev_start_threads(ibdev, newdev,
- ni->ni_cpts, ni->ni_ncpts);
+ ni->ni_dev_cpt = ifaces[i].li_cpt;
+
+ rc = kiblnd_dev_start_threads(ibdev, newdev, ni->ni_cpts, ni->ni_ncpts);
if (rc != 0)
goto failed;
- rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0) {
- CERROR("Failed to initialize NI pools: %d\n", rc);
- goto failed;
- }
+ rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
+ if (rc != 0) {
+ CERROR("Failed to initialize NI pools: %d\n", rc);
+ goto failed;
+ }
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
ibdev->ibd_nnets++;
list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
+ /* for health check */
+ if (ibdev->ibd_hdev->ibh_state == IBLND_DEV_PORT_DOWN)
+ kiblnd_set_ni_fatal_on(ibdev->ibd_hdev, 1);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- net->ibn_init = IBLND_INIT_ALL;
+ net->ibn_init = IBLND_INIT_ALL;
- return 0;
+ return 0;
failed:
if (net != NULL && net->ibn_dev == NULL && ibdev != NULL)
- kiblnd_destroy_dev(ibdev);
+ kiblnd_destroy_dev(ibdev);
- kiblnd_shutdown(ni);
+ kfree(ifaces);
+ kiblnd_shutdown(ni);
- CDEBUG(D_NET, "kiblnd_startup failed\n");
- return -ENETDOWN;
+ CDEBUG(D_NET, "Configuration of device %s failed: rc = %d\n",
+ ifname ? ifname : "", rc);
+
+ return -ENETDOWN;
}
-static lnd_t the_o2iblnd = {
+static const struct lnet_lnd the_o2iblnd = {
.lnd_type = O2IBLND,
.lnd_startup = kiblnd_startup,
.lnd_shutdown = kiblnd_shutdown,
.lnd_ctl = kiblnd_ctl,
- .lnd_query = kiblnd_query,
.lnd_send = kiblnd_send,
.lnd_recv = kiblnd_recv,
};
static void __exit ko2iblnd_exit(void)
{
lnet_unregister_lnd(&the_o2iblnd);
- kiblnd_tunables_fini();
}
static int __init ko2iblnd_init(void)
{
int rc;
- CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
- ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <=
- IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
- ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
- <= IBLND_MSG_SIZE);
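+	/* Compile-time sanity checks: a message carrying a fully
+	 * populated RDMA descriptor must still fit in the fixed-size
+	 * message buffer. */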
+ BUILD_BUG_ON(sizeof(struct kib_msg) > IBLND_MSG_SIZE);
+ BUILD_BUG_ON(offsetof(struct kib_msg,
+ ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) >
+ IBLND_MSG_SIZE);
+ BUILD_BUG_ON(offsetof(struct kib_msg,
+ ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) >
+ IBLND_MSG_SIZE);
rc = kiblnd_tunables_init();
if (rc != 0)