*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2015, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <asm/page.h>
#include "o2iblnd.h"
-static lnd_t the_o2iblnd;
+static struct lnet_lnd the_o2iblnd;
kib_data_t kiblnd_data;
}
void
-kiblnd_pack_msg (lnet_ni_t *ni, kib_msg_t *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp)
+kiblnd_pack_msg(struct lnet_ni *ni, kib_msg_t *msg, int version,
+ int credits, lnet_nid_t dstnid, __u64 dststamp)
{
kib_net_t *net = ni->ni_data;
msg->ibm_cksum = msg_cksum;
if (flip) {
- /* leave magic unflipped as a clue to peer endianness */
+ /* leave magic unflipped as a clue to peer_ni endianness */
msg->ibm_version = version;
CLASSERT (sizeof(msg->ibm_type) == 1);
CLASSERT (sizeof(msg->ibm_credits) == 1);
}
int
-kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
+kiblnd_create_peer(struct lnet_ni *ni, kib_peer_ni_t **peerp, lnet_nid_t nid)
{
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
kib_net_t *net = ni->ni_data;
- int cpt = lnet_cpt_of_nid(nid);
+ int cpt = lnet_cpt_of_nid(nid, ni);
unsigned long flags;
LASSERT(net != NULL);
LASSERT(nid != LNET_NID_ANY);
- LIBCFS_CPT_ALLOC(peer, lnet_cpt_table(), cpt, sizeof(*peer));
- if (peer == NULL) {
- CERROR("Cannot allocate peer\n");
+ LIBCFS_CPT_ALLOC(peer_ni, lnet_cpt_table(), cpt, sizeof(*peer_ni));
+ if (peer_ni == NULL) {
+ CERROR("Cannot allocate peer_ni\n");
return -ENOMEM;
}
- peer->ibp_ni = ni;
- peer->ibp_nid = nid;
- peer->ibp_error = 0;
- peer->ibp_last_alive = 0;
- peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
- peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
- atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
+ peer_ni->ibp_ni = ni;
+ peer_ni->ibp_nid = nid;
+ peer_ni->ibp_error = 0;
+ peer_ni->ibp_last_alive = 0;
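+ /* initial max_frags and queue depth come from this NI's tunables */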
+ peer_ni->ibp_max_frags = kiblnd_cfg_rdma_frags(peer_ni->ibp_ni);
+ peer_ni->ibp_queue_depth = ni->ni_net->net_tunables.lct_peer_tx_credits;
+ atomic_set(&peer_ni->ibp_refcount, 1); /* 1 ref for caller */
- INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
- INIT_LIST_HEAD(&peer->ibp_conns);
- INIT_LIST_HEAD(&peer->ibp_tx_queue);
+ INIT_LIST_HEAD(&peer_ni->ibp_list); /* not in the peer_ni table yet */
+ INIT_LIST_HEAD(&peer_ni->ibp_conns);
+ INIT_LIST_HEAD(&peer_ni->ibp_tx_queue);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- *peerp = peer;
+ *peerp = peer_ni;
return 0;
}
void
-kiblnd_destroy_peer (kib_peer_t *peer)
+kiblnd_destroy_peer(kib_peer_ni_t *peer_ni)
{
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer_ni->ibp_ni->ni_data;
LASSERT(net != NULL);
- LASSERT (atomic_read(&peer->ibp_refcount) == 0);
- LASSERT(!kiblnd_peer_active(peer));
- LASSERT(kiblnd_peer_idle(peer));
- LASSERT(list_empty(&peer->ibp_tx_queue));
+ LASSERT(atomic_read(&peer_ni->ibp_refcount) == 0);
+ LASSERT(!kiblnd_peer_active(peer_ni));
+ LASSERT(kiblnd_peer_idle(peer_ni));
+ LASSERT(list_empty(&peer_ni->ibp_tx_queue));
- LIBCFS_FREE(peer, sizeof(*peer));
+ LIBCFS_FREE(peer_ni, sizeof(*peer_ni));
- /* NB a peer's connections keep a reference on their peer until
+ /* NB a peer_ni's connections keep a reference on their peer_ni until
* they are destroyed, so we can be assured that _all_ state to do
- * with this peer has been cleaned up when its refcount drops to
+ * with this peer_ni has been cleaned up when its refcount drops to
* zero. */
atomic_dec(&net->ibn_npeers);
}
-kib_peer_t *
-kiblnd_find_peer_locked (lnet_nid_t nid)
+kib_peer_ni_t *
+kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid)
{
/* the caller is responsible for accounting the additional reference
* that this creates */
struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct list_head *tmp;
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
list_for_each(tmp, peer_list) {
- peer = list_entry(tmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
-
- if (peer->ibp_nid != nid)
+ peer_ni = list_entry(tmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
+
+ /*
+ * Match a peer_ni only if both its NID and the NID of the
+ * local NI it communicates over are the same. Otherwise
+ * skip it, which results in a new lnd peer_ni being
+ * created.
+ */
+ if (peer_ni->ibp_nid != nid ||
+ peer_ni->ibp_ni->ni_nid != ni->ni_nid)
continue;
- CDEBUG(D_NET, "got peer [%p] -> %s (%d) version: %x\n",
- peer, libcfs_nid2str(nid),
- atomic_read(&peer->ibp_refcount),
- peer->ibp_version);
- return peer;
+ CDEBUG(D_NET, "got peer_ni [%p] -> %s (%d) version: %x\n",
+ peer_ni, libcfs_nid2str(nid),
+ atomic_read(&peer_ni->ibp_refcount),
+ peer_ni->ibp_version);
+ return peer_ni;
}
return NULL;
}
void
-kiblnd_unlink_peer_locked (kib_peer_t *peer)
+kiblnd_unlink_peer_locked(kib_peer_ni_t *peer_ni)
{
- LASSERT(list_empty(&peer->ibp_conns));
+ LASSERT(list_empty(&peer_ni->ibp_conns));
- LASSERT (kiblnd_peer_active(peer));
- list_del_init(&peer->ibp_list);
+ LASSERT(kiblnd_peer_active(peer_ni));
+ list_del_init(&peer_ni->ibp_list);
/* lose peerlist's ref */
- kiblnd_peer_decref(peer);
+ kiblnd_peer_decref(peer_ni);
}
static int
-kiblnd_get_peer_info(lnet_ni_t *ni, int index,
+kiblnd_get_peer_info(struct lnet_ni *ni, int index,
lnet_nid_t *nidp, int *count)
{
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
struct list_head *ptmp;
int i;
unsigned long flags;
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
if (index-- > 0)
continue;
- *nidp = peer->ibp_nid;
- *count = atomic_read(&peer->ibp_refcount);
+ *nidp = peer_ni->ibp_nid;
+ *count = atomic_read(&peer_ni->ibp_refcount);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
}
static void
-kiblnd_del_peer_locked (kib_peer_t *peer)
+kiblnd_del_peer_locked (kib_peer_ni_t *peer_ni)
{
struct list_head *ctmp;
struct list_head *cnxt;
kib_conn_t *conn;
- if (list_empty(&peer->ibp_conns)) {
- kiblnd_unlink_peer_locked(peer);
+ if (list_empty(&peer_ni->ibp_conns)) {
+ kiblnd_unlink_peer_locked(peer_ni);
} else {
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
kiblnd_close_conn_locked(conn, 0);
}
- /* NB closing peer's last conn unlinked it. */
+ /* NB closing peer_ni's last conn unlinked it. */
}
- /* NB peer now unlinked; might even be freed if the peer table had the
+ /* NB peer_ni now unlinked; might even be freed if the peer_ni table had the
* last ref on it. */
}
static int
-kiblnd_del_peer (lnet_ni_t *ni, lnet_nid_t nid)
+kiblnd_del_peer(struct lnet_ni *ni, lnet_nid_t nid)
{
struct list_head zombies = LIST_HEAD_INIT(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
int lo;
int hi;
int i;
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- if (!(nid == LNET_NID_ANY || peer->ibp_nid == nid))
+ if (!(nid == LNET_NID_ANY || peer_ni->ibp_nid == nid))
continue;
- if (!list_empty(&peer->ibp_tx_queue)) {
- LASSERT(list_empty(&peer->ibp_conns));
+ if (!list_empty(&peer_ni->ibp_tx_queue)) {
+ LASSERT(list_empty(&peer_ni->ibp_conns));
- list_splice_init(&peer->ibp_tx_queue,
+ list_splice_init(&peer_ni->ibp_tx_queue,
&zombies);
}
- kiblnd_del_peer_locked(peer);
+ kiblnd_del_peer_locked(peer_ni);
rc = 0; /* matched something */
}
}
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_txlist_done(ni, &zombies, -EIO);
+ kiblnd_txlist_done(&zombies, -EIO);
return rc;
}
static kib_conn_t *
-kiblnd_get_conn_by_idx(lnet_ni_t *ni, int index)
+kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
{
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
struct list_head *ptmp;
kib_conn_t *conn;
struct list_head *ctmp;
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- list_for_each(ctmp, &peer->ibp_conns) {
+ list_for_each(ctmp, &peer_ni->ibp_conns) {
if (index-- > 0)
continue;
kiblnd_debug_tx (kib_tx_t *tx)
{
CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lx "
- "cookie "LPX64" msg %s%s type %x cred %d\n",
+ "cookie %#llx msg %s%s type %x cred %d\n",
tx, tx->tx_sending, tx->tx_queued, tx->tx_waiting,
tx->tx_status, tx->tx_deadline, tx->tx_cookie,
tx->tx_lntmsg[0] == NULL ? "-" : "!",
return 1;
}
+/*
+ * Get the scheduler bound to this CPT. If the scheduler has no
+ * threads, which means that the CPT has no CPUs, then grab the
+ * next scheduler that we can use.
+ *
+ * This case would be triggered if a NUMA node is configured with
+ * no associated CPUs.
+ */
+static struct kib_sched_info *
+kiblnd_get_scheduler(int cpt)
+{
+ struct kib_sched_info *sched;
+ int i;
+
+ sched = kiblnd_data.kib_scheds[cpt];
+
+ if (sched->ibs_nthreads > 0)
+ return sched;
+
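+ /* fall back to any scheduler that actually has service threads */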
+ cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds) {
+ if (sched->ibs_nthreads > 0) {
+ CDEBUG(D_NET, "scheduler[%d] has no threads. selected scheduler[%d]\n",
+ cpt, sched->ibs_cpt);
+ return sched;
+ }
+ }
+
+ return NULL;
+}
+
kib_conn_t *
-kiblnd_create_conn(kib_peer_t *peer, struct rdma_cm_id *cmid,
+kiblnd_create_conn(kib_peer_ni_t *peer_ni, struct rdma_cm_id *cmid,
int state, int version)
{
/* CAVEAT EMPTOR:
* If the new conn is created successfully it takes over the caller's
- * ref on 'peer'. It also "owns" 'cmid' and destroys it when it itself
- * is destroyed. On failure, the caller's ref on 'peer' remains and
+ * ref on 'peer_ni'. It also "owns" 'cmid' and destroys it when it itself
+ * is destroyed. On failure, the caller's ref on 'peer_ni' remains and
* she must dispose of 'cmid'. (Actually I'd block forever if I tried
* to destroy 'cmid' here since I'm called from the CM which still has
* its ref on 'cmid'). */
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer_ni->ibp_ni->ni_data;
kib_dev_t *dev;
struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched;
dev = net->ibn_dev;
- cpt = lnet_cpt_of_nid(peer->ibp_nid);
- sched = kiblnd_data.kib_scheds[cpt];
+ cpt = lnet_cpt_of_nid(peer_ni->ibp_nid, peer_ni->ibp_ni);
+ sched = kiblnd_get_scheduler(cpt);
+
+ if (sched == NULL) {
+ CERROR("no schedulers available. node is unhealthy\n");
+ goto failed_0;
+ }
- LASSERT(sched->ibs_nthreads > 0);
+ /*
+ * The cpt might have changed if we ended up selecting a
+ * scheduler that is not bound to this CPT, so use the
+ * scheduler's cpt instead.
+ */
+ cpt = sched->ibs_cpt;
LIBCFS_CPT_ALLOC(init_qp_attr, lnet_cpt_table(), cpt,
sizeof(*init_qp_attr));
if (init_qp_attr == NULL) {
CERROR("Can't allocate qp_attr for %s\n",
- libcfs_nid2str(peer->ibp_nid));
+ libcfs_nid2str(peer_ni->ibp_nid));
goto failed_0;
}
LIBCFS_CPT_ALLOC(conn, lnet_cpt_table(), cpt, sizeof(*conn));
if (conn == NULL) {
CERROR("Can't allocate connection for %s\n",
- libcfs_nid2str(peer->ibp_nid));
+ libcfs_nid2str(peer_ni->ibp_nid));
goto failed_1;
}
conn->ibc_state = IBLND_CONN_INIT;
conn->ibc_version = version;
- conn->ibc_peer = peer; /* I take the caller's ref */
+ conn->ibc_peer = peer_ni; /* I take the caller's ref */
cmid->context = conn; /* for future CM callbacks */
conn->ibc_cmid = cmid;
- conn->ibc_max_frags = peer->ibp_max_frags;
- conn->ibc_queue_depth = peer->ibp_queue_depth;
+ conn->ibc_max_frags = peer_ni->ibp_max_frags;
+ conn->ibc_queue_depth = peer_ni->ibp_queue_depth;
INIT_LIST_HEAD(&conn->ibc_early_rxs);
INIT_LIST_HEAD(&conn->ibc_tx_noops);
goto failed_2;
}
- init_qp_attr->event_handler = kiblnd_qp_event;
- init_qp_attr->qp_context = conn;
+ init_qp_attr->event_handler = kiblnd_qp_event;
+ init_qp_attr->qp_context = conn;
init_qp_attr->cap.max_send_wr = IBLND_SEND_WRS(conn);
init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
- init_qp_attr->cap.max_send_sge = 1;
- init_qp_attr->cap.max_recv_sge = 1;
- init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
- init_qp_attr->qp_type = IB_QPT_RC;
- init_qp_attr->send_cq = cq;
- init_qp_attr->recv_cq = cq;
+ init_qp_attr->cap.max_send_sge = *kiblnd_tunables.kib_wrq_sge;
+ init_qp_attr->cap.max_recv_sge = 1;
+ init_qp_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_qp_attr->qp_type = IB_QPT_RC;
+ init_qp_attr->send_cq = cq;
+ init_qp_attr->recv_cq = cq;
conn->ibc_sched = sched;
} while (rc);
if (rc) {
- CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d\n",
- rc, init_qp_attr->cap.max_send_wr,
- init_qp_attr->cap.max_recv_wr);
+ CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d, "
+ "send_sge: %d, recv_sge: %d\n",
+ rc, init_qp_attr->cap.max_send_wr,
+ init_qp_attr->cap.max_recv_wr,
+ init_qp_attr->cap.max_send_sge,
+ init_qp_attr->cap.max_recv_sge);
goto failed_2;
}
kiblnd_destroy_conn(kib_conn_t *conn, bool free_conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
- kib_peer_t *peer = conn->ibc_peer;
+ kib_peer_ni_t *peer_ni = conn->ibc_peer;
int rc;
LASSERT (!in_interrupt());
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
if (conn->ibc_state != IBLND_CONN_INIT) {
- kib_net_t *net = peer->ibp_ni->ni_data;
+ kib_net_t *net = peer_ni->ibp_ni->ni_data;
- kiblnd_peer_decref(peer);
+ kiblnd_peer_decref(peer_ni);
rdma_destroy_id(cmid);
atomic_dec(&net->ibn_nconns);
}
}
int
-kiblnd_close_peer_conns_locked(kib_peer_t *peer, int why)
+kiblnd_close_peer_conns_locked(kib_peer_ni_t *peer_ni, int why)
{
kib_conn_t *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
CDEBUG(D_NET, "Closing conn -> %s, "
"version: %x, reason: %d\n",
- libcfs_nid2str(peer->ibp_nid),
+ libcfs_nid2str(peer_ni->ibp_nid),
conn->ibc_version, why);
kiblnd_close_conn_locked(conn, why);
}
int
-kiblnd_close_stale_conns_locked(kib_peer_t *peer,
+kiblnd_close_stale_conns_locked(kib_peer_ni_t *peer_ni,
int version, __u64 incarnation)
{
kib_conn_t *conn;
struct list_head *cnxt;
int count = 0;
- list_for_each_safe(ctmp, cnxt, &peer->ibp_conns) {
+ list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
conn = list_entry(ctmp, kib_conn_t, ibc_list);
if (conn->ibc_version == version &&
continue;
CDEBUG(D_NET, "Closing stale conn -> %s version: %x, "
- "incarnation:"LPX64"(%x, "LPX64")\n",
- libcfs_nid2str(peer->ibp_nid),
+ "incarnation:%#llx(%x, %#llx)\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
conn->ibc_version, conn->ibc_incarnation,
version, incarnation);
}
static int
-kiblnd_close_matching_conns(lnet_ni_t *ni, lnet_nid_t nid)
+kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
{
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer = list_entry(ptmp, kib_peer_t, ibp_list);
- LASSERT(!kiblnd_peer_idle(peer));
+ peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ LASSERT(!kiblnd_peer_idle(peer_ni));
- if (peer->ibp_ni != ni)
+ if (peer_ni->ibp_ni != ni)
continue;
- if (!(nid == LNET_NID_ANY || nid == peer->ibp_nid))
+ if (!(nid == LNET_NID_ANY || nid == peer_ni->ibp_nid))
continue;
- count += kiblnd_close_peer_conns_locked(peer, 0);
+ count += kiblnd_close_peer_conns_locked(peer_ni, 0);
}
}
}
static int
-kiblnd_ctl(lnet_ni_t *ni, unsigned int cmd, void *arg)
+kiblnd_ctl(struct lnet_ni *ni, unsigned int cmd, void *arg)
{
struct libcfs_ioctl_data *data = arg;
int rc = -EINVAL;
break;
}
- LASSERT (conn->ibc_cmid != NULL);
- data->ioc_nid = conn->ibc_peer->ibp_nid;
- if (conn->ibc_cmid->route.path_rec == NULL)
- data->ioc_u32[0] = 0; /* iWarp has no path MTU */
- else
- data->ioc_u32[0] =
- ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
- kiblnd_conn_decref(conn);
- break;
+ LASSERT(conn->ibc_cmid != NULL);
+ data->ioc_nid = conn->ibc_peer->ibp_nid;
+ if (conn->ibc_cmid->route.path_rec == NULL)
+ data->ioc_u32[0] = 0; /* iWarp has no path MTU */
+ else
+ data->ioc_u32[0] =
+ ib_mtu_enum_to_int(conn->ibc_cmid->route.path_rec->mtu);
+ kiblnd_conn_decref(conn);
+ break;
}
case IOC_LIBCFS_CLOSE_CONNECTION: {
rc = kiblnd_close_matching_conns(ni, data->ioc_nid);
}
static void
-kiblnd_query(lnet_ni_t *ni, lnet_nid_t nid, cfs_time_t *when)
+kiblnd_query(struct lnet_ni *ni, lnet_nid_t nid, cfs_time_t *when)
{
cfs_time_t last_alive = 0;
cfs_time_t now = cfs_time_current();
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_t *peer;
+ kib_peer_ni_t *peer_ni;
unsigned long flags;
read_lock_irqsave(glock, flags);
- peer = kiblnd_find_peer_locked(nid);
- if (peer != NULL)
- last_alive = peer->ibp_last_alive;
+ peer_ni = kiblnd_find_peer_locked(ni, nid);
+ if (peer_ni != NULL)
+ last_alive = peer_ni->ibp_last_alive;
read_unlock_irqrestore(glock, flags);
if (last_alive != 0)
*when = last_alive;
- /* peer is not persistent in hash, trigger peer creation
+ /* peer_ni is not persistent in hash, trigger peer_ni creation
* and connection establishment with a NULL tx */
- if (peer == NULL)
+ if (peer_ni == NULL)
kiblnd_launch_tx(ni, NULL, nid);
- CDEBUG(D_NET, "Peer %s %p, alive %ld secs ago\n",
- libcfs_nid2str(nid), peer,
+ CDEBUG(D_NET, "peer_ni %s %p, alive %ld secs ago\n",
+ libcfs_nid2str(nid), peer_ni,
last_alive ? cfs_duration_sec(now - last_alive) : -1);
return;
}
rx->rx_msgaddr));
KIBLND_UNMAP_ADDR_SET(rx, rx_msgunmap, rx->rx_msgaddr);
- CDEBUG(D_NET, "rx %d: %p "LPX64"("LPX64")\n",
+ CDEBUG(D_NET, "rx %d: %p %#llx(%#llx)\n",
i, rx->rx_msg, rx->rx_msgaddr,
(__u64)(page_to_phys(pg) + pg_off));
}
}
+#ifdef HAVE_IB_GET_DMA_MR
struct ib_mr *
-kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd,
+kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
int negotiated_nfrags)
{
- __u16 nfrags = (negotiated_nfrags != -1) ?
- negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand;
+ kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int mod;
+ __u16 nfrags;
+
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+ mod = tunables->lnd_map_on_demand;
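+ /* use the negotiated frag count if the handshake supplied one,
+ * otherwise fall back to this NI's map_on_demand setting */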
+ nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
LASSERT(hdev->ibh_mrs != NULL);
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- nfrags <= rd->rd_nfrags)
+ if (mod > 0 && nfrags <= rd->rd_nfrags)
return NULL;
return hdev->ibh_mrs;
}
+#endif
static void
kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
frd_list) {
list_del(&frd->frd_list);
+#ifndef HAVE_IB_MAP_MR_SG
ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
ib_dereg_mr(frd->frd_mr);
LIBCFS_FREE(frd, sizeof(*frd));
i++;
}
}
-static int kiblnd_fmr_pool_size(int ncpts)
+static int
+kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
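+ /* split the configured pool size across CPTs, never below IBLND_FMR_POOL */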
- int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
+ int size = tunables->lnd_fmr_pool_size / ncpts;
return max(IBLND_FMR_POOL, size);
}
-static int kiblnd_fmr_flush_trigger(int ncpts)
+static int
+kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
+ int size = tunables->lnd_fmr_flush_trigger / ncpts;
return max(IBLND_FMR_POOL_FLUSH, size);
}
.dirty_watermark = fps->fps_flush_trigger,
.flush_function = NULL,
.flush_arg = NULL,
- .cache = !!*kiblnd_tunables.kib_fmr_cache};
+ .cache = !!fps->fps_cache };
int rc = 0;
fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
}
frd->frd_mr = NULL;
+#ifndef HAVE_IB_MAP_MR_SG
frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
LNET_MAX_PAYLOAD/PAGE_SIZE);
if (IS_ERR(frd->frd_frpl)) {
rc = PTR_ERR(frd->frd_frpl);
CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
rc);
+ frd->frd_frpl = NULL;
goto out_middle;
}
+#endif
+#ifdef HAVE_IB_ALLOC_FAST_REG_MR
frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
LNET_MAX_PAYLOAD/PAGE_SIZE);
+#else
+ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
+ IB_MR_TYPE_MEM_REG,
+ LNET_MAX_PAYLOAD/PAGE_SIZE);
+#endif
if (IS_ERR(frd->frd_mr)) {
rc = PTR_ERR(frd->frd_mr);
CERROR("Failed to allocate ib_fast_reg_mr: %d\n", rc);
+ frd->frd_mr = NULL;
goto out_middle;
}
- frd->frd_valid = true;
+ /* There appears to be a bug in MLX5 code where you must
+ * invalidate the rkey of a new FastReg pool before first
+ * using it. Mark the FRD invalid here so its first use
+ * issues a LOCAL_INV WR ahead of the FastReg WR. */
+ frd->frd_valid = false;
list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
fpo->fast_reg.fpo_pool_size++;
out_middle:
if (frd->frd_mr)
ib_dereg_mr(frd->frd_mr);
+#ifndef HAVE_IB_MAP_MR_SG
if (frd->frd_frpl)
ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
LIBCFS_FREE(frd, sizeof(*frd));
out:
list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
frd_list) {
list_del(&frd->frd_list);
+#ifndef HAVE_IB_MAP_MR_SG
ib_free_fast_reg_page_list(frd->frd_frpl);
+#endif
ib_dereg_mr(frd->frd_mr);
LIBCFS_FREE(frd, sizeof(*frd));
}
kib_fmr_pool_t *fpo;
int rc;
+#ifndef HAVE_IB_DEVICE_ATTRS
dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL);
if (!dev_attr)
return -ENOMEM;
+#endif
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
if (!fpo) {
fpo->fpo_hdev = kiblnd_current_hdev(dev);
+#ifdef HAVE_IB_DEVICE_ATTRS
+ dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs;
+#else
rc = ib_query_device(fpo->fpo_hdev->ibh_ibdev, dev_attr);
if (rc) {
CERROR("Query device failed for %s: %d\n",
fpo->fpo_hdev->ibh_ibdev->name, rc);
goto out_dev_attr;
}
+#endif
/* Check for FMR or FastReg support */
fpo->fpo_is_fmr = 0;
if (rc)
goto out_fpo;
+#ifndef HAVE_IB_DEVICE_ATTRS
kfree(dev_attr);
+#endif
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_owner = fps;
*pp_fpo = fpo;
LIBCFS_FREE(fpo, sizeof(*fpo));
out_dev_attr:
+#ifndef HAVE_IB_DEVICE_ATTRS
kfree(dev_attr);
+#endif
return rc;
}
}
static int
-kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, kib_net_t *net,
- int pool_size, int flush_trigger)
+kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
+ kib_net_t *net,
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
kib_fmr_pool_t *fpo;
int rc;
fps->fps_net = net;
fps->fps_cpt = cpt;
- fps->fps_pool_size = pool_size;
- fps->fps_flush_trigger = flush_trigger;
+
+ fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
+ fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
+ fps->fps_cache = tunables->lnd_fmr_cache;
+
spin_lock_init(&fps->fps_lock);
INIT_LIST_HEAD(&fps->fps_pool_list);
INIT_LIST_HEAD(&fps->fps_failed_pool_list);
return cfs_time_aftereq(now, fpo->fpo_deadline);
}
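+/*
+ * Build the physical page vector for an RDMA descriptor: emit one
+ * HCA-page-sized entry for every page each fragment spans.
+ * Returns the number of entries written to tx->tx_pages.
+ */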
+static int
+kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+{
+ kib_hca_dev_t *hdev;
+ __u64 *pages = tx->tx_pages;
+ int npages;
+ int size;
+ int i;
+
+ hdev = tx->tx_pool->tpo_hdev;
+
+ for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+ for (size = 0; size < rd->rd_frags[i].rf_nob;
+ size += hdev->ibh_page_size) {
+ pages[npages++] = (rd->rd_frags[i].rf_addr &
+ hdev->ibh_page_mask) + size;
+ }
+ }
+
+ return npages;
+}
+
void
kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
}
int
-kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u32 nob, __u64 iov, bool is_rx, kib_fmr_t *fmr)
+kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx, kib_rdma_desc_t *rd,
+ __u32 nob, __u64 iov, kib_fmr_t *fmr)
{
kib_fmr_pool_t *fpo;
+ __u64 *pages = tx->tx_pages;
__u64 version;
+ bool is_rx = (rd != tx->tx_rd);
+ bool tx_pages_mapped = false;
+ int npages = 0;
int rc;
again:
struct ib_pool_fmr *pfmr;
spin_unlock(&fps->fps_lock);
+
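+ /* build the page vector lazily, only when an FMR map is actually attempted */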
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = true;
+ }
+
pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
pages, npages, iov);
if (likely(!IS_ERR(pfmr))) {
rc = PTR_ERR(pfmr);
} else {
if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
- struct ib_send_wr *wr;
struct kib_fast_reg_descriptor *frd;
+#ifdef HAVE_IB_MAP_MR_SG
+ struct ib_reg_wr *wr;
+ int n;
+#else
+ struct ib_rdma_wr *wr;
struct ib_fast_reg_page_list *frpl;
+#endif
struct ib_mr *mr;
frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
list_del(&frd->frd_list);
spin_unlock(&fps->fps_lock);
+#ifndef HAVE_IB_MAP_MR_SG
frpl = frd->frd_frpl;
+#endif
mr = frd->frd_mr;
if (!frd->frd_valid) {
- struct ib_send_wr *inv_wr;
+ struct ib_rdma_wr *inv_wr;
__u32 key = is_rx ? mr->rkey : mr->lkey;
inv_wr = &frd->frd_inv_wr;
memset(inv_wr, 0, sizeof(*inv_wr));
- inv_wr->opcode = IB_WR_LOCAL_INV;
- inv_wr->wr_id = IBLND_WID_MR;
- inv_wr->ex.invalidate_rkey = key;
+
+ inv_wr->wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr.wr_id = IBLND_WID_MR;
+ inv_wr->wr.ex.invalidate_rkey = key;
/* Bump the key */
key = ib_inc_rkey(key);
ib_update_fast_reg_key(mr, key);
}
+#ifdef HAVE_IB_MAP_MR_SG
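+ /* newer kernels map the scatterlist directly with ib_map_mr_sg();
+ * no fast_reg page list is needed */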
+#ifdef HAVE_IB_MAP_MR_SG_5ARGS
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, NULL, PAGE_SIZE);
+#else
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, PAGE_SIZE);
+#endif
+ if (unlikely(n != tx->tx_nfrags)) {
+ CERROR("Failed to map mr %d/%d "
+ "elements\n", n, tx->tx_nfrags);
+ return n < 0 ? n : -EINVAL;
+ }
+
+ mr->iova = iov;
+
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = is_rx ? mr->rkey : mr->lkey;
+ wr->access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+#else
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = true;
+ }
+
LASSERT(npages <= frpl->max_page_list_len);
memcpy(frpl->page_list, pages,
sizeof(*pages) * npages);
/* Prepare FastReg WR */
wr = &frd->frd_fastreg_wr;
memset(wr, 0, sizeof(*wr));
- wr->opcode = IB_WR_FAST_REG_MR;
- wr->wr_id = IBLND_WID_MR;
- wr->wr.fast_reg.iova_start = iov;
- wr->wr.fast_reg.page_list = frpl;
- wr->wr.fast_reg.page_list_len = npages;
- wr->wr.fast_reg.page_shift = PAGE_SHIFT;
- wr->wr.fast_reg.length = nob;
- wr->wr.fast_reg.rkey = is_rx ? mr->rkey
- : mr->lkey;
- wr->wr.fast_reg.access_flags =
+
+ wr->wr.opcode = IB_WR_FAST_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+
+ wr->wr.wr.fast_reg.iova_start = iov;
+ wr->wr.wr.fast_reg.page_list = frpl;
+ wr->wr.wr.fast_reg.page_list_len = npages;
+ wr->wr.wr.fast_reg.page_shift = PAGE_SHIFT;
+ wr->wr.wr.fast_reg.length = nob;
+ wr->wr.wr.fast_reg.rkey =
+ is_rx ? mr->rkey : mr->lkey;
+ wr->wr.wr.fast_reg.access_flags =
(IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
+#endif
fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
fmr->fmr_frd = frd;
return 0;
}
spin_unlock(&fps->fps_lock);
- rc = -EBUSY;
+ rc = -EAGAIN;
}
spin_lock(&fps->fps_lock);
goto out;
for (i = 0; i < pool->po_size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ int wrq_sge = *kiblnd_tunables.kib_wrq_sge;
list_del(&tx->tx_list);
if (tx->tx_pages != NULL)
sizeof(*tx->tx_pages));
if (tx->tx_frags != NULL)
LIBCFS_FREE(tx->tx_frags,
- IBLND_MAX_RDMA_FRAGS *
- sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (tx->tx_wrq != NULL)
LIBCFS_FREE(tx->tx_wrq,
(1 + IBLND_MAX_RDMA_FRAGS) *
sizeof(*tx->tx_wrq));
- if (tx->tx_sge != NULL)
- LIBCFS_FREE(tx->tx_sge,
- (1 + IBLND_MAX_RDMA_FRAGS) *
- sizeof(*tx->tx_sge));
+ if (tx->tx_sge != NULL)
+ LIBCFS_FREE(tx->tx_sge,
+ (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
+ sizeof(*tx->tx_sge));
if (tx->tx_rd != NULL)
LIBCFS_FREE(tx->tx_rd,
offsetof(kib_rdma_desc_t,
memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
for (i = 0; i < size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ int wrq_sge = *kiblnd_tunables.kib_wrq_sge;
tx->tx_pool = tpo;
if (ps->ps_net->ibn_fmr_ps != NULL) {
}
LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt,
- IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags));
+ (1 + IBLND_MAX_RDMA_FRAGS) *
+ sizeof(*tx->tx_frags));
if (tx->tx_frags == NULL)
break;
- sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS);
+ sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS + 1);
LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt,
(1 + IBLND_MAX_RDMA_FRAGS) *
break;
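+ /* each work request may carry up to wrq_sge scatter/gather entries */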
LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt,
- (1 + IBLND_MAX_RDMA_FRAGS) *
+ (1 + IBLND_MAX_RDMA_FRAGS) * wrq_sge *
sizeof(*tx->tx_sge));
if (tx->tx_sge == NULL)
break;
}
static int
-kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
+kiblnd_net_init_pools(kib_net_t *net, struct lnet_ni *ni, __u32 *cpts,
+ int ncpts)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+#ifdef HAVE_IB_GET_DMA_MR
unsigned long flags;
+#endif
int cpt;
int rc;
int i;
+ tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
+
+#ifdef HAVE_IB_GET_DMA_MR
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_map_on_demand == 0) {
+ if (tunables->lnd_map_on_demand == 0) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
goto create_tx_pool;
}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+#endif
- if (*kiblnd_tunables.kib_fmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
+ if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_fmr_pool_size,
+ tunables->lnd_fmr_pool_size,
*kiblnd_tunables.kib_ntx / 4);
rc = -EINVAL;
goto failed;
for (i = 0; i < ncpts; i++) {
cpt = (cpts == NULL) ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
- kiblnd_fmr_pool_size(ncpts),
- kiblnd_fmr_flush_trigger(ncpts));
+ rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
+ net, tunables);
if (rc != 0) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
if (i > 0)
LASSERT(i == ncpts);
+#ifdef HAVE_IB_GET_DMA_MR
create_tx_pool:
+#endif
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_tx_poolset_t));
if (net->ibn_tx_ps == NULL) {
static int
kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
{
- struct ib_device_attr *attr;
- int rc;
+#ifndef HAVE_IB_DEVICE_ATTRS
+ struct ib_device_attr *attr;
+ int rc;
+#endif
/* It's safe to assume a HCA can handle a page size
* matching that of the native system */
hdev->ibh_page_size = 1 << PAGE_SHIFT;
hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
+#ifdef HAVE_IB_DEVICE_ATTRS
+ hdev->ibh_mr_size = hdev->ibh_ibdev->attrs.max_mr_size;
+#else
LIBCFS_ALLOC(attr, sizeof(*attr));
if (attr == NULL) {
CERROR("Out of memory\n");
CERROR("Failed to query IB device: %d\n", rc);
return rc;
}
+#endif
if (hdev->ibh_mr_size == ~0ULL) {
hdev->ibh_mr_shift = 64;
return 0;
}
- CERROR("Invalid mr size: "LPX64"\n", hdev->ibh_mr_size);
+ CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
return -EINVAL;
}
+#ifdef HAVE_IB_GET_DMA_MR
static void
kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
{
hdev->ibh_mrs = NULL;
}
+#endif
void
kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
{
+#ifdef HAVE_IB_GET_DMA_MR
kiblnd_hdev_cleanup_mrs(hdev);
+#endif
if (hdev->ibh_pd != NULL)
ib_dealloc_pd(hdev->ibh_pd);
LIBCFS_FREE(hdev, sizeof(*hdev));
}
+#ifdef HAVE_IB_GET_DMA_MR
static int
kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
{
return 0;
}
+#endif
static int
kiblnd_dummy_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
hdev->ibh_cmid = cmid;
hdev->ibh_ibdev = cmid->device;
- pd = ib_alloc_pd(cmid->device);
- if (IS_ERR(pd)) {
- rc = PTR_ERR(pd);
- CERROR("Can't allocate PD: %d\n", rc);
- goto out;
- }
+#ifdef HAVE_IB_ALLOC_PD_2ARGS
+ pd = ib_alloc_pd(cmid->device, 0);
+#else
+ pd = ib_alloc_pd(cmid->device);
+#endif
+ if (IS_ERR(pd)) {
+ rc = PTR_ERR(pd);
+ CERROR("Can't allocate PD: %d\n", rc);
+ goto out;
+ }
hdev->ibh_pd = pd;
goto out;
}
- rc = kiblnd_hdev_setup_mrs(hdev);
- if (rc != 0) {
- CERROR("Can't setup device: %d\n", rc);
- goto out;
- }
+#ifdef HAVE_IB_GET_DMA_MR
+ rc = kiblnd_hdev_setup_mrs(hdev);
+ if (rc != 0) {
+ CERROR("Can't setup device: %d\n", rc);
+ goto out;
+ }
+#else
+ rc = kiblnd_hdev_get_attr(hdev);
+ if (rc != 0) {
+ CERROR("Can't get device attributes: %d\n", rc);
+ goto out;
+ }
+#endif
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
if (dev == NULL)
return NULL;
- memset(dev, 0, sizeof(*dev));
netdev = dev_get_by_name(&init_net, ifname);
if (netdev == NULL) {
dev->ibd_can_failover = 0;
}
static void
-kiblnd_shutdown (lnet_ni_t *ni)
+kiblnd_shutdown(struct lnet_ni *ni)
{
kib_net_t *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
/* nuke all existing peers within this net */
kiblnd_del_peer(ni, LNET_NID_ANY);
- /* Wait for all peer state to clean up */
+ /* Wait for all peer_ni state to clean up */
i = 2;
while (atomic_read(&net->ibn_npeers) != 0) {
i++;
}
static int
-kiblnd_startup (lnet_ni_t *ni)
+kiblnd_startup(struct lnet_ni *ni)
{
char *ifname;
kib_dev_t *ibdev = NULL;
kib_net_t *net;
- struct timeval tv;
unsigned long flags;
int rc;
int newdev;
+ int node_id;
- LASSERT (ni->ni_lnd == &the_o2iblnd);
+ LASSERT(ni->ni_net->net_lnd == &the_o2iblnd);
if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
rc = kiblnd_base_startup();
if (net == NULL)
goto failed;
- memset(net, 0, sizeof(*net));
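+ /* incarnation: wall-clock time in microseconds */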
+ net->ibn_incarnation = ktime_get_real_ns() / NSEC_PER_USEC;
- do_gettimeofday(&tv);
- net->ibn_incarnation = (((__u64)tv.tv_sec) * 1000000) + tv.tv_usec;
+ kiblnd_tunables_setup(ni);
- ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
- ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
- ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
- ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
+ if (ni->ni_interfaces[0] != NULL) {
+ /* Use the IPoIB interface specified in 'networks=' */
- if (ni->ni_interfaces[0] != NULL) {
- /* Use the IPoIB interface specified in 'networks=' */
-
- CLASSERT (LNET_MAX_INTERFACES > 1);
- if (ni->ni_interfaces[1] != NULL) {
- CERROR("Multiple interfaces not supported\n");
- goto failed;
- }
+ CLASSERT(LNET_NUM_INTERFACES > 1);
+ if (ni->ni_interfaces[1] != NULL) {
+ CERROR("Multiple interfaces not supported\n");
+ goto failed;
+ }
- ifname = ni->ni_interfaces[0];
- } else {
- ifname = *kiblnd_tunables.kib_default_ipif;
- }
+ ifname = ni->ni_interfaces[0];
+ } else {
+ ifname = *kiblnd_tunables.kib_default_ipif;
+ }
if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
CERROR("IPoIB interface name too long: %s\n", ifname);
newdev = ibdev == NULL;
/* hmm...create kib_dev even for alias */
if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
- ibdev = kiblnd_create_dev(ifname);
+ ibdev = kiblnd_create_dev(ifname);
- if (ibdev == NULL)
- goto failed;
+ if (ibdev == NULL)
+ goto failed;
+
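+ /* record the CPT partition of the HCA's NUMA node for this NI */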
+ node_id = dev_to_node(ibdev->ibd_hdev->ibh_ibdev->dma_device);
+ ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
- net->ibn_dev = ibdev;
- ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
+ net->ibn_dev = ibdev;
+ ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
rc = kiblnd_dev_start_threads(ibdev, newdev,
ni->ni_cpts, ni->ni_ncpts);
if (rc != 0)
goto failed;
- rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
+ rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
if (rc != 0) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
return -ENETDOWN;
}
-static lnd_t the_o2iblnd = {
+static struct lnet_lnd the_o2iblnd = {
.lnd_type = O2IBLND,
.lnd_startup = kiblnd_startup,
.lnd_shutdown = kiblnd_shutdown,
static void __exit ko2iblnd_exit(void)
{
lnet_unregister_lnd(&the_o2iblnd);
- kiblnd_tunables_fini();
}
static int __init ko2iblnd_init(void)