*/
#include <asm/page.h>
+#include <linux/inetdevice.h>
+
#include "o2iblnd.h"
static struct lnet_lnd the_o2iblnd;
-kib_data_t kiblnd_data;
+struct kib_data kiblnd_data;
static __u32
kiblnd_cksum (void *ptr, int nob)
static int
kiblnd_msgtype2size(int type)
{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ const int hdr_size = offsetof(struct kib_msg, ibm_u);
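+	/* each size is the common header plus the union member the
+	 * message type actually uses; IMMEDIATE is sized up to the
+	 * start of its variable-length payload */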
switch (type) {
case IBLND_MSG_CONNREQ:
case IBLND_MSG_CONNACK:
- return hdr_size + sizeof(kib_connparams_t);
+ return hdr_size + sizeof(struct kib_connparams);
case IBLND_MSG_NOOP:
return hdr_size;
case IBLND_MSG_IMMEDIATE:
- return offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[0]);
+ return offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[0]);
case IBLND_MSG_PUT_REQ:
- return hdr_size + sizeof(kib_putreq_msg_t);
+ return hdr_size + sizeof(struct kib_putreq_msg);
case IBLND_MSG_PUT_ACK:
- return hdr_size + sizeof(kib_putack_msg_t);
+ return hdr_size + sizeof(struct kib_putack_msg);
case IBLND_MSG_GET_REQ:
- return hdr_size + sizeof(kib_get_msg_t);
+ return hdr_size + sizeof(struct kib_get_msg);
case IBLND_MSG_PUT_NAK:
case IBLND_MSG_PUT_DONE:
case IBLND_MSG_GET_DONE:
- return hdr_size + sizeof(kib_completion_msg_t);
+ return hdr_size + sizeof(struct kib_completion_msg);
default:
return -1;
}
}
-static int
-kiblnd_unpack_rd(kib_msg_t *msg, int flip)
+static int kiblnd_unpack_rd(struct kib_msg *msg, int flip)
{
- kib_rdma_desc_t *rd;
+ struct kib_rdma_desc *rd;
int nob;
int n;
int i;
return 1;
}
- nob = offsetof (kib_msg_t, ibm_u) +
+ nob = offsetof(struct kib_msg, ibm_u) +
kiblnd_rd_msg_size(rd, msg->ibm_type, n);
if (msg->ibm_nob < nob) {
return 0;
}
-void
-kiblnd_pack_msg(struct lnet_ni *ni, kib_msg_t *msg, int version,
- int credits, lnet_nid_t dstnid, __u64 dststamp)
+void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
+ int credits, lnet_nid_t dstnid, __u64 dststamp)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
/* CAVEAT EMPTOR! all message fields not set here should have been
* initialised previously. */
}
}
-int
-kiblnd_unpack_msg(kib_msg_t *msg, int nob)
+int kiblnd_unpack_msg(struct kib_msg *msg, int nob)
{
- const int hdr_size = offsetof(kib_msg_t, ibm_u);
+ const int hdr_size = offsetof(struct kib_msg, ibm_u);
__u32 msg_cksum;
__u16 version;
int msg_nob;
}
int
-kiblnd_create_peer(struct lnet_ni *ni, kib_peer_ni_t **peerp, lnet_nid_t nid)
+kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer_ni **peerp,
+ lnet_nid_t nid)
{
- kib_peer_ni_t *peer_ni;
- kib_net_t *net = ni->ni_data;
- int cpt = lnet_cpt_of_nid(nid, ni);
- unsigned long flags;
+ struct kib_peer_ni *peer_ni;
+ struct kib_net *net = ni->ni_data;
+ int cpt = lnet_cpt_of_nid(nid, ni);
+ unsigned long flags;
LASSERT(net != NULL);
LASSERT(nid != LNET_NID_ANY);
}
void
-kiblnd_destroy_peer (kib_peer_ni_t *peer_ni)
+kiblnd_destroy_peer(struct kib_peer_ni *peer_ni)
{
- kib_net_t *net = peer_ni->ibp_ni->ni_data;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
LASSERT(net != NULL);
LASSERT (atomic_read(&peer_ni->ibp_refcount) == 0);
atomic_dec(&net->ibn_npeers);
}
-kib_peer_ni_t *
+struct kib_peer_ni *
kiblnd_find_peer_locked(struct lnet_ni *ni, lnet_nid_t nid)
{
/* the caller is responsible for accounting the additional reference
* that this creates */
struct list_head *peer_list = kiblnd_nid2peerlist(nid);
struct list_head *tmp;
- kib_peer_ni_t *peer_ni;
+ struct kib_peer_ni *peer_ni;
list_for_each(tmp, peer_list) {
- peer_ni = list_entry(tmp, kib_peer_ni_t, ibp_list);
+ peer_ni = list_entry(tmp, struct kib_peer_ni, ibp_list);
LASSERT(!kiblnd_peer_idle(peer_ni));
/*
}
void
-kiblnd_unlink_peer_locked (kib_peer_ni_t *peer_ni)
+kiblnd_unlink_peer_locked(struct kib_peer_ni *peer_ni)
{
LASSERT(list_empty(&peer_ni->ibp_conns));
kiblnd_get_peer_info(struct lnet_ni *ni, int index,
lnet_nid_t *nidp, int *count)
{
- kib_peer_ni_t *peer_ni;
+ struct kib_peer_ni *peer_ni;
struct list_head *ptmp;
int i;
unsigned long flags;
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
LASSERT(!kiblnd_peer_idle(peer_ni));
if (peer_ni->ibp_ni != ni)
}
static void
-kiblnd_del_peer_locked (kib_peer_ni_t *peer_ni)
+kiblnd_del_peer_locked(struct kib_peer_ni *peer_ni)
{
- struct list_head *ctmp;
- struct list_head *cnxt;
- kib_conn_t *conn;
+ struct list_head *ctmp;
+ struct list_head *cnxt;
+ struct kib_conn *conn;
if (list_empty(&peer_ni->ibp_conns)) {
kiblnd_unlink_peer_locked(peer_ni);
} else {
list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
kiblnd_close_conn_locked(conn, 0);
}
struct list_head zombies = LIST_HEAD_INIT(zombies);
struct list_head *ptmp;
struct list_head *pnxt;
- kib_peer_ni_t *peer_ni;
+ struct kib_peer_ni *peer_ni;
int lo;
int hi;
int i;
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
LASSERT(!kiblnd_peer_idle(peer_ni));
if (peer_ni->ibp_ni != ni)
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_txlist_done(&zombies, -EIO);
+ kiblnd_txlist_done(&zombies, -EIO, LNET_MSG_STATUS_LOCAL_ERROR);
return rc;
}
-static kib_conn_t *
+static struct kib_conn *
kiblnd_get_conn_by_idx(struct lnet_ni *ni, int index)
{
- kib_peer_ni_t *peer_ni;
+ struct kib_peer_ni *peer_ni;
struct list_head *ptmp;
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
int i;
unsigned long flags;
for (i = 0; i < kiblnd_data.kib_peer_hash_size; i++) {
list_for_each(ptmp, &kiblnd_data.kib_peers[i]) {
- peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
LASSERT(!kiblnd_peer_idle(peer_ni));
if (peer_ni->ibp_ni != ni)
if (index-- > 0)
continue;
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
kiblnd_conn_addref(conn);
read_unlock_irqrestore(&kiblnd_data.kib_global_lock,
flags);
}
static void
-kiblnd_debug_rx (kib_rx_t *rx)
+kiblnd_debug_rx(struct kib_rx *rx)
{
- CDEBUG(D_CONSOLE, " %p status %d msg_type %x cred %d\n",
- rx, rx->rx_status, rx->rx_msg->ibm_type,
- rx->rx_msg->ibm_credits);
+ CDEBUG(D_CONSOLE, " %p msg_type %x cred %d\n",
+ rx, rx->rx_msg->ibm_type,
+ rx->rx_msg->ibm_credits);
}
static void
-kiblnd_debug_tx (kib_tx_t *tx)
+kiblnd_debug_tx(struct kib_tx *tx)
{
CDEBUG(D_CONSOLE, " %p snd %d q %d w %d rc %d dl %lld "
"cookie %#llx msg %s%s type %x cred %d\n",
}
void
-kiblnd_debug_conn (kib_conn_t *conn)
+kiblnd_debug_conn(struct kib_conn *conn)
{
struct list_head *tmp;
int i;
conn->ibc_outstanding_credits, conn->ibc_reserved_credits);
CDEBUG(D_CONSOLE, " comms_err %d\n", conn->ibc_comms_error);
+ CDEBUG(D_CONSOLE, " early_rxs:\n");
+ list_for_each(tmp, &conn->ibc_early_rxs)
+ kiblnd_debug_rx(list_entry(tmp, struct kib_rx, rx_list));
+
CDEBUG(D_CONSOLE, " tx_noops:\n");
list_for_each(tmp, &conn->ibc_tx_noops)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " tx_queue_nocred:\n");
list_for_each(tmp, &conn->ibc_tx_queue_nocred)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " tx_queue_rsrvd:\n");
list_for_each(tmp, &conn->ibc_tx_queue_rsrvd)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " tx_queue:\n");
list_for_each(tmp, &conn->ibc_tx_queue)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " active_txs:\n");
list_for_each(tmp, &conn->ibc_active_txs)
- kiblnd_debug_tx(list_entry(tmp, kib_tx_t, tx_list));
+ kiblnd_debug_tx(list_entry(tmp, struct kib_tx, tx_list));
CDEBUG(D_CONSOLE, " rxs:\n");
for (i = 0; i < IBLND_RX_MSGS(conn); i++)
}
static int
-kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
+kiblnd_get_completion_vector(struct kib_conn *conn, int cpt)
{
- cpumask_t *mask;
+ cpumask_var_t *mask;
int vectors;
int off;
int i;
/* hash NID to CPU id in this partition... */
ibp_nid = conn->ibc_peer->ibp_nid;
- off = do_div(ibp_nid, cpumask_weight(mask));
- for_each_cpu(i, mask) {
+ off = do_div(ibp_nid, cpumask_weight(*mask));
+ for_each_cpu(i, *mask) {
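+		/* pick the off-th online CPU in this CPT's mask; off is
+		 * the peer's NID reduced modulo the mask weight above */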
if (off-- == 0)
return i % vectors;
}
* One WR for the LNet message
* And ibc_max_frags for the transfer WRs
*/
- unsigned int ret = 1 + conn->ibc_max_frags;
- __u32 dev_caps = conn->ibc_hdev->ibh_dev->ibd_dev_caps;
+ int ret;
+ int multiplier = 1 + conn->ibc_max_frags;
+ enum kib_dev_caps dev_caps = conn->ibc_hdev->ibh_dev->ibd_dev_caps;
/* FastReg needs two extra WRs for map and invalidate */
if (dev_caps & IBLND_DEV_CAPS_FASTREG_ENABLED)
- ret += 2;
+ multiplier += 2;
/* account for a maximum of ibc_queue_depth in-flight transfers */
- ret *= conn->ibc_queue_depth;
- return ret;
+ ret = multiplier * conn->ibc_queue_depth;
+
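+	/*
+	 * Illustrative numbers (not from this change): with
+	 * ibc_max_frags = 256 and FastReg enabled, multiplier is
+	 * 1 + 256 + 2 = 259, so a queue depth of 16 asks for 4144
+	 * send WRs; a device with ibh_max_qp_wr = 4096 would have
+	 * its queue depth reduced to 4096 / 259 = 15 below.
+	 */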
+ if (ret > conn->ibc_hdev->ibh_max_qp_wr) {
+		CDEBUG(D_NET, "peer_credits %u will result in send work "
+		       "request size %d larger than the maximum %d the "
+		       "device can handle\n", conn->ibc_queue_depth, ret,
+		       conn->ibc_hdev->ibh_max_qp_wr);
+ conn->ibc_queue_depth =
+ conn->ibc_hdev->ibh_max_qp_wr / multiplier;
+ }
+
+ /* don't go beyond the maximum the device can handle */
+ return min(ret, conn->ibc_hdev->ibh_max_qp_wr);
}
-kib_conn_t *
-kiblnd_create_conn(kib_peer_ni_t *peer_ni, struct rdma_cm_id *cmid,
+struct kib_conn *
+kiblnd_create_conn(struct kib_peer_ni *peer_ni, struct rdma_cm_id *cmid,
int state, int version)
{
/* CAVEAT EMPTOR:
* to destroy 'cmid' here since I'm called from the CM which still has
* its ref on 'cmid'). */
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_net_t *net = peer_ni->ibp_ni->ni_data;
- kib_dev_t *dev;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
+ struct kib_dev *dev;
struct ib_qp_init_attr *init_qp_attr;
struct kib_sched_info *sched;
#ifdef HAVE_IB_CQ_INIT_ATTR
struct ib_cq_init_attr cq_attr = {};
#endif
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct ib_cq *cq;
unsigned long flags;
int cpt;
conn->ibc_cmid = cmid;
conn->ibc_max_frags = peer_ni->ibp_max_frags;
conn->ibc_queue_depth = peer_ni->ibp_queue_depth;
+ conn->ibc_rxs = NULL;
+ conn->ibc_rx_pages = NULL;
+ INIT_LIST_HEAD(&conn->ibc_early_rxs);
INIT_LIST_HEAD(&conn->ibc_tx_noops);
INIT_LIST_HEAD(&conn->ibc_tx_queue);
INIT_LIST_HEAD(&conn->ibc_tx_queue_rsrvd);
INIT_LIST_HEAD(&conn->ibc_tx_queue_nocred);
INIT_LIST_HEAD(&conn->ibc_active_txs);
+ INIT_LIST_HEAD(&conn->ibc_zombie_txs);
spin_lock_init(&conn->ibc_lock);
LIBCFS_CPT_ALLOC(conn->ibc_connvars, lnet_cpt_table(), cpt,
write_unlock_irqrestore(glock, flags);
- LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
- IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
- if (conn->ibc_rxs == NULL) {
- CERROR("Cannot allocate RX buffers\n");
- goto failed_2;
- }
-
- rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
- IBLND_RX_MSG_PAGES(conn));
- if (rc != 0)
- goto failed_2;
-
- kiblnd_map_rx_descs(conn);
-
#ifdef HAVE_IB_CQ_INIT_ATTR
cq_attr.cqe = IBLND_CQ_ENTRIES(conn);
cq_attr.comp_vector = kiblnd_get_completion_vector(conn, cpt);
init_qp_attr->qp_type = IB_QPT_RC;
init_qp_attr->send_cq = cq;
init_qp_attr->recv_cq = cq;
+ /*
+ * kiblnd_send_wrs() can change the connection's queue depth if
+ * the maximum work requests for the device is maxed out
+ */
+ init_qp_attr->cap.max_send_wr = kiblnd_send_wrs(conn);
+ init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
- conn->ibc_sched = sched;
-
- do {
- init_qp_attr->cap.max_send_wr = kiblnd_send_wrs(conn);
- init_qp_attr->cap.max_recv_wr = IBLND_RECV_WRS(conn);
-
- rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
- if (!rc || conn->ibc_queue_depth < 2)
- break;
-
- conn->ibc_queue_depth--;
- } while (rc);
-
+ rc = rdma_create_qp(cmid, conn->ibc_hdev->ibh_pd, init_qp_attr);
if (rc) {
CERROR("Can't create QP: %d, send_wr: %d, recv_wr: %d, "
"send_sge: %d, recv_sge: %d\n",
goto failed_2;
}
+ conn->ibc_sched = sched;
+
if (conn->ibc_queue_depth != peer_ni->ibp_queue_depth)
CWARN("peer %s - queue depth reduced from %u to %u"
" to allow for qp creation\n",
peer_ni->ibp_queue_depth,
conn->ibc_queue_depth);
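+	/* IBLND_RX_MSGS() scales with ibc_queue_depth, so the RX
+	 * descriptors are allocated only after kiblnd_send_wrs() has
+	 * had a chance to lower the queue depth above */
+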
+ LIBCFS_CPT_ALLOC(conn->ibc_rxs, lnet_cpt_table(), cpt,
+ IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
+ if (conn->ibc_rxs == NULL) {
+ CERROR("Cannot allocate RX buffers\n");
+ goto failed_2;
+ }
+
+ rc = kiblnd_alloc_pages(&conn->ibc_rx_pages, cpt,
+ IBLND_RX_MSG_PAGES(conn));
+ if (rc != 0)
+ goto failed_2;
+
+ kiblnd_map_rx_descs(conn);
+
LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
/* 1 ref for caller and each rxmsg */
}
void
-kiblnd_destroy_conn(kib_conn_t *conn)
+kiblnd_destroy_conn(struct kib_conn *conn)
{
struct rdma_cm_id *cmid = conn->ibc_cmid;
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
int rc;
LASSERT (!in_interrupt());
LASSERT (atomic_read(&conn->ibc_refcount) == 0);
+ LASSERT(list_empty(&conn->ibc_early_rxs));
LASSERT(list_empty(&conn->ibc_tx_noops));
LASSERT(list_empty(&conn->ibc_tx_queue));
LASSERT(list_empty(&conn->ibc_tx_queue_rsrvd));
CWARN("Error destroying CQ: %d\n", rc);
}
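+	/* complete any transmits parked on the zombie list during
+	 * teardown: -ECONNABORTED, but with an OK health status */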
+ kiblnd_txlist_done(&conn->ibc_zombie_txs, -ECONNABORTED,
+ LNET_MSG_STATUS_OK);
+
if (conn->ibc_rx_pages != NULL)
kiblnd_unmap_rx_descs(conn);
if (conn->ibc_rxs != NULL) {
LIBCFS_FREE(conn->ibc_rxs,
- IBLND_RX_MSGS(conn) * sizeof(kib_rx_t));
+ IBLND_RX_MSGS(conn) * sizeof(struct kib_rx));
}
if (conn->ibc_connvars != NULL)
/* See CAVEAT EMPTOR above in kiblnd_create_conn */
if (conn->ibc_state != IBLND_CONN_INIT) {
- kib_net_t *net = peer_ni->ibp_ni->ni_data;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
kiblnd_peer_decref(peer_ni);
rdma_destroy_id(cmid);
}
int
-kiblnd_close_peer_conns_locked(kib_peer_ni_t *peer_ni, int why)
+kiblnd_close_peer_conns_locked(struct kib_peer_ni *peer_ni, int why)
{
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
CDEBUG(D_NET, "Closing conn -> %s, "
"version: %x, reason: %d\n",
}
int
-kiblnd_close_stale_conns_locked(kib_peer_ni_t *peer_ni,
+kiblnd_close_stale_conns_locked(struct kib_peer_ni *peer_ni,
int version, __u64 incarnation)
{
- kib_conn_t *conn;
+ struct kib_conn *conn;
struct list_head *ctmp;
struct list_head *cnxt;
int count = 0;
list_for_each_safe(ctmp, cnxt, &peer_ni->ibp_conns) {
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
if (conn->ibc_version == version &&
conn->ibc_incarnation == incarnation)
static int
kiblnd_close_matching_conns(struct lnet_ni *ni, lnet_nid_t nid)
{
- kib_peer_ni_t *peer_ni;
+ struct kib_peer_ni *peer_ni;
struct list_head *ptmp;
struct list_head *pnxt;
int lo;
for (i = lo; i <= hi; i++) {
list_for_each_safe(ptmp, pnxt, &kiblnd_data.kib_peers[i]) {
- peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
LASSERT(!kiblnd_peer_idle(peer_ni));
if (peer_ni->ibp_ni != ni)
break;
}
case IOC_LIBCFS_GET_CONN: {
- kib_conn_t *conn;
+ struct kib_conn *conn;
rc = 0;
conn = kiblnd_get_conn_by_idx(ni, data->ioc_count);
time64_t last_alive = 0;
time64_t now = ktime_get_seconds();
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_ni_t *peer_ni;
+ struct kib_peer_ni *peer_ni;
unsigned long flags;
read_lock_irqsave(glock, flags);
}
static void
-kiblnd_free_pages(kib_pages_t *p)
+kiblnd_free_pages(struct kib_pages *p)
{
int npages = p->ibp_npages;
int i;
__free_page(p->ibp_pages[i]);
}
- LIBCFS_FREE(p, offsetof(kib_pages_t, ibp_pages[npages]));
+ LIBCFS_FREE(p, offsetof(struct kib_pages, ibp_pages[npages]));
}
int
-kiblnd_alloc_pages(kib_pages_t **pp, int cpt, int npages)
+kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages)
{
- kib_pages_t *p;
- int i;
+ struct kib_pages *p;
+ int i;
LIBCFS_CPT_ALLOC(p, lnet_cpt_table(), cpt,
- offsetof(kib_pages_t, ibp_pages[npages]));
+ offsetof(struct kib_pages, ibp_pages[npages]));
if (p == NULL) {
CERROR("Can't allocate descriptor for %d pages\n", npages);
return -ENOMEM;
}
- memset(p, 0, offsetof(kib_pages_t, ibp_pages[npages]));
+ memset(p, 0, offsetof(struct kib_pages, ibp_pages[npages]));
p->ibp_npages = npages;
for (i = 0; i < npages; i++) {
}
void
-kiblnd_unmap_rx_descs(kib_conn_t *conn)
+kiblnd_unmap_rx_descs(struct kib_conn *conn)
{
- kib_rx_t *rx;
+ struct kib_rx *rx;
int i;
LASSERT (conn->ibc_rxs != NULL);
}
void
-kiblnd_map_rx_descs(kib_conn_t *conn)
+kiblnd_map_rx_descs(struct kib_conn *conn)
{
- kib_rx_t *rx;
+ struct kib_rx *rx;
struct page *pg;
int pg_off;
int ipg;
rx = &conn->ibc_rxs[i];
rx->rx_conn = conn;
- rx->rx_msg = (kib_msg_t *)(((char *)page_address(pg)) + pg_off);
+		rx->rx_msg = (struct kib_msg *)(((char *)page_address(pg)) +
+						pg_off);
rx->rx_msgaddr =
kiblnd_dma_map_single(conn->ibc_hdev->ibh_ibdev,
}
static void
-kiblnd_unmap_tx_pool(kib_tx_pool_t *tpo)
+kiblnd_unmap_tx_pool(struct kib_tx_pool *tpo)
{
- kib_hca_dev_t *hdev = tpo->tpo_hdev;
- kib_tx_t *tx;
- int i;
+ struct kib_hca_dev *hdev = tpo->tpo_hdev;
+ struct kib_tx *tx;
+ int i;
LASSERT (tpo->tpo_pool.po_allocated == 0);
tpo->tpo_hdev = NULL;
}
-static kib_hca_dev_t *
-kiblnd_current_hdev(kib_dev_t *dev)
+static struct kib_hca_dev *
+kiblnd_current_hdev(struct kib_dev *dev)
{
- kib_hca_dev_t *hdev;
+ struct kib_hca_dev *hdev;
unsigned long flags;
int i = 0;
}
static void
-kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
-{
- kib_pages_t *txpgs = tpo->tpo_tx_pages;
- kib_pool_t *pool = &tpo->tpo_pool;
- kib_net_t *net = pool->po_owner->ps_net;
- kib_dev_t *dev;
- struct page *page;
- kib_tx_t *tx;
+kiblnd_map_tx_pool(struct kib_tx_pool *tpo)
+{
+ struct kib_pages *txpgs = tpo->tpo_tx_pages;
+ struct kib_pool *pool = &tpo->tpo_pool;
+ struct kib_net *net = pool->po_owner->ps_net;
+ struct kib_dev *dev;
+ struct page *page;
+ struct kib_tx *tx;
int page_offset;
int ipage;
int i;
page = txpgs->ibp_pages[ipage];
tx = &tpo->tpo_tx_descs[i];
- tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
- page_offset);
+ tx->tx_msg = (struct kib_msg *)(((char *)page_address(page)) +
+ page_offset);
tx->tx_msgaddr = kiblnd_dma_map_single(tpo->tpo_hdev->ibh_ibdev,
tx->tx_msg,
}
static void
-kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
+kiblnd_destroy_fmr_pool(struct kib_fmr_pool *fpo)
{
LASSERT(fpo->fpo_map_count == 0);
static void
kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *fpo, *tmp;
+ struct kib_fmr_pool *fpo, *tmp;
list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
list_del(&fpo->fpo_list);
return max(IBLND_FMR_POOL_FLUSH, size);
}
-static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+static int kiblnd_alloc_fmr_pool(struct kib_fmr_poolset *fps,
+ struct kib_fmr_pool *fpo)
{
struct ib_fmr_pool_param param = {
- .max_pages_per_fmr = LNET_MAX_PAYLOAD/PAGE_SIZE,
+ .max_pages_per_fmr = LNET_MAX_IOV,
.page_shift = PAGE_SHIFT,
.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE),
return rc;
}
-static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo,
- __u32 dev_caps)
+static int kiblnd_alloc_freg_pool(struct kib_fmr_poolset *fps,
+ struct kib_fmr_pool *fpo,
+ enum kib_dev_caps dev_caps)
{
struct kib_fast_reg_descriptor *frd, *tmp;
int i, rc;
#ifndef HAVE_IB_MAP_MR_SG
frd->frd_frpl = ib_alloc_fast_reg_page_list(fpo->fpo_hdev->ibh_ibdev,
- LNET_MAX_PAYLOAD/PAGE_SIZE);
+ LNET_MAX_IOV);
if (IS_ERR(frd->frd_frpl)) {
rc = PTR_ERR(frd->frd_frpl);
CERROR("Failed to allocate ib_fast_reg_page_list: %d\n",
#ifdef HAVE_IB_ALLOC_FAST_REG_MR
frd->frd_mr = ib_alloc_fast_reg_mr(fpo->fpo_hdev->ibh_pd,
- LNET_MAX_PAYLOAD/PAGE_SIZE);
+ LNET_MAX_IOV);
#else
/*
* it is expected to get here if this is an MLX-5 card.
#else
IB_MR_TYPE_MEM_REG,
#endif
- LNET_MAX_PAYLOAD/PAGE_SIZE);
+ LNET_MAX_IOV);
if ((*kiblnd_tunables.kib_use_fastreg_gaps == 1) &&
(dev_caps & IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT))
CWARN("using IB_MR_TYPE_SG_GAPS, expect a performance drop\n");
return rc;
}
-static int
-kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t **pp_fpo)
+static int kiblnd_create_fmr_pool(struct kib_fmr_poolset *fps,
+ struct kib_fmr_pool **pp_fpo)
{
- kib_dev_t *dev = fps->fps_net->ibn_dev;
- kib_fmr_pool_t *fpo;
+ struct kib_dev *dev = fps->fps_net->ibn_dev;
+ struct kib_fmr_pool *fpo;
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
}
static void
-kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps, struct list_head *zombies)
+kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies)
{
	if (fps->fps_net == NULL) /* initialized? */
return;
spin_lock(&fps->fps_lock);
while (!list_empty(&fps->fps_pool_list)) {
- kib_fmr_pool_t *fpo = list_entry(fps->fps_pool_list.next,
- kib_fmr_pool_t, fpo_list);
+ struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
+ struct kib_fmr_pool,
+ fpo_list);
+
fpo->fpo_failed = 1;
list_del(&fpo->fpo_list);
if (fpo->fpo_map_count == 0)
}
static void
-kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
+kiblnd_fini_fmr_poolset(struct kib_fmr_poolset *fps)
{
if (fps->fps_net != NULL) { /* initialized? */
kiblnd_destroy_fmr_pool_list(&fps->fps_failed_pool_list);
}
static int
-kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
- kib_net_t *net,
+kiblnd_init_fmr_poolset(struct kib_fmr_poolset *fps, int cpt, int ncpts,
+ struct kib_net *net,
struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
- kib_fmr_pool_t *fpo;
- int rc;
+ struct kib_fmr_pool *fpo;
+ int rc;
- memset(fps, 0, sizeof(kib_fmr_poolset_t));
+ memset(fps, 0, sizeof(struct kib_fmr_poolset));
fps->fps_net = net;
fps->fps_cpt = cpt;
}
static int
-kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, time64_t now)
+kiblnd_fmr_pool_is_idle(struct kib_fmr_pool *fpo, time64_t now)
{
if (fpo->fpo_map_count != 0) /* still in use */
return 0;
}
static int
-kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+kiblnd_map_tx_pages(struct kib_tx *tx, struct kib_rdma_desc *rd)
{
- kib_hca_dev_t *hdev;
+ struct kib_hca_dev *hdev;
__u64 *pages = tx->tx_pages;
int npages;
int size;
}
void
-kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
+kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
- kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps;
+ struct list_head zombies = LIST_HEAD_INIT(zombies);
+ struct kib_fmr_pool *fpo = fmr->fmr_pool;
+ struct kib_fmr_poolset *fps;
time64_t now = ktime_get_seconds();
- kib_fmr_pool_t *tmp;
+ struct kib_fmr_pool *tmp;
int rc;
if (!fpo)
fps = fpo->fpo_owner;
if (fpo->fpo_is_fmr) {
if (fmr->fmr_pfmr) {
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(!rc);
+ ib_fmr_pool_unmap(fmr->fmr_pfmr);
fmr->fmr_pfmr = NULL;
}
kiblnd_destroy_fmr_pool_list(&zombies);
}
-int
-kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx, kib_rdma_desc_t *rd,
- __u32 nob, __u64 iov, kib_fmr_t *fmr)
+int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, u32 nob, u64 iov,
+ struct kib_fmr *fmr)
{
- kib_fmr_pool_t *fpo;
+ struct kib_fmr_pool *fpo;
__u64 *pages = tx->tx_pages;
__u64 version;
bool is_rx = (rd != tx->tx_rd);
tx_pages_mapped = 1;
}
- pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
- pages, npages, iov);
+		pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
+					    pages, npages, iov);
if (likely(!IS_ERR(pfmr))) {
fmr->fmr_key = is_rx ? pfmr->fmr->rkey
: pfmr->fmr->lkey;
}
static void
-kiblnd_fini_pool(kib_pool_t *pool)
+kiblnd_fini_pool(struct kib_pool *pool)
{
LASSERT(list_empty(&pool->po_free_list));
LASSERT(pool->po_allocated == 0);
}
static void
-kiblnd_init_pool(kib_poolset_t *ps, kib_pool_t *pool, int size)
+kiblnd_init_pool(struct kib_poolset *ps, struct kib_pool *pool, int size)
{
CDEBUG(D_NET, "Initialize %s pool\n", ps->ps_name);
- memset(pool, 0, sizeof(kib_pool_t));
+ memset(pool, 0, sizeof(struct kib_pool));
INIT_LIST_HEAD(&pool->po_free_list);
pool->po_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
pool->po_owner = ps;
static void
kiblnd_destroy_pool_list(struct list_head *head)
{
- kib_pool_t *pool;
+ struct kib_pool *pool;
while (!list_empty(head)) {
- pool = list_entry(head->next, kib_pool_t, po_list);
+ pool = list_entry(head->next, struct kib_pool, po_list);
list_del(&pool->po_list);
LASSERT(pool->po_owner != NULL);
}
static void
-kiblnd_fail_poolset(kib_poolset_t *ps, struct list_head *zombies)
+kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
	if (ps->ps_net == NULL) /* initialized? */
return;
spin_lock(&ps->ps_lock);
while (!list_empty(&ps->ps_pool_list)) {
- kib_pool_t *po = list_entry(ps->ps_pool_list.next,
- kib_pool_t, po_list);
+ struct kib_pool *po = list_entry(ps->ps_pool_list.next,
+ struct kib_pool, po_list);
+
po->po_failed = 1;
list_del(&po->po_list);
if (po->po_allocated == 0)
}
static void
-kiblnd_fini_poolset(kib_poolset_t *ps)
+kiblnd_fini_poolset(struct kib_poolset *ps)
{
if (ps->ps_net != NULL) { /* initialized? */
kiblnd_destroy_pool_list(&ps->ps_failed_pool_list);
}
static int
-kiblnd_init_poolset(kib_poolset_t *ps, int cpt,
- kib_net_t *net, char *name, int size,
+kiblnd_init_poolset(struct kib_poolset *ps, int cpt,
+ struct kib_net *net, char *name, int size,
kib_ps_pool_create_t po_create,
kib_ps_pool_destroy_t po_destroy,
kib_ps_node_init_t nd_init,
kib_ps_node_fini_t nd_fini)
{
- kib_pool_t *pool;
- int rc;
+ struct kib_pool *pool;
+ int rc;
- memset(ps, 0, sizeof(kib_poolset_t));
+ memset(ps, 0, sizeof(struct kib_poolset));
ps->ps_cpt = cpt;
ps->ps_net = net;
}
static int
-kiblnd_pool_is_idle(kib_pool_t *pool, time64_t now)
+kiblnd_pool_is_idle(struct kib_pool *pool, time64_t now)
{
if (pool->po_allocated != 0) /* still in use */
return 0;
}
void
-kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node)
+kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node)
{
struct list_head zombies = LIST_HEAD_INIT(zombies);
- kib_poolset_t *ps = pool->po_owner;
- kib_pool_t *tmp;
+ struct kib_poolset *ps = pool->po_owner;
+ struct kib_pool *tmp;
time64_t now = ktime_get_seconds();
spin_lock(&ps->ps_lock);
}
struct list_head *
-kiblnd_pool_alloc_node(kib_poolset_t *ps)
+kiblnd_pool_alloc_node(struct kib_poolset *ps)
{
struct list_head *node;
- kib_pool_t *pool;
+ struct kib_pool *pool;
int rc;
unsigned int interval = 1;
ktime_t time_before;
}
static void
-kiblnd_destroy_tx_pool(kib_pool_t *pool)
+kiblnd_destroy_tx_pool(struct kib_pool *pool)
{
- kib_tx_pool_t *tpo = container_of(pool, kib_tx_pool_t, tpo_pool);
- int i;
+ struct kib_tx_pool *tpo = container_of(pool, struct kib_tx_pool,
+ tpo_pool);
+ int i;
LASSERT (pool->po_allocated == 0);
goto out;
for (i = 0; i < pool->po_size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ struct kib_tx *tx = &tpo->tpo_tx_descs[i];
int wrq_sge = *kiblnd_tunables.kib_wrq_sge;
list_del(&tx->tx_list);
sizeof(*tx->tx_sge));
if (tx->tx_rd != NULL)
LIBCFS_FREE(tx->tx_rd,
- offsetof(kib_rdma_desc_t,
+ offsetof(struct kib_rdma_desc,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
}
LIBCFS_FREE(tpo->tpo_tx_descs,
- pool->po_size * sizeof(kib_tx_t));
+ pool->po_size * sizeof(struct kib_tx));
out:
kiblnd_fini_pool(pool);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ LIBCFS_FREE(tpo, sizeof(struct kib_tx_pool));
}
static int kiblnd_tx_pool_size(struct lnet_ni *ni, int ncpts)
}
static int
-kiblnd_create_tx_pool(kib_poolset_t *ps, int size, kib_pool_t **pp_po)
+kiblnd_create_tx_pool(struct kib_poolset *ps, int size, struct kib_pool **pp_po)
{
int i;
int npg;
- kib_pool_t *pool;
- kib_tx_pool_t *tpo;
+ struct kib_pool *pool;
+ struct kib_tx_pool *tpo;
LIBCFS_CPT_ALLOC(tpo, lnet_cpt_table(), ps->ps_cpt, sizeof(*tpo));
if (tpo == NULL) {
npg = (size * IBLND_MSG_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;
if (kiblnd_alloc_pages(&tpo->tpo_tx_pages, ps->ps_cpt, npg) != 0) {
CERROR("Can't allocate tx pages: %d\n", npg);
- LIBCFS_FREE(tpo, sizeof(kib_tx_pool_t));
+ LIBCFS_FREE(tpo, sizeof(struct kib_tx_pool));
return -ENOMEM;
}
LIBCFS_CPT_ALLOC(tpo->tpo_tx_descs, lnet_cpt_table(), ps->ps_cpt,
- size * sizeof(kib_tx_t));
+ size * sizeof(struct kib_tx));
if (tpo->tpo_tx_descs == NULL) {
CERROR("Can't allocate %d tx descriptors\n", size);
ps->ps_pool_destroy(pool);
return -ENOMEM;
}
- memset(tpo->tpo_tx_descs, 0, size * sizeof(kib_tx_t));
+ memset(tpo->tpo_tx_descs, 0, size * sizeof(struct kib_tx));
for (i = 0; i < size; i++) {
- kib_tx_t *tx = &tpo->tpo_tx_descs[i];
+ struct kib_tx *tx = &tpo->tpo_tx_descs[i];
int wrq_sge = *kiblnd_tunables.kib_wrq_sge;
tx->tx_pool = tpo;
break;
LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt,
- offsetof(kib_rdma_desc_t,
+ offsetof(struct kib_rdma_desc,
rd_frags[IBLND_MAX_RDMA_FRAGS]));
if (tx->tx_rd == NULL)
break;
}
static void
-kiblnd_tx_init(kib_pool_t *pool, struct list_head *node)
+kiblnd_tx_init(struct kib_pool *pool, struct list_head *node)
{
- kib_tx_poolset_t *tps = container_of(pool->po_owner, kib_tx_poolset_t,
- tps_poolset);
- kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);
+ struct kib_tx_poolset *tps = container_of(pool->po_owner,
+ struct kib_tx_poolset,
+ tps_poolset);
+ struct kib_tx *tx = list_entry(node, struct kib_tx, tx_list);
tx->tx_cookie = tps->tps_next_tx_cookie++;
}
static void
-kiblnd_net_fini_pools(kib_net_t *net)
+kiblnd_net_fini_pools(struct kib_net *net)
{
int i;
cfs_cpt_for_each(i, lnet_cpt_table()) {
- kib_tx_poolset_t *tps;
- kib_fmr_poolset_t *fps;
+ struct kib_tx_poolset *tps;
+ struct kib_fmr_poolset *fps;
if (net->ibn_tx_ps != NULL) {
tps = net->ibn_tx_ps[i];
}
static int
-kiblnd_net_init_pools(kib_net_t *net, struct lnet_ni *ni, __u32 *cpts,
+kiblnd_net_init_pools(struct kib_net *net, struct lnet_ni *ni, __u32 *cpts,
int ncpts)
{
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
* FMR pool and map-on-demand if premapping failed */
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_fmr_poolset_t));
+ sizeof(struct kib_fmr_poolset));
if (net->ibn_fmr_ps == NULL) {
CERROR("Failed to allocate FMR pool array\n");
rc = -ENOMEM;
create_tx_pool:
#endif
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(kib_tx_poolset_t));
+ sizeof(struct kib_tx_poolset));
if (net->ibn_tx_ps == NULL) {
CERROR("Failed to allocate tx pool array\n");
rc = -ENOMEM;
}
static int
-kiblnd_hdev_get_attr(kib_hca_dev_t *hdev)
+kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
{
struct ib_device_attr *dev_attr;
int rc = 0;
#endif
hdev->ibh_mr_size = dev_attr->max_mr_size;
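+	/* record the device-wide WR limit so kiblnd_send_wrs() can
+	 * clamp each connection's queue depth against it */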
+ hdev->ibh_max_qp_wr = dev_attr->max_qp_wr;
/* Setup device Memory Registration capabilities */
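+	/* FMR is only usable if the HCA implements all four verbs;
+	 * newer kernels expose them through ib_device->ops, hence
+	 * the HAVE_IB_DEVICE_OPS configure probe */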
+#ifdef HAVE_IB_DEVICE_OPS
+ if (hdev->ibh_ibdev->ops.alloc_fmr &&
+ hdev->ibh_ibdev->ops.dealloc_fmr &&
+ hdev->ibh_ibdev->ops.map_phys_fmr &&
+ hdev->ibh_ibdev->ops.unmap_fmr) {
+#else
if (hdev->ibh_ibdev->alloc_fmr &&
hdev->ibh_ibdev->dealloc_fmr &&
hdev->ibh_ibdev->map_phys_fmr &&
hdev->ibh_ibdev->unmap_fmr) {
+#endif
LCONSOLE_INFO("Using FMR for registration\n");
hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
} else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
rc = -ENOSYS;
}
- if (rc == 0 && hdev->ibh_mr_size == ~0ULL)
- hdev->ibh_mr_shift = 64;
- else if (rc != 0)
+ if (rc != 0)
rc = -EINVAL;
#ifndef HAVE_IB_DEVICE_ATTRS
#ifdef HAVE_IB_GET_DMA_MR
static void
-kiblnd_hdev_cleanup_mrs(kib_hca_dev_t *hdev)
+kiblnd_hdev_cleanup_mrs(struct kib_hca_dev *hdev)
{
if (hdev->ibh_mrs == NULL)
return;
#endif
void
-kiblnd_hdev_destroy(kib_hca_dev_t *hdev)
+kiblnd_hdev_destroy(struct kib_hca_dev *hdev)
{
#ifdef HAVE_IB_GET_DMA_MR
kiblnd_hdev_cleanup_mrs(hdev);
#ifdef HAVE_IB_GET_DMA_MR
static int
-kiblnd_hdev_setup_mrs(kib_hca_dev_t *hdev)
+kiblnd_hdev_setup_mrs(struct kib_hca_dev *hdev)
{
struct ib_mr *mr;
int acflags = IB_ACCESS_LOCAL_WRITE |
}
static int
-kiblnd_dev_need_failover(kib_dev_t *dev)
+kiblnd_dev_need_failover(struct kib_dev *dev, struct net *ns)
{
struct rdma_cm_id *cmid;
struct sockaddr_in srcaddr;
*
* a. rdma_bind_addr(), it will conflict with listener cmid
* b. rdma_resolve_addr() to zero addr */
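+	/* the cm_id must be created in the NI's network namespace */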
- cmid = kiblnd_rdma_create_id(kiblnd_dummy_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
+ cmid = kiblnd_rdma_create_id(ns, kiblnd_dummy_callback, dev,
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cmid)) {
rc = PTR_ERR(cmid);
CERROR("Failed to create cmid for failover: %d\n", rc);
}
int
-kiblnd_dev_failover(kib_dev_t *dev)
+kiblnd_dev_failover(struct kib_dev *dev, struct net *ns)
{
struct list_head zombie_tpo = LIST_HEAD_INIT(zombie_tpo);
struct list_head zombie_ppo = LIST_HEAD_INIT(zombie_ppo);
struct list_head zombie_fpo = LIST_HEAD_INIT(zombie_fpo);
struct rdma_cm_id *cmid = NULL;
- kib_hca_dev_t *hdev = NULL;
- kib_hca_dev_t *old;
+ struct kib_hca_dev *hdev = NULL;
+ struct kib_hca_dev *old;
struct ib_pd *pd;
- kib_net_t *net;
+ struct kib_net *net;
struct sockaddr_in addr;
unsigned long flags;
int rc = 0;
dev->ibd_can_failover ||
dev->ibd_hdev == NULL);
- rc = kiblnd_dev_need_failover(dev);
+ rc = kiblnd_dev_need_failover(dev, ns);
if (rc <= 0)
goto out;
rdma_destroy_id(cmid);
}
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, dev, RDMA_PS_TCP,
- IB_QPT_RC);
+ cmid = kiblnd_rdma_create_id(ns, kiblnd_cm_callback, dev, RDMA_PS_TCP,
+ IB_QPT_RC);
if (IS_ERR(cmid)) {
rc = PTR_ERR(cmid);
CERROR("Failed to create cmid for failover: %d\n", rc);
}
void
-kiblnd_destroy_dev (kib_dev_t *dev)
+kiblnd_destroy_dev(struct kib_dev *dev)
{
- LASSERT (dev->ibd_nnets == 0);
+ LASSERT(dev->ibd_nnets == 0);
LASSERT(list_empty(&dev->ibd_nets));
list_del(&dev->ibd_fail_list);
LIBCFS_FREE(dev, sizeof(*dev));
}
-static kib_dev_t *
-kiblnd_create_dev(char *ifname)
-{
- struct net_device *netdev;
- kib_dev_t *dev;
- __u32 netmask;
- __u32 ip;
- int up;
- int rc;
-
- rc = lnet_ipif_query(ifname, &up, &ip, &netmask);
- if (rc != 0) {
- CERROR("Can't query IPoIB interface %s: %d\n",
- ifname, rc);
- return NULL;
- }
-
- if (!up) {
- CERROR("Can't query IPoIB interface %s: it's down\n", ifname);
- return NULL;
- }
-
- LIBCFS_ALLOC(dev, sizeof(*dev));
- if (dev == NULL)
- return NULL;
-
- netdev = dev_get_by_name(&init_net, ifname);
- if (netdev == NULL) {
- dev->ibd_can_failover = 0;
- } else {
- dev->ibd_can_failover = !!(netdev->flags & IFF_MASTER);
- dev_put(netdev);
- }
-
- INIT_LIST_HEAD(&dev->ibd_nets);
- INIT_LIST_HEAD(&dev->ibd_list); /* not yet in kib_devs */
- INIT_LIST_HEAD(&dev->ibd_fail_list);
- dev->ibd_ifip = ip;
- strcpy(&dev->ibd_ifname[0], ifname);
-
- /* initialize the device */
- rc = kiblnd_dev_failover(dev);
- if (rc != 0) {
- CERROR("Can't initialize device: %d\n", rc);
- LIBCFS_FREE(dev, sizeof(*dev));
- return NULL;
- }
-
- list_add_tail(&dev->ibd_list,
- &kiblnd_data.kib_devs);
- return dev;
-}
-
static void
kiblnd_base_shutdown(void)
{
static void
kiblnd_shutdown(struct lnet_ni *ni)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
int i;
unsigned long flags;
}
static int
-kiblnd_base_startup(void)
+kiblnd_base_startup(struct net *ns)
{
struct kib_sched_info *sched;
int rc;
}
if (*kiblnd_tunables.kib_dev_failover != 0)
- rc = kiblnd_thread_start(kiblnd_failover_thread, NULL,
+ rc = kiblnd_thread_start(kiblnd_failover_thread, ns,
"kiblnd_failover");
if (rc != 0) {
return rc;
}
-static int
-kiblnd_dev_start_threads(kib_dev_t *dev, int newdev, __u32 *cpts, int ncpts)
+static int kiblnd_dev_start_threads(struct kib_dev *dev, u32 *cpts, int ncpts)
{
int cpt;
int rc;
cpt = (cpts == NULL) ? i : cpts[i];
sched = kiblnd_data.kib_scheds[cpt];
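+		/* scheduler threads are per-CPT and shared by all
+		 * devices; skip CPTs that already have threads */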
- if (!newdev && sched->ibs_nthreads > 0)
+ if (sched->ibs_nthreads > 0)
continue;
rc = kiblnd_start_schedulers(kiblnd_data.kib_scheds[cpt]);
return 0;
}
-static kib_dev_t *
-kiblnd_dev_search(char *ifname)
-{
- kib_dev_t *alias = NULL;
- kib_dev_t *dev;
- char *colon;
- char *colon2;
-
- colon = strchr(ifname, ':');
- list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
- return dev;
-
- if (alias != NULL)
- continue;
-
- colon2 = strchr(dev->ibd_ifname, ':');
- if (colon != NULL)
- *colon = 0;
- if (colon2 != NULL)
- *colon2 = 0;
-
- if (strcmp(&dev->ibd_ifname[0], ifname) == 0)
- alias = dev;
-
- if (colon != NULL)
- *colon = ':';
- if (colon2 != NULL)
- *colon2 = ':';
- }
- return alias;
-}
-
static int
kiblnd_startup(struct lnet_ni *ni)
{
- char *ifname;
- kib_dev_t *ibdev = NULL;
- kib_net_t *net;
- unsigned long flags;
- int rc;
- int newdev;
- int node_id;
+ char *ifname;
+ struct lnet_inetdev *ifaces = NULL;
+ struct kib_dev *ibdev = NULL;
+ struct kib_net *net;
+ unsigned long flags;
+ int rc;
+ int i;
- LASSERT (ni->ni_net->net_lnd == &the_o2iblnd);
+ LASSERT(ni->ni_net->net_lnd == &the_o2iblnd);
- if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
- rc = kiblnd_base_startup();
- if (rc != 0)
- return rc;
- }
+ if (kiblnd_data.kib_init == IBLND_INIT_NOTHING) {
+ rc = kiblnd_base_startup(ni->ni_net_ns);
+ if (rc != 0)
+ return rc;
+ }
- LIBCFS_ALLOC(net, sizeof(*net));
- ni->ni_data = net;
- if (net == NULL)
- goto failed;
+ LIBCFS_ALLOC(net, sizeof(*net));
+ ni->ni_data = net;
+ if (net == NULL)
+ goto failed;
net->ibn_incarnation = ktime_get_real_ns() / NSEC_PER_USEC;
kiblnd_tunables_setup(ni);
+ /*
+ * ni_interfaces is only to support legacy pre Multi-Rail
+ * tcp bonding for ksocklnd. Multi-Rail wants each secondary
+	 * IP to be treated as a unique 'struct ni' interface instead.
+ */
if (ni->ni_interfaces[0] != NULL) {
/* Use the IPoIB interface specified in 'networks=' */
-
- CLASSERT(LNET_INTERFACES_NUM > 1);
if (ni->ni_interfaces[1] != NULL) {
- CERROR("Multiple interfaces not supported\n");
+ CERROR("ko2iblnd: Multiple interfaces not supported\n");
goto failed;
}
ifname = *kiblnd_tunables.kib_default_ipif;
}
- if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
- CERROR("IPoIB interface name too long: %s\n", ifname);
- goto failed;
- }
+ if (strlen(ifname) >= sizeof(ibdev->ibd_ifname)) {
+ CERROR("IPoIB interface name too long: %s\n", ifname);
+ goto failed;
+ }
+
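+	/* enumerate the IP interfaces visible in the NI's namespace;
+	 * a negative return is an error, otherwise rc is the number
+	 * of interfaces found */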
+ rc = lnet_inet_enumerate(&ifaces, ni->ni_net_ns);
+ if (rc < 0)
+ goto failed;
+
+ for (i = 0; i < rc; i++) {
+ if (strcmp(ifname, ifaces[i].li_name) == 0)
+ break;
+ }
+
+ if (i == rc) {
+ CERROR("ko2iblnd: No matching interfaces\n");
+ rc = -ENOENT;
+ goto failed;
+ }
+
+ LIBCFS_ALLOC(ibdev, sizeof(*ibdev));
+ if (!ibdev) {
+ rc = -ENOMEM;
+ goto failed;
+ }
- ibdev = kiblnd_dev_search(ifname);
+ ibdev->ibd_ifip = ifaces[i].li_ipaddr;
+ strlcpy(ibdev->ibd_ifname, ifaces[i].li_name,
+ sizeof(ibdev->ibd_ifname));
+ ibdev->ibd_can_failover = !!(ifaces[i].li_flags & IFF_MASTER);
- newdev = ibdev == NULL;
- /* hmm...create kib_dev even for alias */
- if (ibdev == NULL || strcmp(&ibdev->ibd_ifname[0], ifname) != 0)
- ibdev = kiblnd_create_dev(ifname);
+ INIT_LIST_HEAD(&ibdev->ibd_nets);
+ INIT_LIST_HEAD(&ibdev->ibd_list); /* not yet in kib_devs */
+ INIT_LIST_HEAD(&ibdev->ibd_fail_list);
- if (ibdev == NULL)
+ /* initialize the device */
+ rc = kiblnd_dev_failover(ibdev, ni->ni_net_ns);
+ if (rc) {
+ CERROR("ko2iblnd: Can't initialize device: rc = %d\n", rc);
goto failed;
+ }
- node_id = dev_to_node(ibdev->ibd_hdev->ibh_ibdev->dma_device);
- ni->ni_dev_cpt = cfs_cpt_of_node(lnet_cpt_table(), node_id);
+ list_add_tail(&ibdev->ibd_list, &kiblnd_data.kib_devs);
net->ibn_dev = ibdev;
ni->ni_nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), ibdev->ibd_ifip);
- rc = kiblnd_dev_start_threads(ibdev, newdev,
- ni->ni_cpts, ni->ni_ncpts);
+ ni->ni_dev_cpt = ifaces[i].li_cpt;
+
+ rc = kiblnd_dev_start_threads(ibdev, ni->ni_cpts, ni->ni_ncpts);
if (rc != 0)
goto failed;
rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
- if (rc != 0) {
- CERROR("Failed to initialize NI pools: %d\n", rc);
- goto failed;
- }
+ if (rc != 0) {
+ CERROR("Failed to initialize NI pools: %d\n", rc);
+ goto failed;
+ }
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
ibdev->ibd_nnets++;
list_add_tail(&net->ibn_list, &ibdev->ibd_nets);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- net->ibn_init = IBLND_INIT_ALL;
+ net->ibn_init = IBLND_INIT_ALL;
- return 0;
+ return 0;
failed:
if (net != NULL && net->ibn_dev == NULL && ibdev != NULL)
- kiblnd_destroy_dev(ibdev);
+ kiblnd_destroy_dev(ibdev);
- kiblnd_shutdown(ni);
+ kfree(ifaces);
+ kiblnd_shutdown(ni);
- CDEBUG(D_NET, "kiblnd_startup failed\n");
- return -ENETDOWN;
+ CDEBUG(D_NET, "kiblnd_startup failed\n");
+ return -ENETDOWN;
}
static struct lnet_lnd the_o2iblnd = {
{
int rc;
- CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
+ CLASSERT(sizeof(struct kib_msg) <= IBLND_MSG_SIZE);
+ CLASSERT(offsetof(struct kib_msg,
ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS]) <=
IBLND_MSG_SIZE);
- CLASSERT(offsetof(kib_msg_t,
+ CLASSERT(offsetof(struct kib_msg,
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);