#define MAX_CONN_RACES_BEFORE_ABORT 20
-static void kiblnd_peer_alive(kib_peer_ni_t *peer_ni);
-static void kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error);
-static void kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx,
+static void kiblnd_peer_alive(struct kib_peer_ni *peer_ni);
+static void kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
+ int error);
+static void kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx,
int type, int body_nob);
-static int kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie);
-static void kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn);
-static void kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn);
+static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+ int resid, struct kib_rdma_desc *dstrd, u64 dstcookie);
+static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn);
+static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn);
-static void kiblnd_unmap_tx(kib_tx_t *tx);
-static void kiblnd_check_sends_locked(kib_conn_t *conn);
+static void kiblnd_unmap_tx(struct kib_tx *tx);
+static void kiblnd_check_sends_locked(struct kib_conn *conn);
void
-kiblnd_tx_done(kib_tx_t *tx)
+kiblnd_tx_done(struct kib_tx *tx)
{
struct lnet_msg *lntmsg[2];
int rc;
if (lntmsg[i] == NULL)
continue;
+ /* propagate health status to LNet for requests */
+ if (i == 0 && lntmsg[i])
+ lntmsg[i]->msg_health_status = tx->tx_hstatus;
+
lnet_finalize(lntmsg[i], rc);
}
}
void
-kiblnd_txlist_done(struct list_head *txlist, int status)
+kiblnd_txlist_done(struct list_head *txlist, int status,
+		   enum lnet_msg_hstatus hstatus)
{
-	kib_tx_t *tx;
+	struct kib_tx *tx;
	while (!list_empty(txlist)) {
-		tx = list_entry(txlist->next, kib_tx_t, tx_list);
+		tx = list_entry(txlist->next, struct kib_tx, tx_list);
		list_del(&tx->tx_list);
		/* complete now */
		tx->tx_waiting = 0;
		tx->tx_status = status;
+		/* LNET_MSG_STATUS_OK means "don't override": keep any
+		 * health status already recorded on this tx by an
+		 * earlier failure path */
+		if (hstatus != LNET_MSG_STATUS_OK)
+			tx->tx_hstatus = hstatus;
		kiblnd_tx_done(tx);
	}
}
-static kib_tx_t *
+static struct kib_tx *
kiblnd_get_idle_tx(struct lnet_ni *ni, lnet_nid_t target)
{
-	kib_net_t *net = (kib_net_t *)ni->ni_data;
-	struct list_head *node;
-	kib_tx_t *tx;
-	kib_tx_poolset_t *tps;
+	struct kib_net *net = ni->ni_data;
+	struct list_head *node;
+	struct kib_tx *tx;
+	struct kib_tx_poolset *tps;
	tps = net->ibn_tx_ps[lnet_cpt_of_nid(target, ni)];
	node = kiblnd_pool_alloc_node(&tps->tps_poolset);
	if (node == NULL)
		return NULL;
-	tx = container_of(node, kib_tx_t, tx_list);
+	tx = container_of(node, struct kib_tx, tx_list);
	LASSERT (tx->tx_nwrq == 0);
	LASSERT (!tx->tx_queued);
	LASSERT (tx->tx_nfrags == 0);
	tx->tx_gaps = false;
+	/* a recycled tx may carry the health status of its previous
+	 * use; reset it so only this send's failure paths set it */
+	tx->tx_hstatus = LNET_MSG_STATUS_OK;
	return tx;
}
static void
-kiblnd_drop_rx(kib_rx_t *rx)
+kiblnd_drop_rx(struct kib_rx *rx)
{
- kib_conn_t *conn = rx->rx_conn;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;
spin_lock_irqsave(&sched->ibs_lock, flags);
LASSERT(conn->ibc_nrx > 0);
}
int
-kiblnd_post_rx (kib_rx_t *rx, int credit)
+kiblnd_post_rx(struct kib_rx *rx, int credit)
{
- kib_conn_t *conn = rx->rx_conn;
- kib_net_t *net = conn->ibc_peer->ibp_ni->ni_data;
- struct ib_recv_wr *bad_wrq = NULL;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_net *net = conn->ibc_peer->ibp_ni->ni_data;
+ struct ib_recv_wr *bad_wrq = NULL;
#ifdef HAVE_IB_GET_DMA_MR
- struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
+ struct ib_mr *mr = conn->ibc_hdev->ibh_mrs;
#endif
- int rc;
+ int rc;
LASSERT (net != NULL);
LASSERT (!in_interrupt());
* own this rx (and rx::rx_conn) anymore, LU-5678.
*/
kiblnd_conn_addref(conn);
+#ifdef HAVE_IB_POST_SEND_RECV_CONST
+ rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq,
+ (const struct ib_recv_wr **)&bad_wrq);
+#else
rc = ib_post_recv(conn->ibc_cmid->qp, &rx->rx_wrq, &bad_wrq);
+#endif
if (unlikely(rc != 0)) {
CERROR("Can't post rx for %s: %d, bad_wrq: %p\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc, bad_wrq);
return rc;
}
-static kib_tx_t *
-kiblnd_find_waiting_tx_locked(kib_conn_t *conn, int txtype, __u64 cookie)
+static struct kib_tx *
+kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
struct list_head *tmp;
list_for_each(tmp, &conn->ibc_active_txs) {
- kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);
+ struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
}
static void
-kiblnd_handle_completion(kib_conn_t *conn, int txtype, int status, __u64 cookie)
+kiblnd_handle_completion(struct kib_conn *conn, int txtype, int status, u64 cookie)
{
- kib_tx_t *tx;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- int idle;
+ struct kib_tx *tx;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+ int idle;
spin_lock(&conn->ibc_lock);
spin_unlock(&conn->ibc_lock);
CWARN("Unmatched completion type %x cookie %#llx from %s\n",
- txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- kiblnd_close_conn(conn, -EPROTO);
- return;
- }
+ txtype, cookie, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ kiblnd_close_conn(conn, -EPROTO);
+ return;
+ }
- if (tx->tx_status == 0) { /* success so far */
- if (status < 0) { /* failed? */
- tx->tx_status = status;
- } else if (txtype == IBLND_MSG_GET_REQ) {
- lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
- }
- }
+ if (tx->tx_status == 0) { /* success so far */
+ if (status < 0) { /* failed? */
+ tx->tx_status = status;
+ tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_ERROR;
+ } else if (txtype == IBLND_MSG_GET_REQ) {
+ lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status);
+ }
+ }
- tx->tx_waiting = 0;
+ tx->tx_waiting = 0;
- idle = !tx->tx_queued && (tx->tx_sending == 0);
- if (idle)
+ idle = !tx->tx_queued && (tx->tx_sending == 0);
+ if (idle)
list_del(&tx->tx_list);
spin_unlock(&conn->ibc_lock);
}
static void
-kiblnd_send_completion(kib_conn_t *conn, int type, int status, __u64 cookie)
+kiblnd_send_completion(struct kib_conn *conn, int type, int status, u64 cookie)
{
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+ struct kib_tx *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);
if (tx == NULL) {
CERROR("Can't get tx for completion %x for %s\n",
tx->tx_msg->ibm_u.completion.ibcm_status = status;
tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie;
- kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t));
+ kiblnd_init_tx_msg(ni, tx, type, sizeof(struct kib_completion_msg));
kiblnd_queue_tx(tx, conn);
}
static void
-kiblnd_handle_rx (kib_rx_t *rx)
+kiblnd_handle_rx(struct kib_rx *rx)
{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+ struct kib_msg *msg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
int credits = msg->ibm_credits;
- kib_tx_t *tx;
+ struct kib_tx *tx;
int rc = 0;
int rc2;
int post_credit;
}
static void
-kiblnd_rx_complete (kib_rx_t *rx, int status, int nob)
+kiblnd_rx_complete(struct kib_rx *rx, int status, int nob)
{
- kib_msg_t *msg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- kib_net_t *net = ni->ni_data;
- int rc;
- int err = -EIO;
+ struct kib_msg *msg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
+ struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
+ struct kib_net *net = ni->ni_data;
+ int rc;
+ int err = -EIO;
LASSERT (net != NULL);
LASSERT (rx->rx_nob < 0); /* was posted */
/* set time last known alive */
kiblnd_peer_alive(conn->ibc_peer);
- /* racing with connection establishment/teardown! */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED)
- goto ignore;
+ /* racing with connection establishment/teardown! */
+
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ unsigned long flags;
+ write_lock_irqsave(g_lock, flags);
+ /* must check holding global lock to eliminate race */
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ list_add_tail(&rx->rx_list, &conn->ibc_early_rxs);
+ write_unlock_irqrestore(g_lock, flags);
+ return;
+ }
+ write_unlock_irqrestore(g_lock, flags);
+ }
kiblnd_handle_rx(rx);
return;
}
static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
+kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, u32 nob)
{
- kib_hca_dev_t *hdev;
- kib_dev_t *dev;
- kib_fmr_poolset_t *fps;
+ struct kib_hca_dev *hdev;
+ struct kib_dev *dev;
+ struct kib_fmr_poolset *fps;
int cpt;
int rc;
int i;
}
static void
-kiblnd_unmap_tx(kib_tx_t *tx)
+kiblnd_unmap_tx(struct kib_tx *tx)
{
if (tx->tx_fmr.fmr_pfmr || tx->tx_fmr.fmr_frd)
kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);
#ifdef HAVE_IB_GET_DMA_MR
static struct ib_mr *
-kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd)
+kiblnd_find_rd_dma_mr(struct lnet_ni *ni, struct kib_rdma_desc *rd)
{
- kib_net_t *net = ni->ni_data;
- kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct kib_net *net = ni->ni_data;
+ struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
struct lnet_ioctl_config_o2iblnd_tunables *tunables;
tunables = &ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
}
#endif
-static int
-kiblnd_map_tx(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)
+static int kiblnd_map_tx(struct lnet_ni *ni, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, int nfrags)
{
- kib_net_t *net = ni->ni_data;
- kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct kib_net *net = ni->ni_data;
+ struct kib_hca_dev *hdev = net->ibn_dev->ibd_hdev;
#ifdef HAVE_IB_GET_DMA_MR
- struct ib_mr *mr = NULL;
+ struct ib_mr *mr = NULL;
#endif
__u32 nob;
int i;
return -EINVAL;
}
-static int
-kiblnd_setup_rd_iov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- unsigned int niov, struct kvec *iov, int offset, int nob)
+static int kiblnd_setup_rd_iov(struct lnet_ni *ni, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, unsigned int niov,
+ struct kvec *iov, int offset, int nob)
{
- kib_net_t *net = ni->ni_data;
- struct page *page;
+ struct kib_net *net = ni->ni_data;
+ struct page *page;
struct scatterlist *sg;
unsigned long vaddr;
int fragnob;
int page_offset;
+ unsigned int max_niov;
LASSERT (nob > 0);
LASSERT (niov > 0);
LASSERT (niov > 0);
}
+ max_niov = niov;
+
sg = tx->tx_frags;
do {
LASSERT(niov > 0);
fragnob = min((int)(iov->iov_len - offset), nob);
fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
- if ((fragnob < (int)PAGE_SIZE - page_offset) && (niov > 1)) {
+ /*
+ * We're allowed to start at a non-aligned page offset in
+ * the first fragment and end at a non-aligned page offset
+ * in the last fragment.
+ */
+ if ((fragnob < (int)PAGE_SIZE - page_offset) &&
+ (niov < max_niov) && nob > fragnob) {
CDEBUG(D_NET, "fragnob %d < available page %d: with"
- " remaining %d iovs\n",
- fragnob, (int)PAGE_SIZE - page_offset, niov);
+ " remaining %d iovs with %d nob left\n",
+ fragnob, (int)PAGE_SIZE - page_offset, niov,
+ nob);
tx->tx_gaps = true;
}
return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
}
-static int
-kiblnd_setup_rd_kiov(struct lnet_ni *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
- int nkiov, lnet_kiov_t *kiov, int offset, int nob)
+static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
+ struct kib_rdma_desc *rd, int nkiov,
+ lnet_kiov_t *kiov, int offset, int nob)
{
- kib_net_t *net = ni->ni_data;
+ struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
int fragnob;
+ int max_nkiov;
CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
LASSERT(nkiov > 0);
}
+ max_nkiov = nkiov;
+
sg = tx->tx_frags;
do {
LASSERT(nkiov > 0);
fragnob = min((int)(kiov->kiov_len - offset), nob);
- if ((fragnob < (int)(kiov->kiov_len - offset)) && nkiov > 1) {
+ /*
+ * We're allowed to start at a non-aligned page offset in
+ * the first fragment and end at a non-aligned page offset
+ * in the last fragment.
+ */
+ if ((fragnob < (int)(kiov->kiov_len - offset)) &&
+ nkiov < max_nkiov && nob > fragnob) {
CDEBUG(D_NET, "fragnob %d < available page %d: with"
- " remaining %d kiovs\n",
- fragnob, (int)(kiov->kiov_len - offset), nkiov);
+ " remaining %d kiovs with %d nob left\n",
+ fragnob, (int)(kiov->kiov_len - offset),
+ nkiov, nob);
tx->tx_gaps = true;
}
}
static int
-kiblnd_post_tx_locked (kib_conn_t *conn, kib_tx_t *tx, int credit)
+kiblnd_post_tx_locked(struct kib_conn *conn, struct kib_tx *tx, int credit)
__must_hold(&conn->ibc_lock)
{
- kib_msg_t *msg = tx->tx_msg;
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
+ struct kib_msg *msg = tx->tx_msg;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ struct lnet_ni *ni = peer_ni->ibp_ni;
int ver = conn->ibc_version;
int rc;
int done;
LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
if (conn->ibc_nsends_posted ==
- conn->ibc_queue_depth) {
+ kiblnd_concurrent_sends(ver, ni)) {
/* tx completions outstanding... */
CDEBUG(D_NET, "%s: posted enough\n",
libcfs_nid2str(peer_ni->ibp_nid));
* kiblnd_check_sends_locked will queue NOOP again when
* posted NOOPs complete */
spin_unlock(&conn->ibc_lock);
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
kiblnd_tx_done(tx);
spin_lock(&conn->ibc_lock);
CDEBUG(D_NET, "%s(%d): redundant or enough NOOP\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
bad = NULL;
- rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
+ if (lnet_send_error_simulation(tx->tx_lntmsg[0], &tx->tx_hstatus))
+ rc = -EINVAL;
+ else
+#ifdef HAVE_IB_POST_SEND_RECV_CONST
+ rc = ib_post_send(conn->ibc_cmid->qp, wr,
+ (const struct ib_send_wr **)&bad);
+#else
+ rc = ib_post_send(conn->ibc_cmid->qp, wr, &bad);
+#endif
}
conn->ibc_last_send = ktime_get();
}
static void
-kiblnd_check_sends_locked(kib_conn_t *conn)
+kiblnd_check_sends_locked(struct kib_conn *conn)
{
- int ver = conn->ibc_version;
+ int ver = conn->ibc_version;
struct lnet_ni *ni = conn->ibc_peer->ibp_ni;
- kib_tx_t *tx;
+ struct kib_tx *tx;
/* Don't send anything until after the connection is established */
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
}
LASSERT(conn->ibc_nsends_posted <=
- conn->ibc_queue_depth);
+ kiblnd_concurrent_sends(ver, ni));
LASSERT (!IBLND_OOB_CAPABLE(ver) ||
conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
!list_empty(&conn->ibc_tx_queue_rsrvd)) {
tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- kib_tx_t, tx_list);
- list_del(&tx->tx_list);
- list_add_tail(&tx->tx_list, &conn->ibc_tx_queue);
+ struct kib_tx, tx_list);
+ list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
tx = list_entry(conn->ibc_tx_queue_nocred.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT (!IBLND_OOB_CAPABLE(ver));
credit = 1;
tx = list_entry(conn->ibc_tx_noops.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
tx = list_entry(conn->ibc_tx_queue.next,
- kib_tx_t, tx_list);
+ struct kib_tx, tx_list);
} else
break;
}
static void
-kiblnd_tx_complete (kib_tx_t *tx, int status)
+kiblnd_tx_complete(struct kib_tx *tx, int status)
{
int failed = (status != IB_WC_SUCCESS);
- kib_conn_t *conn = tx->tx_conn;
+ struct kib_conn *conn = tx->tx_conn;
int idle;
LASSERT (tx->tx_sending > 0);
conn->ibc_noops_posted--;
if (failed) {
+ tx->tx_hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
tx->tx_waiting = 0; /* don't wait for peer_ni */
tx->tx_status = -EIO;
}
}
static void
-kiblnd_init_tx_msg(struct lnet_ni *ni, kib_tx_t *tx, int type, int body_nob)
+kiblnd_init_tx_msg(struct lnet_ni *ni, struct kib_tx *tx, int type,
+ int body_nob)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
+ struct kib_hca_dev *hdev = tx->tx_pool->tpo_hdev;
struct ib_sge *sge = &tx->tx_msgsge;
struct ib_rdma_wr *wrq;
- int nob = offsetof(kib_msg_t, ibm_u) + body_nob;
+ int nob = offsetof(struct kib_msg, ibm_u) + body_nob;
#ifdef HAVE_IB_GET_DMA_MR
struct ib_mr *mr = hdev->ibh_mrs;
#endif
}
static int
-kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
- int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)
+kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type,
+ int resid, struct kib_rdma_desc *dstrd, u64 dstcookie)
{
- kib_msg_t *ibmsg = tx->tx_msg;
- kib_rdma_desc_t *srcrd = tx->tx_rd;
+ struct kib_msg *ibmsg = tx->tx_msg;
+ struct kib_rdma_desc *srcrd = tx->tx_rd;
struct ib_rdma_wr *wrq = NULL;
struct ib_sge *sge;
int rc = resid;
break;
}
- sge_nob = MIN(MIN(kiblnd_rd_frag_size(srcrd, srcidx),
- kiblnd_rd_frag_size(dstrd, dstidx)), resid);
+ sge_nob = min3(kiblnd_rd_frag_size(srcrd, srcidx),
+ kiblnd_rd_frag_size(dstrd, dstidx),
+ resid);
sge = &tx->tx_sge[tx->tx_nsge];
sge->addr = kiblnd_rd_frag_addr(srcrd, srcidx);
ibmsg->ibm_u.completion.ibcm_status = rc;
ibmsg->ibm_u.completion.ibcm_cookie = dstcookie;
kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx,
- type, sizeof (kib_completion_msg_t));
+ type, sizeof(struct kib_completion_msg));
return rc;
}
static void
-kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn)
{
struct list_head *q;
s64 timeout_ns;
LASSERT(!tx->tx_queued); /* not queued for sending already */
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
- timeout_ns = *kiblnd_tunables.kib_timeout * NSEC_PER_SEC;
+ if (conn->ibc_state >= IBLND_CONN_DISCONNECTED) {
+ tx->tx_status = -ECONNABORTED;
+ tx->tx_waiting = 0;
+ if (tx->tx_conn != NULL) {
+ /* PUT_DONE first attached to conn as a PUT_REQ */
+ LASSERT(tx->tx_conn == conn);
+ LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE);
+ tx->tx_conn = NULL;
+ kiblnd_conn_decref(conn);
+ }
+ list_add(&tx->tx_list, &conn->ibc_zombie_txs);
+
+ return;
+ }
+
+ timeout_ns = lnet_get_lnd_timeout() * NSEC_PER_SEC;
tx->tx_queued = 1;
tx->tx_deadline = ktime_add_ns(ktime_get(), timeout_ns);
}
static void
-kiblnd_queue_tx (kib_tx_t *tx, kib_conn_t *conn)
+kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn)
{
spin_lock(&conn->ibc_lock);
kiblnd_queue_tx_locked(tx, conn);
}
static void
-kiblnd_connect_peer (kib_peer_ni_t *peer_ni)
+kiblnd_connect_peer(struct kib_peer_ni *peer_ni)
{
struct rdma_cm_id *cmid;
- kib_dev_t *dev;
- kib_net_t *net = peer_ni->ibp_ni->ni_data;
+ struct kib_dev *dev;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
struct sockaddr_in srcaddr;
struct sockaddr_in dstaddr;
- int rc;
+ int rc;
LASSERT (net != NULL);
LASSERT (peer_ni->ibp_connecting > 0);
- cmid = kiblnd_rdma_create_id(kiblnd_cm_callback, peer_ni, RDMA_PS_TCP,
- IB_QPT_RC);
+ cmid = kiblnd_rdma_create_id(peer_ni->ibp_ni->ni_net_ns,
+ kiblnd_cm_callback, peer_ni,
+ RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cmid)) {
CERROR("Can't create CMID for %s: %ld\n",
kiblnd_peer_addref(peer_ni); /* cmid's ref */
- if (*kiblnd_tunables.kib_use_priv_port) {
- rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- } else {
- rc = rdma_resolve_addr(cmid,
- (struct sockaddr *)&srcaddr,
- (struct sockaddr *)&dstaddr,
- *kiblnd_tunables.kib_timeout * 1000);
- }
- if (rc != 0) {
- /* Can't initiate address resolution: */
- CERROR("Can't resolve addr for %s: %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), rc);
- goto failed2;
- }
+ if (*kiblnd_tunables.kib_use_priv_port) {
+ rc = kiblnd_resolve_addr(cmid, &srcaddr, &dstaddr,
+ lnet_get_lnd_timeout() * 1000);
+ } else {
+ rc = rdma_resolve_addr(cmid,
+ (struct sockaddr *)&srcaddr,
+ (struct sockaddr *)&dstaddr,
+ lnet_get_lnd_timeout() * 1000);
+ }
+ if (rc != 0) {
+ /* Can't initiate address resolution: */
+ CERROR("Can't resolve addr for %s: %d\n",
+ libcfs_nid2str(peer_ni->ibp_nid), rc);
+ goto failed2;
+ }
return;
}
bool
-kiblnd_reconnect_peer(kib_peer_ni_t *peer_ni)
+kiblnd_reconnect_peer(struct kib_peer_ni *peer_ni)
{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
- char *reason = NULL;
- struct list_head txs;
- unsigned long flags;
-
- INIT_LIST_HEAD(&txs);
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ char *reason = NULL;
+ LIST_HEAD(txs);
+ unsigned long flags;
write_lock_irqsave(glock, flags);
if (peer_ni->ibp_reconnecting == 0) {
CWARN("Abort reconnection of %s: %s\n",
libcfs_nid2str(peer_ni->ibp_nid), reason);
- kiblnd_txlist_done(&txs, -ECONNABORTED);
+ kiblnd_txlist_done(&txs, -ECONNABORTED,
+ LNET_MSG_STATUS_LOCAL_ABORTED);
return false;
}
void
-kiblnd_launch_tx(struct lnet_ni *ni, kib_tx_t *tx, lnet_nid_t nid)
+kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid)
{
- kib_peer_ni_t *peer_ni;
- kib_peer_ni_t *peer2;
- kib_conn_t *conn;
- rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ struct kib_peer_ni *peer_ni;
+ struct kib_peer_ni *peer2;
+ struct kib_conn *conn;
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
int rc;
int i;
if (tx != NULL) {
tx->tx_status = -EHOSTUNREACH;
tx->tx_waiting = 0;
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
kiblnd_tx_done(tx);
}
return;
peer_ni->ibp_connecting = tunables->lnd_conns_per_peer;
/* always called with a ref on ni, which prevents ni being shutdown */
- LASSERT(((kib_net_t *)ni->ni_data)->ibn_shutdown == 0);
+ LASSERT(((struct kib_net *)ni->ni_data)->ibn_shutdown == 0);
if (tx != NULL)
list_add_tail(&tx->tx_list, &peer_ni->ibp_tx_queue);
lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
- kib_msg_t *ibmsg;
- kib_rdma_desc_t *rd;
- kib_tx_t *tx;
+ struct kib_msg *ibmsg;
+ struct kib_rdma_desc *rd;
+ struct kib_tx *tx;
int nob;
int rc;
break; /* send IMMEDIATE */
/* is the REPLY message too small for RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
if (nob <= IBLND_MSG_SIZE)
break; /* send IMMEDIATE */
if (rc != 0) {
CERROR("Can't setup GET sink for %s: %d\n",
libcfs_nid2str(target.nid), rc);
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
kiblnd_tx_done(tx);
return -EIO;
}
- nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[rd->rd_nfrags]);
+ nob = offsetof(struct kib_get_msg, ibgm_rd.rd_frags[rd->rd_nfrags]);
ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie;
ibmsg->ibm_u.get.ibgm_hdr = *hdr;
case LNET_MSG_REPLY:
case LNET_MSG_PUT:
/* Is the payload small enough not to need RDMA? */
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
if (nob <= IBLND_MSG_SIZE)
break; /* send IMMEDIATE */
ibmsg = tx->tx_msg;
ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t));
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
+ sizeof(struct kib_putreq_msg));
tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
return 0;
}
- /* send IMMEDIATE */
-
- LASSERT (offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[payload_nob])
- <= IBLND_MSG_SIZE);
+ /* send IMMEDIATE */
+ LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
+ <= IBLND_MSG_SIZE);
tx = kiblnd_get_idle_tx(ni, target.nid);
if (tx == NULL) {
if (payload_kiov != NULL)
lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
payload_niov, payload_kiov,
payload_offset, payload_nob);
else
lnet_copy_iov2flat(IBLND_MSG_SIZE, ibmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
payload_niov, payload_iov,
payload_offset, payload_nob);
- nob = offsetof(kib_immediate_msg_t, ibim_payload[payload_nob]);
+ nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
}
static void
-kiblnd_reply(struct lnet_ni *ni, kib_rx_t *rx, struct lnet_msg *lntmsg)
+kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
{
struct lnet_process_id target = lntmsg->msg_target;
unsigned int niov = lntmsg->msg_niov;
lnet_kiov_t *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
- kib_tx_t *tx;
+ struct kib_tx *tx;
int rc;
tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
kiblnd_queue_tx(tx, rx->rx_conn);
return;
- failed_1:
+
+failed_1:
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
kiblnd_tx_done(tx);
- failed_0:
+failed_0:
lnet_finalize(lntmsg, -EIO);
}
int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
- kib_rx_t *rx = private;
- kib_msg_t *rxmsg = rx->rx_msg;
- kib_conn_t *conn = rx->rx_conn;
- kib_tx_t *tx;
+ struct kib_rx *rx = private;
+ struct kib_msg *rxmsg = rx->rx_msg;
+ struct kib_conn *conn = rx->rx_conn;
+ struct kib_tx *tx;
__u64 ibprm_cookie;
int nob;
int post_credit = IBLND_POSTRX_PEER_CREDIT;
LBUG();
case IBLND_MSG_IMMEDIATE:
- nob = offsetof(kib_msg_t, ibm_u.immediate.ibim_payload[rlen]);
+ nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[rlen]);
if (nob > rx->rx_nob) {
CERROR ("Immediate message from %s too big: %d(%d)\n",
libcfs_nid2str(rxmsg->ibm_u.immediate.ibim_hdr.src_nid),
if (kiov != NULL)
lnet_copy_flat2kiov(niov, kiov, offset,
IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
mlen);
else
lnet_copy_flat2iov(niov, iov, offset,
IBLND_MSG_SIZE, rxmsg,
- offsetof(kib_msg_t, ibm_u.immediate.ibim_payload),
+ offsetof(struct kib_msg, ibm_u.immediate.ibim_payload),
mlen);
lnet_finalize(lntmsg, 0);
break;
case IBLND_MSG_PUT_REQ: {
- kib_msg_t *txmsg;
- kib_rdma_desc_t *rd;
+ struct kib_msg *txmsg;
+ struct kib_rdma_desc *rd;
ibprm_cookie = rxmsg->ibm_u.putreq.ibprm_cookie;
if (mlen == 0) {
if (rc != 0) {
CERROR("Can't setup PUT sink for %s: %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+ tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
kiblnd_tx_done(tx);
/* tell peer_ni it's over */
kiblnd_send_completion(rx->rx_conn, IBLND_MSG_PUT_NAK,
break;
}
- nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[rd->rd_nfrags]);
+ nob = offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[rd->rd_nfrags]);
txmsg->ibm_u.putack.ibpam_src_cookie = ibprm_cookie;
txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie;
}
static void
-kiblnd_peer_alive (kib_peer_ni_t *peer_ni)
+kiblnd_peer_alive(struct kib_peer_ni *peer_ni)
{
/* This is racy, but everyone's only writing ktime_get_seconds() */
peer_ni->ibp_last_alive = ktime_get_seconds();
}
static void
-kiblnd_peer_notify (kib_peer_ni_t *peer_ni)
+kiblnd_peer_notify(struct kib_peer_ni *peer_ni)
{
-	int error = 0;
+	int error = 0;
	time64_t last_alive = 0;
-	unsigned long flags;
+	unsigned long flags;
	read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
	if (kiblnd_peer_idle(peer_ni) && peer_ni->ibp_error != 0) {
-		error = peer_ni->ibp_error;
-		peer_ni->ibp_error = 0;
+		error = peer_ni->ibp_error;
+		/* consume the error so the peer is notified only once */
+		peer_ni->ibp_error = 0;
-		last_alive = peer_ni->ibp_last_alive;
-	}
+		last_alive = peer_ni->ibp_last_alive;
+	}
	read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
-	if (error != 0)
-		lnet_notify(peer_ni->ibp_ni,
-			    peer_ni->ibp_nid, 0, last_alive);
+	if (error != 0)
+		/* report peer down: alive=false, reset=false
+		 * (lnet_notify() signature gained the reset flag;
+		 * TODO confirm argument order against lnet/lib-move.c) */
+		lnet_notify(peer_ni->ibp_ni,
+			    peer_ni->ibp_nid, false, false, last_alive);
}
void
-kiblnd_close_conn_locked (kib_conn_t *conn, int error)
+kiblnd_close_conn_locked(struct kib_conn *conn, int error)
{
/* This just does the immediate housekeeping. 'error' is zero for a
* normal shutdown which can happen only after the connection has been
* connection to be finished off by the connd. Otherwise the connd is
* already dealing with it (either to set it up or tear it down).
* Caller holds kib_global_lock exclusively in irq context */
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
- kib_dev_t *dev;
- unsigned long flags;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ struct kib_dev *dev;
+ unsigned long flags;
LASSERT (error != 0 || conn->ibc_state >= IBLND_CONN_ESTABLISHED);
list_empty(&conn->ibc_active_txs) ? "" : "(waiting)");
}
- dev = ((kib_net_t *)peer_ni->ibp_ni->ni_data)->ibn_dev;
+ dev = ((struct kib_net *)peer_ni->ibp_ni->ni_data)->ibn_dev;
if (peer_ni->ibp_next_conn == conn)
/* clear next_conn so it won't be used */
peer_ni->ibp_next_conn = NULL;
}
void
-kiblnd_close_conn(kib_conn_t *conn, int error)
+kiblnd_close_conn(struct kib_conn *conn, int error)
{
unsigned long flags;
}
static void
-kiblnd_abort_txs(kib_conn_t *conn, struct list_head *txs)
+kiblnd_handle_early_rxs(struct kib_conn *conn)
{
-	struct list_head zombies = LIST_HEAD_INIT(zombies);
+	unsigned long flags;
+	struct kib_rx *rx;
+
+	LASSERT(!in_interrupt());
+	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+
+	/* replay RXs that arrived before the connection reached
+	 * ESTABLISHED; kiblnd_rx_complete() parked them on
+	 * ibc_early_rxs under the global lock */
+	write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+	while (!list_empty(&conn->ibc_early_rxs)) {
+		rx = list_entry(conn->ibc_early_rxs.next,
+				struct kib_rx, rx_list);
+		list_del(&rx->rx_list);
+		/* drop the lock across kiblnd_handle_rx(): it may post
+		 * replies and take other locks; re-take it to look at
+		 * the list again */
+		write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+
+		kiblnd_handle_rx(rx);
+
+		write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
+	}
+	write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
+}
+
+/* Fail every tx on @txs with -ECONNABORTED, classifying the health
+ * status by how far each tx got.  Queued txs are completed here;
+ * txs still on the wire stay until kiblnd_tx_complete() fires. */
+void
+kiblnd_abort_txs(struct kib_conn *conn, struct list_head *txs)
+{
+	LIST_HEAD(zombies);
	struct list_head *tmp;
	struct list_head *nxt;
-	kib_tx_t *tx;
+	struct kib_tx *tx;
	spin_lock(&conn->ibc_lock);
	list_for_each_safe(tmp, nxt, txs) {
-		tx = list_entry(tmp, kib_tx_t, tx_list);
+		tx = list_entry(tmp, struct kib_tx, tx_list);
		if (txs == &conn->ibc_active_txs) {
			LASSERT(!tx->tx_queued);
			LASSERT(tx->tx_waiting ||
				tx->tx_sending != 0);
+			if (conn->ibc_comms_error == -ETIMEDOUT) {
+				/* sent and waiting for the peer: the
+				 * remote side timed out; still in our
+				 * send path: the network did */
+				if (tx->tx_waiting && !tx->tx_sending)
+					tx->tx_hstatus =
+						LNET_MSG_STATUS_REMOTE_TIMEOUT;
+				else if (tx->tx_sending)
+					tx->tx_hstatus =
+						LNET_MSG_STATUS_NETWORK_TIMEOUT;
+			}
		} else {
			LASSERT(tx->tx_queued);
+			if (conn->ibc_comms_error == -ETIMEDOUT)
+				tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_TIMEOUT;
+			else
+				tx->tx_hstatus = LNET_MSG_STATUS_LOCAL_ERROR;
		}
		tx->tx_status = -ECONNABORTED;
		tx->tx_waiting = 0;
+		/*
+		 * TODO: This makes an assumption that
+		 * kiblnd_tx_complete() will be called for each tx. If
+		 * that event is dropped we could end up with stale
+		 * connections floating around. We'd like to deal with
+		 * that in a better way.
+		 *
+		 * Also that means we can exceed the timeout by many
+		 * seconds.
+		 */
		if (tx->tx_sending == 0) {
			tx->tx_queued = 0;
-			list_del(&tx->tx_list);
-			list_add(&tx->tx_list, &zombies);
+			list_move(&tx->tx_list, &zombies);
		}
	}
	spin_unlock(&conn->ibc_lock);
-	kiblnd_txlist_done(&zombies, -ECONNABORTED);
+	/*
+	 * aborting transmits occurs when finalizing the connection.
+	 * The connection is finalized on error.
+	 * Passing LNET_MSG_STATUS_OK to txlist_done() will not
+	 * override the value already set in tx->tx_hstatus above.
+	 */
+	kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
}
static void
-kiblnd_finalise_conn (kib_conn_t *conn)
+kiblnd_finalise_conn(struct kib_conn *conn)
{
LASSERT (!in_interrupt());
LASSERT (conn->ibc_state > IBLND_CONN_INIT);
- kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
-
/* abort_receives moves QP state to IB_QPS_ERR. This is only required
* for connections that didn't get as far as being connected, because
* rdma_disconnect() does this for free. */
kiblnd_abort_receives(conn);
+ kiblnd_set_conn_state(conn, IBLND_CONN_DISCONNECTED);
+
/* Complete all tx descs not waiting for sends to complete.
* NB we should be safe from RDMA now that the QP has changed state */
kiblnd_abort_txs(conn, &conn->ibc_tx_queue_rsrvd);
kiblnd_abort_txs(conn, &conn->ibc_tx_queue_nocred);
kiblnd_abort_txs(conn, &conn->ibc_active_txs);
+
+ kiblnd_handle_early_rxs(conn);
}
static void
-kiblnd_peer_connect_failed(kib_peer_ni_t *peer_ni, int active, int error)
+kiblnd_peer_connect_failed(struct kib_peer_ni *peer_ni, int active,
+ int error)
{
- struct list_head zombies = LIST_HEAD_INIT(zombies);
+ LIST_HEAD(zombies);
unsigned long flags;
LASSERT (error != 0);
peer_ni->ibp_reconnected = 0;
if (list_empty(&peer_ni->ibp_conns)) {
/* Take peer_ni's blocked transmits to complete with error */
- list_add(&zombies, &peer_ni->ibp_tx_queue);
- list_del_init(&peer_ni->ibp_tx_queue);
+ list_splice_init(&peer_ni->ibp_tx_queue, &zombies);
if (kiblnd_peer_active(peer_ni))
kiblnd_unlink_peer_locked(peer_ni);
CNETERR("Deleting messages for %s: connection failed\n",
libcfs_nid2str(peer_ni->ibp_nid));
- kiblnd_txlist_done(&zombies, -EHOSTUNREACH);
+ kiblnd_txlist_done(&zombies, error,
+ LNET_MSG_STATUS_LOCAL_DROPPED);
}
static void
-kiblnd_connreq_done(kib_conn_t *conn, int status)
+kiblnd_connreq_done(struct kib_conn *conn, int status)
{
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
- kib_tx_t *tx;
- struct list_head txs;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ struct kib_tx *tx;
+ LIST_HEAD(txs);
unsigned long flags;
int active;
}
/* grab pending txs while I have the lock */
- list_add(&txs, &peer_ni->ibp_tx_queue);
- list_del_init(&peer_ni->ibp_tx_queue);
+ list_splice_init(&peer_ni->ibp_tx_queue, &txs);
if (!kiblnd_peer_active(peer_ni) || /* peer_ni has been deleted */
conn->ibc_comms_error != 0) { /* error has happened already */
kiblnd_close_conn_locked(conn, -ECONNABORTED);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- kiblnd_txlist_done(&txs, -ECONNABORTED);
+ kiblnd_txlist_done(&txs, -ECONNABORTED,
+ LNET_MSG_STATUS_LOCAL_ERROR);
return;
}
*/
spin_lock(&conn->ibc_lock);
while (!list_empty(&txs)) {
- tx = list_entry(txs.next, kib_tx_t, tx_list);
+ tx = list_entry(txs.next, struct kib_tx, tx_list);
list_del(&tx->tx_list);
kiblnd_queue_tx_locked(tx, conn);
kiblnd_check_sends_locked(conn);
spin_unlock(&conn->ibc_lock);
+ /* schedule blocked rxs */
+ kiblnd_handle_early_rxs(conn);
kiblnd_conn_decref(conn);
}
static void
-kiblnd_reject(struct rdma_cm_id *cmid, kib_rej_t *rej)
+kiblnd_reject(struct rdma_cm_id *cmid, struct kib_rej *rej)
{
int rc;
kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
{
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
- kib_msg_t *reqmsg = priv;
- kib_msg_t *ackmsg;
- kib_dev_t *ibdev;
- kib_peer_ni_t *peer_ni;
- kib_peer_ni_t *peer2;
- kib_conn_t *conn;
- struct lnet_ni *ni = NULL;
- kib_net_t *net = NULL;
+ struct kib_msg *reqmsg = priv;
+ struct kib_msg *ackmsg;
+ struct kib_dev *ibdev;
+ struct kib_peer_ni *peer_ni;
+ struct kib_peer_ni *peer2;
+ struct kib_conn *conn;
+ struct lnet_ni *ni = NULL;
+ struct kib_net *net = NULL;
lnet_nid_t nid;
struct rdma_conn_param cp;
- kib_rej_t rej;
+ struct kib_rej rej;
int version = IBLND_MSG_VERSION;
unsigned long flags;
int rc;
LASSERT (!in_interrupt());
/* cmid inherits 'context' from the corresponding listener id */
- ibdev = (kib_dev_t *)cmid->context;
- LASSERT (ibdev != NULL);
+ ibdev = cmid->context;
+ LASSERT(ibdev);
memset(&rej, 0, sizeof(rej));
rej.ibr_magic = IBLND_MSG_MAGIC;
goto failed;
}
- if (priv_nob < offsetof(kib_msg_t, ibm_type)) {
+ if (priv_nob < offsetof(struct kib_msg, ibm_type)) {
CERROR("Short connection request\n");
goto failed;
}
ni = lnet_nid2ni_addref(reqmsg->ibm_dstnid);
if (ni != NULL) {
- net = (kib_net_t *)ni->ni_data;
+ net = (struct kib_net *)ni->ni_data;
rej.ibr_incarnation = net->ibn_incarnation;
}
}
static void
-kiblnd_check_reconnect(kib_conn_t *conn, int version,
- __u64 incarnation, int why, kib_connparams_t *cp)
+kiblnd_check_reconnect(struct kib_conn *conn, int version,
+ u64 incarnation, int why, struct kib_connparams *cp)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
char *reason;
int msg_size = IBLND_MSG_SIZE;
int frag_num = -1;
}
write_lock_irqsave(glock, flags);
- /* retry connection if it's still needed and no other connection
- * attempts (active or passive) are in progress
- * NB: reconnect is still needed even when ibp_tx_queue is
- * empty if ibp_version != version because reconnect may be
- * initiated by kiblnd_query() */
+ /* retry connection if it's still needed and no other connection
+ * attempts (active or passive) are in progress
+ * NB: reconnect is still needed even when ibp_tx_queue is
+ * empty if ibp_version != version because reconnect may be
+ * initiated.
+ */
reconnect = (!list_empty(&peer_ni->ibp_tx_queue) ||
peer_ni->ibp_version != version) &&
peer_ni->ibp_connecting &&
}
static void
-kiblnd_rejected (kib_conn_t *conn, int reason, void *priv, int priv_nob)
+kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
{
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
LASSERT (!in_interrupt());
LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
break;
case IB_CM_REJ_CONSUMER_DEFINED:
- if (priv_nob >= offsetof(kib_rej_t, ibr_padding)) {
- kib_rej_t *rej = priv;
- kib_connparams_t *cp = NULL;
+ if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
+ struct kib_rej *rej = priv;
+ struct kib_connparams *cp = NULL;
int flip = 0;
__u64 incarnation = -1;
flip = 1;
}
- if (priv_nob >= sizeof(kib_rej_t) &&
+ if (priv_nob >= sizeof(struct kib_rej) &&
rej->ibr_version > IBLND_MSG_VERSION_1) {
/* priv_nob is always 148 in current version
* of OFED, so we still need to check version.
}
static void
-kiblnd_check_connreply (kib_conn_t *conn, void *priv, int priv_nob)
+kiblnd_check_connreply(struct kib_conn *conn, void *priv, int priv_nob)
{
- kib_peer_ni_t *peer_ni = conn->ibc_peer;
- struct lnet_ni *ni = peer_ni->ibp_ni;
- kib_net_t *net = ni->ni_data;
- kib_msg_t *msg = priv;
+ struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ struct lnet_ni *ni = peer_ni->ibp_ni;
+ struct kib_net *net = ni->ni_data;
+ struct kib_msg *msg = priv;
int ver = conn->ibc_version;
int rc = kiblnd_unpack_msg(msg, priv_nob);
unsigned long flags;
}
static int
-kiblnd_active_connect (struct rdma_cm_id *cmid)
+kiblnd_active_connect(struct rdma_cm_id *cmid)
{
- kib_peer_ni_t *peer_ni = (kib_peer_ni_t *)cmid->context;
- kib_conn_t *conn;
- kib_msg_t *msg;
- struct rdma_conn_param cp;
+ struct kib_peer_ni *peer_ni = cmid->context;
+ struct kib_conn *conn;
+ struct kib_msg *msg;
+ struct rdma_conn_param cp;
int version;
__u64 incarnation;
unsigned long flags;
int
kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event)
{
- kib_peer_ni_t *peer_ni;
- kib_conn_t *conn;
- int rc;
+ struct kib_peer_ni *peer_ni;
+ struct kib_conn *conn;
+ int rc;
switch (event->event) {
default:
case RDMA_CM_EVENT_CONNECT_REQUEST:
/* destroy cmid on failure */
- rc = kiblnd_passive_connect(cmid,
+ rc = kiblnd_passive_connect(cmid,
(void *)KIBLND_CONN_PARAM(event),
KIBLND_CONN_PARAM_LEN(event));
CDEBUG(D_NET, "connreq: %d\n", rc);
return rc;
-
+
case RDMA_CM_EVENT_ADDR_ERROR:
- peer_ni = (kib_peer_ni_t *)cmid->context;
+ peer_ni = cmid->context;
CNETERR("%s: ADDR ERROR %d\n",
libcfs_nid2str(peer_ni->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
return -EHOSTUNREACH; /* rc != 0 destroys cmid */
case RDMA_CM_EVENT_ADDR_RESOLVED:
- peer_ni = (kib_peer_ni_t *)cmid->context;
+ peer_ni = cmid->context;
CDEBUG(D_NET,"%s Addr resolved: %d\n",
libcfs_nid2str(peer_ni->ibp_nid), event->status);
CNETERR("Can't resolve address for %s: %d\n",
libcfs_nid2str(peer_ni->ibp_nid), event->status);
rc = event->status;
- } else {
- rc = rdma_resolve_route(
- cmid, *kiblnd_tunables.kib_timeout * 1000);
+ } else {
+ rc = rdma_resolve_route(
+ cmid, lnet_get_lnd_timeout() * 1000);
if (rc == 0) {
- kib_net_t *net = peer_ni->ibp_ni->ni_data;
- kib_dev_t *dev = net->ibn_dev;
+ struct kib_net *net = peer_ni->ibp_ni->ni_data;
+ struct kib_dev *dev = net->ibn_dev;
CDEBUG(D_NET, "%s: connection bound to "\
"%s:%pI4h:%s\n",
return rc; /* rc != 0 destroys cmid */
case RDMA_CM_EVENT_ROUTE_ERROR:
- peer_ni = (kib_peer_ni_t *)cmid->context;
+ peer_ni = cmid->context;
CNETERR("%s: ROUTE ERROR %d\n",
libcfs_nid2str(peer_ni->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer_ni, 1, -EHOSTUNREACH);
return -EHOSTUNREACH; /* rc != 0 destroys cmid */
case RDMA_CM_EVENT_ROUTE_RESOLVED:
- peer_ni = (kib_peer_ni_t *)cmid->context;
+ peer_ni = cmid->context;
CDEBUG(D_NET,"%s Route resolved: %d\n",
libcfs_nid2str(peer_ni->ibp_nid), event->status);
kiblnd_peer_connect_failed(peer_ni, 1, event->status);
kiblnd_peer_decref(peer_ni);
return event->status; /* rc != 0 destroys cmid */
-
+
case RDMA_CM_EVENT_UNREACHABLE:
- conn = (kib_conn_t *)cmid->context;
+ conn = cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: UNREACHABLE %d\n",
return 0;
case RDMA_CM_EVENT_CONNECT_ERROR:
- conn = (kib_conn_t *)cmid->context;
+ conn = cmid->context;
LASSERT(conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT ||
conn->ibc_state == IBLND_CONN_PASSIVE_WAIT);
CNETERR("%s: CONNECT ERROR %d\n",
return 0;
case RDMA_CM_EVENT_REJECTED:
- conn = (kib_conn_t *)cmid->context;
+ conn = cmid->context;
switch (conn->ibc_state) {
default:
LBUG();
return 0;
case RDMA_CM_EVENT_ESTABLISHED:
- conn = (kib_conn_t *)cmid->context;
+ conn = cmid->context;
switch (conn->ibc_state) {
default:
LBUG();
return 0;
case RDMA_CM_EVENT_DISCONNECTED:
- conn = (kib_conn_t *)cmid->context;
+ conn = cmid->context;
if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
CERROR("%s DISCONNECTED\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid));
}
static int
-kiblnd_check_txs_locked(kib_conn_t *conn, struct list_head *txs)
+kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
{
- kib_tx_t *tx;
+ struct kib_tx *tx;
struct list_head *ttmp;
list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, kib_tx_t, tx_list);
+ tx = list_entry(ttmp, struct kib_tx, tx_list);
if (txs != &conn->ibc_active_txs) {
LASSERT(tx->tx_queued);
}
static int
-kiblnd_conn_timed_out_locked(kib_conn_t *conn)
+kiblnd_conn_timed_out_locked(struct kib_conn *conn)
{
return kiblnd_check_txs_locked(conn, &conn->ibc_tx_queue) ||
kiblnd_check_txs_locked(conn, &conn->ibc_tx_noops) ||
static void
kiblnd_check_conns (int idx)
{
- struct list_head closes = LIST_HEAD_INIT(closes);
- struct list_head checksends = LIST_HEAD_INIT(checksends);
- struct list_head timedout_txs = LIST_HEAD_INIT(timedout_txs);
+ LIST_HEAD(closes);
+ LIST_HEAD(checksends);
+ LIST_HEAD(timedout_txs);
struct list_head *peers = &kiblnd_data.kib_peers[idx];
struct list_head *ptmp;
- kib_peer_ni_t *peer_ni;
- kib_conn_t *conn;
- kib_tx_t *tx, *tx_tmp;
+ struct kib_peer_ni *peer_ni;
+ struct kib_conn *conn;
+ struct kib_tx *tx, *tx_tmp;
struct list_head *ctmp;
unsigned long flags;
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
list_for_each(ptmp, peers) {
- peer_ni = list_entry(ptmp, kib_peer_ni_t, ibp_list);
+ peer_ni = list_entry(ptmp, struct kib_peer_ni, ibp_list);
/* Check tx_deadline */
list_for_each_entry_safe(tx, tx_tmp, &peer_ni->ibp_tx_queue, tx_list) {
int timedout;
int sendnoop;
- conn = list_entry(ctmp, kib_conn_t, ibc_list);
+ conn = list_entry(ctmp, struct kib_conn, ibc_list);
LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
if (!list_empty(&timedout_txs))
- kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT);
+ kiblnd_txlist_done(&timedout_txs, -ETIMEDOUT,
+ LNET_MSG_STATUS_LOCAL_TIMEOUT);
/* Handle timeout by closing the whole
* connection. We can only be sure RDMA activity
* has ceased once the QP has been modified. */
while (!list_empty(&closes)) {
conn = list_entry(closes.next,
- kib_conn_t, ibc_connd_list);
+ struct kib_conn, ibc_connd_list);
list_del(&conn->ibc_connd_list);
kiblnd_close_conn(conn, -ETIMEDOUT);
kiblnd_conn_decref(conn);
* free to do it last time... */
while (!list_empty(&checksends)) {
conn = list_entry(checksends.next,
- kib_conn_t, ibc_connd_list);
+ struct kib_conn, ibc_connd_list);
list_del(&conn->ibc_connd_list);
spin_lock(&conn->ibc_lock);
}
static void
-kiblnd_disconnect_conn (kib_conn_t *conn)
+kiblnd_disconnect_conn(struct kib_conn *conn)
{
LASSERT (!in_interrupt());
LASSERT (current == kiblnd_data.kib_connd);
spinlock_t *lock= &kiblnd_data.kib_connd_lock;
wait_queue_entry_t wait;
unsigned long flags;
- kib_conn_t *conn;
+ struct kib_conn *conn;
int timeout;
int i;
int dropped_lock;
dropped_lock = 0;
if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
- kib_peer_ni_t *peer_ni = NULL;
+ struct kib_peer_ni *peer_ni = NULL;
conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
if (conn->ibc_reconnect) {
peer_ni = conn->ibc_peer;
if (!list_empty(&kiblnd_data.kib_connd_conns)) {
conn = list_entry(kiblnd_data.kib_connd_conns.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
break;
conn = list_entry(kiblnd_data.kib_reconn_list.next,
- kib_conn_t, ibc_list);
+ struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
const int n = 4;
const int p = 1;
int chunk = kiblnd_data.kib_peer_hash_size;
+ unsigned int lnd_timeout;
spin_unlock_irqrestore(lock, flags);
dropped_lock = 1;
* connection within (n+1)/n times the timeout
* interval. */
- if (*kiblnd_tunables.kib_timeout > n * p)
- chunk = (chunk * n * p) /
- *kiblnd_tunables.kib_timeout;
- if (chunk == 0)
- chunk = 1;
+ lnd_timeout = lnet_get_lnd_timeout();
+ if (lnd_timeout > n * p)
+ chunk = (chunk * n * p) / lnd_timeout;
+ if (chunk == 0)
+ chunk = 1;
for (i = 0; i < chunk; i++) {
kiblnd_check_conns(peer_index);
kiblnd_data.kib_peer_hash_size;
}
- deadline += msecs_to_jiffies(p * MSEC_PER_SEC);
+ deadline += cfs_time_seconds(p);
spin_lock_irqsave(lock, flags);
}
void
kiblnd_qp_event(struct ib_event *event, void *arg)
{
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
- switch (event->event) {
- case IB_EVENT_COMM_EST:
- CDEBUG(D_NET, "%s established\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ CDEBUG(D_NET, "%s established\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
/* We received a packet but connection isn't established
* probably handshake packet was lost, so free to
* force make connection established */
rdma_notify(conn->ibc_cmid, IB_EVENT_COMM_EST);
- return;
+ return;
- default:
- CERROR("%s: Async QP event type %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
- return;
- }
+ case IB_EVENT_PORT_ERR:
+ case IB_EVENT_DEVICE_FATAL:
+ CERROR("Fatal device error for NI %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
+ return;
+
+ case IB_EVENT_PORT_ACTIVE:
+ CERROR("Port reactivated for NI %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
+ return;
+
+ default:
+ CERROR("%s: Async QP event type %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
+ return;
+ }
}
static void
* consuming my CQ I could be called after all completions have
* occurred. But in this case, ibc_nrx == 0 && ibc_nsends_posted == 0
* and this CQ is about to be destroyed so I NOOP. */
- kib_conn_t *conn = (kib_conn_t *)arg;
- struct kib_sched_info *sched = conn->ibc_sched;
- unsigned long flags;
+ struct kib_conn *conn = arg;
+ struct kib_sched_info *sched = conn->ibc_sched;
+ unsigned long flags;
LASSERT(cq == conn->ibc_cq);
void
kiblnd_cq_event(struct ib_event *event, void *arg)
{
- kib_conn_t *conn = arg;
+ struct kib_conn *conn = arg;
CERROR("%s: async CQ event type %d\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid), event->event);
{
long id = (long)arg;
struct kib_sched_info *sched;
- kib_conn_t *conn;
+ struct kib_conn *conn;
wait_queue_entry_t wait;
unsigned long flags;
struct ib_wc wc;
if (!list_empty(&sched->ibs_conns)) {
conn = list_entry(sched->ibs_conns.next,
- kib_conn_t, ibc_sched_list);
+ struct kib_conn, ibc_sched_list);
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
list_del(&conn->ibc_sched_list);
kiblnd_failover_thread(void *arg)
{
rwlock_t *glock = &kiblnd_data.kib_global_lock;
- kib_dev_t *dev;
+ struct kib_dev *dev;
+ struct net *ns = arg;
wait_queue_entry_t wait;
unsigned long flags;
int rc;
dev->ibd_failover = 1;
write_unlock_irqrestore(glock, flags);
- rc = kiblnd_dev_failover(dev);
+ rc = kiblnd_dev_failover(dev, ns);
write_lock_irqsave(glock, flags);