*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/klnds/o2iblnd/o2iblnd_cb.c
*
{
struct kib_tx *tx;
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct kib_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(txlist,
+ struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
/* complete now */
tx->tx_waiting = 0;
static struct kib_tx *
kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
- struct list_head *tmp;
-
- list_for_each(tmp, &conn->ibc_active_txs) {
- struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
+ struct kib_tx *tx;
+ list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) {
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
int rc;
int err = -EIO;
- LASSERT (net != NULL);
- LASSERT (rx->rx_nob < 0); /* was posted */
- rx->rx_nob = 0; /* isn't now */
+ LASSERT(net);
+ LASSERT(rx->rx_nob < 0); /* was posted */
+ rx->rx_nob = 0; /* isn't now */
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
- goto ignore;
+ if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
+ goto ignore;
- if (status != IB_WC_SUCCESS) {
- CNETERR("Rx from %s failed: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
- goto failed;
- }
+ if (status != IB_WC_SUCCESS) {
+ CNETERR("Rx from %s failed: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
+ goto failed;
+ }
- LASSERT (nob >= 0);
- rx->rx_nob = nob;
+ LASSERT(nob >= 0);
+ rx->rx_nob = nob;
- rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc != 0) {
- CERROR ("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- goto failed;
- }
+ rc = kiblnd_unpack_msg(msg, rx->rx_nob);
+ if (rc != 0) {
+ CERROR("Error %d unpacking rx from %s\n",
+ rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ goto failed;
+ }
- if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
- msg->ibm_dstnid != ni->ni_nid ||
- msg->ibm_srcstamp != conn->ibc_incarnation ||
- msg->ibm_dststamp != net->ibn_incarnation) {
- CERROR ("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- err = -ESTALE;
- goto failed;
- }
+ if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
+ msg->ibm_dstnid != lnet_nid_to_nid4(&ni->ni_nid) ||
+ msg->ibm_srcstamp != conn->ibc_incarnation ||
+ msg->ibm_dststamp != net->ibn_incarnation) {
+ CERROR("Stale rx from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ err = -ESTALE;
+ goto failed;
+ }
- /* set time last known alive */
- kiblnd_peer_alive(conn->ibc_peer);
+ /* set time last known alive */
+ kiblnd_peer_alive(conn->ibc_peer);
- /* racing with connection establishment/teardown! */
+ /* racing with connection establishment/teardown! */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
return;
}
write_unlock_irqrestore(g_lock, flags);
- }
- kiblnd_handle_rx(rx);
- return;
+ }
+ kiblnd_handle_rx(rx);
+ return;
- failed:
- CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
- kiblnd_close_conn(conn, err);
- ignore:
- kiblnd_drop_rx(rx); /* Don't re-post rx. */
+failed:
+ CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
+ kiblnd_close_conn(conn, err);
+ignore:
+ kiblnd_drop_rx(rx); /* Don't re-post rx. */
}
static int
kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);
if (tx->tx_nfrags != 0) {
- kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
+ kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
tx->tx_nfrags = 0;
}
tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
tx->tx_nfrags = nfrags;
- rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
+ rd->rd_nfrags = kiblnd_dma_map_sg(hdev, tx->tx_frags,
tx->tx_nfrags, tx->tx_dmadir);
for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
{
struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
- int fragnob;
- int max_nkiov;
+ int fragnob;
+ int max_nkiov;
+ int sg_count = 0;
CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
do {
LASSERT(nkiov > 0);
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
+ sg_count++;
+
fragnob = min((int)(kiov->bv_len - offset), nob);
/*
sg_set_page(sg, kiov->bv_page, fragnob,
kiov->bv_offset + offset);
sg = sg_next(sg);
- if (!sg) {
- CERROR("lacking enough sg entries to map tx\n");
- return -EFAULT;
- }
offset = 0;
kiov++;
nob -= fragnob;
} while (nob > 0);
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
+ return kiblnd_map_tx(ni, tx, rd, sg_count);
}
static int
struct kib_msg *msg = tx->tx_msg;
struct kib_peer_ni *peer_ni = conn->ibc_peer;
struct lnet_ni *ni = peer_ni->ibp_ni;
+ struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
int ver = conn->ibc_version;
int rc;
int done;
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
struct ib_send_wr *wr = &tx->tx_wrq[0].wr;
- if (frd != NULL) {
+ if (frd != NULL && !frd->frd_posted) {
if (!frd->frd_valid) {
wr = &frd->frd_inv_wr.wr;
wr->next = &frd->frd_fastreg_wr.wr;
conn->ibc_last_send = ktime_get();
- if (rc == 0)
- return 0;
+ if (rc == 0) {
+ if (frd != NULL)
+ frd->frd_posted = true;
+ return 0;
+ }
/* NB credits are transferred in the actual
* message, which can only be the last work item */
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- struct kib_tx, tx_list);
+ (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd,
+ struct kib_tx, tx_list)) != NULL) {
list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue_nocred,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT (!IBLND_OOB_CAPABLE(ver));
credit = 1;
- tx = list_entry(conn->ibc_tx_noops.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_noops,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue,
+ struct kib_tx, tx_list);
} else
break;
#endif
LASSERT(tx->tx_nwrq >= 0);
- LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
+ LASSERT(tx->tx_nwrq <= IBLND_MAX_RDMA_FRAGS);
LASSERT(nob <= IBLND_MSG_SIZE);
#ifdef HAVE_IB_GET_DMA_MR
LASSERT(mr != NULL);
/* Thread context */
LASSERT (!in_interrupt());
+ tx = kiblnd_get_idle_tx(ni, target.nid);
+ if (tx == NULL) {
+ CERROR("Can't allocate %s txd for %s\n",
+ lnet_msgtyp2str(type),
+ libcfs_nid2str(target.nid));
+ return -ENOMEM;
+ }
+ ibmsg = tx->tx_msg;
+
switch (type) {
default:
LBUG();
/* is the REPLY message too small for RDMA? */
nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
+ if (nob <= IBLND_MSG_SIZE && !lntmsg->msg_rdma_force)
+ break; /* send IMMEDIATE */
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate txd for GET to %s\n",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
rd = &ibmsg->ibm_u.get.ibgm_rd;
rc = kiblnd_setup_rd_kiov(ni, tx, rd,
lntmsg->msg_md->md_niov,
return -EIO;
}
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */
- tx->tx_waiting = 1; /* waiting for GET_DONE */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
+ /* finalise lntmsg[0,1] on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+ tx->tx_waiting = 1; /* waiting for GET_DONE */
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
- case LNET_MSG_REPLY:
- case LNET_MSG_PUT:
- /* Is the payload small enough not to need RDMA? */
+ case LNET_MSG_REPLY:
+ case LNET_MSG_PUT:
+ /* Is the payload small enough not to need RDMA? */
nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate %s txd for %s\n",
- type == LNET_MSG_PUT ? "PUT" : "REPLY",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
+ if (nob <= IBLND_MSG_SIZE && !lntmsg->msg_rdma_force)
+ break; /* send IMMEDIATE */
rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
payload_niov, payload_kiov,
return -EIO;
}
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
- ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
+ ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
+ ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
sizeof(struct kib_putreq_msg));
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
- }
+ /* finalise lntmsg[0,1] on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+ tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
+ }
/* send IMMEDIATE */
LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
<= IBLND_MSG_SIZE);
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR ("Can't send %d to %s: tx descs exhausted\n",
- type, libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
+ ibmsg = tx->tx_msg;
+ ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
offsetof(struct kib_msg,
payload_offset, payload_nob);
nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
+
+ /* finalise lntmsg on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
}
static void
lnet_finalize(lntmsg, -EIO);
}
+/* Return the LNet device priority for the IB device backing @ni.
+ *
+ * @ni:      LNet network interface whose kib_net private data is consulted
+ * @dev_idx: device index, passed straight through to lnet_get_dev_prio()
+ *
+ * When @ni has no attached kib_net (ni->ni_data is NULL, e.g. while the
+ * net is being set up or torn down) a NULL struct device is passed to
+ * lnet_get_dev_prio() — presumably yielding a default priority; confirm
+ * against lnet_get_dev_prio()'s NULL-device handling.
+ */
+unsigned int
+kiblnd_get_dev_prio(struct lnet_ni *ni, unsigned int dev_idx)
+{
+	struct kib_net *net = ni->ni_data;
+	struct device *dev = NULL;
+
+	if (net)
+		dev = net->ibn_dev->ibd_hdev->ibh_ibdev->dma_device;
+
+	return lnet_get_dev_prio(dev, dev_idx);
+
+}
+
int
kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov, struct bio_vec *kiov,
return rc;
}
-int
-kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- atomic_inc(&kiblnd_data.kib_nthreads);
- return 0;
-}
-
static void
kiblnd_thread_fini (void)
{
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!list_empty(&conn->ibc_early_rxs)) {
- rx = list_entry(conn->ibc_early_rxs.next,
- struct kib_rx, rx_list);
+ while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs,
+ struct kib_rx,
+ rx_list)) != NULL) {
list_del(&rx->rx_list);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
kiblnd_txlist_done(&zombies, -ECONNABORTED, LNET_MSG_STATUS_OK);
}
-static int
+static bool
kiblnd_tx_may_discard(struct kib_conn *conn)
{
- int rc = 0;
+ bool rc = false;
struct kib_tx *nxt;
struct kib_tx *tx;
if (tx->tx_sending == 0) {
kiblnd_conn_decref(tx->tx_conn);
tx->tx_conn = NULL;
- rc = 1;
+ rc = true;
}
}
}
int error)
{
LIST_HEAD(zombies);
- unsigned long flags;
+ unsigned long flags;
+ enum lnet_msg_hstatus hstatus;
- LASSERT (error != 0);
- LASSERT (!in_interrupt());
+ LASSERT(error != 0);
+ LASSERT(!in_interrupt());
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
CNETERR("Deleting messages for %s: connection failed\n",
libcfs_nid2str(peer_ni->ibp_nid));
- if (error == -EHOSTUNREACH || error == -ETIMEDOUT)
- kiblnd_txlist_done(&zombies, error,
- LNET_MSG_STATUS_NETWORK_TIMEOUT);
- else
- kiblnd_txlist_done(&zombies, error,
- LNET_MSG_STATUS_LOCAL_DROPPED);
+ switch (error) {
+ case -EHOSTUNREACH:
+ case -ETIMEDOUT:
+ hstatus = LNET_MSG_STATUS_NETWORK_TIMEOUT;
+ break;
+ case -ECONNREFUSED:
+ hstatus = LNET_MSG_STATUS_REMOTE_DROPPED;
+ break;
+ default:
+ hstatus = LNET_MSG_STATUS_LOCAL_DROPPED;
+ break;
+ }
+
+ kiblnd_txlist_done(&zombies, error, hstatus);
}
static void
/* connection established */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- /* reset retry count */
- peer_ni->ibp_retries = 0;
-
conn->ibc_last_send = ktime_get();
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
kiblnd_peer_alive(peer_ni);
* scheduled. We won't be using round robin on this first batch.
*/
spin_lock(&conn->ibc_lock);
- while (!list_empty(&txs)) {
- tx = list_entry(txs.next, struct kib_tx, tx_list);
+ while ((tx = list_first_entry_or_null(&txs, struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
kiblnd_queue_tx_locked(tx, conn);
rej.ibr_incarnation = net->ibn_incarnation;
}
- if (ni == NULL || /* no matching net */
- ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
- net->ibn_dev != ibdev) { /* wrong device */
+ if (ni == NULL || /* no matching net */
+ lnet_nid_to_nid4(&ni->ni_nid) !=
+ reqmsg->ibm_dstnid || /* right NET, wrong NID! */
+ net->ibn_dev != ibdev) { /* wrong device */
CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n", libcfs_nid2str(nid),
- ni ? libcfs_nid2str(ni->ni_nid) : "NA",
+ ni ? libcfs_nidstr(&ni->ni_nid) : "NA",
ibdev->ibd_ifname, ibdev->ibd_nnets,
&ibdev->ibd_ifip,
libcfs_nid2str(reqmsg->ibm_dstnid));
* the lower NID connection win so we can move forward.
*/
if (peer2->ibp_connecting != 0 &&
- nid < ni->ni_nid && peer2->ibp_races <
- MAX_CONN_RACES_BEFORE_ABORT) {
+ nid < lnet_nid_to_nid4(&ni->ni_nid) &&
+ peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) {
peer2->ibp_races++;
write_unlock_irqrestore(g_lock, flags);
goto out;
}
- if (peer_ni->ibp_retries > *kiblnd_tunables.kib_retry_count) {
- reason = "retry count exceeded due to no listener";
- goto out;
- }
-
switch (why) {
default:
reason = "Unknown";
break;
case IBLND_REJECT_RDMA_FRAGS: {
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
-
if (!cp) {
reason = "can't negotiate max frags";
goto out;
}
- tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
-#ifdef HAVE_IB_GET_DMA_MR
- /*
- * This check only makes sense if the kernel supports global
- * memory registration. Otherwise, map_on_demand will never == 0
- */
- if (!tunables->lnd_map_on_demand) {
- reason = "map_on_demand must be enabled";
- goto out;
- }
-#endif
+
if (conn->ibc_max_frags <= frag_num) {
reason = "unsupported max frags";
goto out;
case IBLND_REJECT_CONN_UNCOMPAT:
reason = "version negotiation";
break;
-
- case IBLND_REJECT_INVALID_SRV_ID:
- reason = "invalid service id";
- break;
}
conn->ibc_reconnect = 1;
kiblnd_rejected(struct kib_conn *conn, int reason, void *priv, int priv_nob)
{
struct kib_peer_ni *peer_ni = conn->ibc_peer;
+ int status = -ECONNREFUSED;
LASSERT (!in_interrupt());
LASSERT (conn->ibc_state == IBLND_CONN_ACTIVE_CONNECT);
break;
case IB_CM_REJ_INVALID_SERVICE_ID:
- peer_ni->ibp_retries++;
- kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_INVALID_SRV_ID, NULL);
+ status = -EHOSTUNREACH;
CNETERR("%s rejected: no listener at %d\n",
libcfs_nid2str(peer_ni->ibp_nid),
*kiblnd_tunables.kib_service);
break;
- case IB_CM_REJ_CONSUMER_DEFINED:
+ case IB_CM_REJ_CONSUMER_DEFINED:
if (priv_nob >= offsetof(struct kib_rej, ibr_padding)) {
struct kib_rej *rej = priv;
struct kib_connparams *cp = NULL;
- int flip = 0;
- __u64 incarnation = -1;
-
- /* NB. default incarnation is -1 because:
- * a) V1 will ignore dst incarnation in connreq.
- * b) V2 will provide incarnation while rejecting me,
- * -1 will be overwrote.
- *
- * if I try to connect to a V1 peer_ni with V2 protocol,
- * it rejected me then upgrade to V2, I have no idea
- * about the upgrading and try to reconnect with V1,
- * in this case upgraded V2 can find out I'm trying to
- * talk to the old guy and reject me(incarnation is -1).
- */
-
- if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
- rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
- __swab32s(&rej->ibr_magic);
- __swab16s(&rej->ibr_version);
- flip = 1;
- }
+ bool flip = false;
+ __u64 incarnation = -1;
+
+ /* NB. default incarnation is -1 because:
+ * a) V1 will ignore dst incarnation in connreq.
+ * b) V2 will provide incarnation while rejecting me,
+ * -1 will be overwrote.
+ *
+ * if I try to connect to a V1 peer_ni with V2 protocol,
+ * it rejected me then upgrade to V2, I have no idea
+ * about the upgrading and try to reconnect with V1,
+ * in this case upgraded V2 can find out I'm trying to
+ * talk to the old guy and reject me(incarnation is -1).
+ */
+
+ if (rej->ibr_magic == __swab32(IBLND_MSG_MAGIC) ||
+ rej->ibr_magic == __swab32(LNET_PROTO_MAGIC)) {
+ __swab32s(&rej->ibr_magic);
+ __swab16s(&rej->ibr_version);
+ flip = true;
+ }
if (priv_nob >= sizeof(struct kib_rej) &&
- rej->ibr_version > IBLND_MSG_VERSION_1) {
- /* priv_nob is always 148 in current version
- * of OFED, so we still need to check version.
- * (define of IB_CM_REJ_PRIVATE_DATA_SIZE) */
- cp = &rej->ibr_cp;
-
- if (flip) {
- __swab64s(&rej->ibr_incarnation);
- __swab16s(&cp->ibcp_queue_depth);
- __swab16s(&cp->ibcp_max_frags);
- __swab32s(&cp->ibcp_max_msg_size);
- }
-
- incarnation = rej->ibr_incarnation;
- }
-
- if (rej->ibr_magic != IBLND_MSG_MAGIC &&
- rej->ibr_magic != LNET_PROTO_MAGIC) {
- CERROR("%s rejected: consumer defined fatal error\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- break;
- }
-
- if (rej->ibr_version != IBLND_MSG_VERSION &&
- rej->ibr_version != IBLND_MSG_VERSION_1) {
- CERROR("%s rejected: o2iblnd version %x error\n",
- libcfs_nid2str(peer_ni->ibp_nid),
- rej->ibr_version);
- break;
- }
-
- if (rej->ibr_why == IBLND_REJECT_FATAL &&
- rej->ibr_version == IBLND_MSG_VERSION_1) {
- CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
- libcfs_nid2str(peer_ni->ibp_nid), rej->ibr_version);
-
- if (conn->ibc_version != IBLND_MSG_VERSION_1)
- rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
- }
-
- switch (rej->ibr_why) {
- case IBLND_REJECT_CONN_RACE:
- case IBLND_REJECT_CONN_STALE:
- case IBLND_REJECT_CONN_UNCOMPAT:
+ rej->ibr_version > IBLND_MSG_VERSION_1) {
+ /* priv_nob is always 148 in current version
+ * of OFED, so we still need to check version.
+ * (define of IB_CM_REJ_PRIVATE_DATA_SIZE)
+ */
+ cp = &rej->ibr_cp;
+
+ if (flip) {
+ __swab64s(&rej->ibr_incarnation);
+ __swab16s(&cp->ibcp_queue_depth);
+ __swab16s(&cp->ibcp_max_frags);
+ __swab32s(&cp->ibcp_max_msg_size);
+ }
+
+ incarnation = rej->ibr_incarnation;
+ }
+
+ if (rej->ibr_magic != IBLND_MSG_MAGIC &&
+ rej->ibr_magic != LNET_PROTO_MAGIC) {
+ CERROR("%s rejected: consumer defined fatal error\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ break;
+ }
+
+ if (rej->ibr_version != IBLND_MSG_VERSION &&
+ rej->ibr_version != IBLND_MSG_VERSION_1) {
+ CERROR("%s rejected: o2iblnd version %x error\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ rej->ibr_version);
+ break;
+ }
+
+ if (rej->ibr_why == IBLND_REJECT_FATAL &&
+ rej->ibr_version == IBLND_MSG_VERSION_1) {
+ CDEBUG(D_NET, "rejected by old version peer_ni %s: %x\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ rej->ibr_version);
+
+ if (conn->ibc_version != IBLND_MSG_VERSION_1)
+ rej->ibr_why = IBLND_REJECT_CONN_UNCOMPAT;
+ }
+
+ switch (rej->ibr_why) {
+ case IBLND_REJECT_CONN_RACE:
+ case IBLND_REJECT_CONN_STALE:
+ case IBLND_REJECT_CONN_UNCOMPAT:
case IBLND_REJECT_MSG_QUEUE_SIZE:
case IBLND_REJECT_RDMA_FRAGS:
kiblnd_check_reconnect(conn, rej->ibr_version,
- incarnation, rej->ibr_why, cp);
- break;
-
- case IBLND_REJECT_NO_RESOURCES:
- CERROR("%s rejected: o2iblnd no resources\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- break;
-
- case IBLND_REJECT_FATAL:
- CERROR("%s rejected: o2iblnd fatal error\n",
- libcfs_nid2str(peer_ni->ibp_nid));
- break;
-
- default:
- CERROR("%s rejected: o2iblnd reason %d\n",
- libcfs_nid2str(peer_ni->ibp_nid),
- rej->ibr_why);
- break;
- }
- break;
- }
- /* fall through */
- default:
- CNETERR("%s rejected: reason %d, size %d\n",
- libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
- break;
- }
+ incarnation,
+ rej->ibr_why, cp);
+ break;
+
+ case IBLND_REJECT_NO_RESOURCES:
+ CERROR("%s rejected: o2iblnd no resources\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ break;
+
+ case IBLND_REJECT_FATAL:
+ CERROR("%s rejected: o2iblnd fatal error\n",
+ libcfs_nid2str(peer_ni->ibp_nid));
+ break;
+
+ default:
+ CERROR("%s rejected: o2iblnd reason %d\n",
+ libcfs_nid2str(peer_ni->ibp_nid),
+ rej->ibr_why);
+ break;
+ }
+ break;
+ }
+ /* fall through */
+ default:
+ CNETERR("%s rejected: reason %d, size %d\n",
+ libcfs_nid2str(peer_ni->ibp_nid), reason, priv_nob);
+ break;
+ }
- kiblnd_connreq_done(conn, -ECONNREFUSED);
+ kiblnd_connreq_done(conn, status);
}
static void
}
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
+ if (msg->ibm_dstnid == lnet_nid_to_nid4(&ni->ni_nid) &&
msg->ibm_dststamp == net->ibn_incarnation)
rc = 0;
else
kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
{
struct kib_tx *tx;
- struct list_head *ttmp;
-
- list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, struct kib_tx, tx_list);
+ list_for_each_entry(tx, txs, tx_list) {
if (txs != &conn->ibc_active_txs) {
LASSERT(tx->tx_queued);
} else {
struct kib_peer_ni *peer_ni;
struct kib_conn *conn;
struct kib_tx *tx, *tx_tmp;
- struct list_head *ctmp;
unsigned long flags;
/* NB. We expect to have a look at all the peers and not find any
}
}
- list_for_each(ctmp, &peer_ni->ibp_conns) {
+ list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) {
int timedout;
int sendnoop;
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
spin_lock(&conn->ibc_lock);
* connection. We can only be sure RDMA activity
* has ceased once the QP has been modified.
*/
- while (!list_empty(&closes)) {
- conn = list_entry(closes.next,
- struct kib_conn, ibc_connd_list);
+ while ((conn = list_first_entry_or_null(&closes,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
kiblnd_close_conn(conn, -ETIMEDOUT);
kiblnd_conn_decref(conn);
* NOOP, but there were no non-blocking tx descs
* free to do it last time...
*/
- while (!list_empty(&checksends)) {
- conn = list_entry(checksends.next,
- struct kib_conn, ibc_connd_list);
+ while ((conn = list_first_entry_or_null(&checksends,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
spin_lock(&conn->ibc_lock);
struct kib_conn *conn;
int timeout;
int i;
- int dropped_lock;
+ bool dropped_lock;
int peer_index = 0;
unsigned long deadline = jiffies;
while (!kiblnd_data.kib_shutdown) {
int reconn = 0;
- dropped_lock = 0;
+ dropped_lock = false;
- if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies,
+ struct kib_conn, ibc_list);
+ if (conn) {
struct kib_peer_ni *peer_ni = NULL;
- conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
if (conn->ibc_reconnect) {
peer_ni = conn->ibc_peer;
}
spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
+ dropped_lock = true;
kiblnd_destroy_conn(conn);
&kiblnd_data.kib_reconn_wait);
}
- if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns,
+ struct kib_conn, ibc_list);
+ if (conn) {
int wait;
- conn = list_entry(kiblnd_data.kib_connd_conns.next,
- struct kib_conn, ibc_list);
+
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
+ dropped_lock = true;
kiblnd_disconnect_conn(conn);
wait = conn->ibc_waits;
&kiblnd_data.kib_reconn_list);
}
- if (list_empty(&kiblnd_data.kib_reconn_list))
+ conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list,
+ struct kib_conn, ibc_list);
+ if (!conn)
break;
- conn = list_entry(kiblnd_data.kib_reconn_list.next,
- struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
+ dropped_lock = true;
reconn += kiblnd_reconnect_peer(conn->ibc_peer);
kiblnd_peer_decref(conn->ibc_peer);
spin_lock_irqsave(lock, flags);
}
- if (!list_empty(&kiblnd_data.kib_connd_waits)) {
- conn = list_entry(kiblnd_data.kib_connd_waits.next,
- struct kib_conn, ibc_list);
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits,
+ struct kib_conn, ibc_list);
+ if (conn) {
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
kiblnd_conn_decref(conn);
spin_lock_irqsave(lock, flags);
- if (dropped_lock == 0)
+ if (!dropped_lock)
list_add_tail(&conn->ibc_list,
&kiblnd_data.kib_connd_waits);
}
unsigned int lnd_timeout;
spin_unlock_irqrestore(lock, flags);
- dropped_lock = 1;
+ dropped_lock = true;
/* Time to check for RDMA timeouts on a few more
* peers: I do checks every 'p' seconds on a
case IB_EVENT_PORT_ERR:
case IB_EVENT_DEVICE_FATAL:
CERROR("Fatal device error for NI %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
return;
case IB_EVENT_PORT_ACTIVE:
CERROR("Port reactivated for NI %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
return;
int
kiblnd_scheduler(void *arg)
{
- long id = (long)arg;
- struct kib_sched_info *sched;
+ long id = (long)arg;
+ struct kib_sched_info *sched;
struct kib_conn *conn;
- wait_queue_entry_t wait;
- unsigned long flags;
- struct ib_wc wc;
- int did_something;
- int rc;
+ wait_queue_entry_t wait;
+ unsigned long flags;
+ struct ib_wc wc;
+ bool did_something;
+ int rc;
init_wait(&wait);
rc = cfs_cpt_bind(lnet_cpt_table(), sched->ibs_cpt);
if (rc != 0) {
- CWARN("Unable to bind on CPU partition %d, please verify "
- "whether all CPUs are healthy and reload modules if "
- "necessary, otherwise your system might under risk of "
- "low performance\n", sched->ibs_cpt);
+ CWARN("Unable to bind on CPU partition %d, please verify whether all CPUs are healthy and reload modules if necessary, otherwise your system might under risk of low performance\n", sched->ibs_cpt);
}
spin_lock_irqsave(&sched->ibs_lock, flags);
spin_lock_irqsave(&sched->ibs_lock, flags);
}
- did_something = 0;
+ did_something = false;
- if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next,
- struct kib_conn, ibc_sched_list);
+ conn = list_first_entry_or_null(&sched->ibs_conns,
+ struct kib_conn,
+ ibc_sched_list);
+ if (conn) {
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
list_del(&conn->ibc_sched_list);
wc.wr_id = IBLND_WID_INVAL;
- rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
- if (rc == 0) {
- rc = ib_req_notify_cq(conn->ibc_cq,
- IB_CQ_NEXT_COMP);
- if (rc < 0) {
- CWARN("%s: ib_req_notify_cq failed: %d, "
- "closing connection\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
- kiblnd_close_conn(conn, -EIO);
- kiblnd_conn_decref(conn);
+ rc = ib_poll_cq(conn->ibc_cq, 1, &wc);
+ if (rc == 0) {
+ rc = ib_req_notify_cq(conn->ibc_cq,
+ IB_CQ_NEXT_COMP);
+ if (rc < 0) {
+ CWARN("%s: ib_req_notify_cq failed: %d, closing connection\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), rc);
+ kiblnd_close_conn(conn, -EIO);
+ kiblnd_conn_decref(conn);
spin_lock_irqsave(&sched->ibs_lock,
- flags);
+ flags);
continue;
}
}
if (rc < 0) {
- CWARN("%s: ib_poll_cq failed: %d, "
- "closing connection\n",
+ CWARN("%s: ib_poll_cq failed: %d, closing connection\n",
libcfs_nid2str(conn->ibc_peer->ibp_nid),
rc);
kiblnd_close_conn(conn, -EIO);
/* +1 ref for sched_conns */
kiblnd_conn_addref(conn);
list_add_tail(&conn->ibc_sched_list,
- &sched->ibs_conns);
+ &sched->ibs_conns);
if (waitqueue_active(&sched->ibs_waitq))
wake_up(&sched->ibs_waitq);
} else {
kiblnd_complete(&wc);
spin_lock_irqsave(&sched->ibs_lock, flags);
- }
+ }
- kiblnd_conn_decref(conn); /* ...drop my ref from above */
- did_something = 1;
- }
+ kiblnd_conn_decref(conn); /* ..drop my ref from above */
+ did_something = true;
+ }
- if (did_something)
- continue;
+ if (did_something)
+ continue;
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sched->ibs_waitq, &wait);
int
kiblnd_failover_thread(void *arg)
{
- rwlock_t *glock = &kiblnd_data.kib_global_lock;
+ rwlock_t *glock = &kiblnd_data.kib_global_lock;
struct kib_dev *dev;
struct net *ns = arg;
wait_queue_entry_t wait;
- unsigned long flags;
- int rc;
+ unsigned long flags;
+ int rc;
LASSERT(*kiblnd_tunables.kib_dev_failover != 0);
init_wait(&wait);
write_lock_irqsave(glock, flags);
- while (!kiblnd_data.kib_shutdown) {
- int do_failover = 0;
- int long_sleep;
+ while (!kiblnd_data.kib_shutdown) {
+ bool do_failover = false;
+ int long_sleep;
list_for_each_entry(dev, &kiblnd_data.kib_failed_devs,
- ibd_fail_list) {
+ ibd_fail_list) {
if (ktime_get_seconds() < dev->ibd_next_failover)
- continue;
- do_failover = 1;
- break;
- }
+ continue;
+ do_failover = true;
+ break;
+ }
- if (do_failover) {
+ if (do_failover) {
list_del_init(&dev->ibd_fail_list);
- dev->ibd_failover = 1;
+ dev->ibd_failover = 1;
write_unlock_irqrestore(glock, flags);
rc = kiblnd_dev_failover(dev, ns);
write_lock_irqsave(glock, flags);
- LASSERT (dev->ibd_failover);
- dev->ibd_failover = 0;
- if (rc >= 0) { /* Device is OK or failover succeed */
+ LASSERT(dev->ibd_failover);
+ dev->ibd_failover = 0;
+ if (rc >= 0) { /* Device is OK or failover succeed */
dev->ibd_next_failover = ktime_get_seconds() + 3;
- continue;
- }
+ continue;
+ }
- /* failed to failover, retry later */
+ /* failed to failover, retry later */
dev->ibd_next_failover = ktime_get_seconds() +
- min(dev->ibd_failed_failover, 10);
- if (kiblnd_dev_can_failover(dev)) {
+ min(dev->ibd_failed_failover, 10);
+ if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- }
+ &kiblnd_data.kib_failed_devs);
+ }
- continue;
- }
+ continue;
+ }
- /* long sleep if no more pending failover */
+ /* long sleep if no more pending failover */
long_sleep = list_empty(&kiblnd_data.kib_failed_devs);
set_current_state(TASK_INTERRUPTIBLE);
write_unlock_irqrestore(glock, flags);
rc = schedule_timeout(long_sleep ? cfs_time_seconds(10) :
- cfs_time_seconds(1));
+ cfs_time_seconds(1));
set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_failover_waitq, &wait);
write_lock_irqsave(glock, flags);
- if (!long_sleep || rc != 0)
- continue;
+ if (!long_sleep || rc != 0)
+ continue;
- /* have a long sleep, routine check all active devices,
- * we need checking like this because if there is not active
- * connection on the dev and no SEND from local, we may listen
- * on wrong HCA for ever while there is a bonding failover */
+ /* have a long sleep, routine check all active devices,
+ * we need checking like this because if there is not active
+ * connection on the dev and no SEND from local, we may listen
+ * on wrong HCA for ever while there is a bonding failover
+ */
list_for_each_entry(dev, &kiblnd_data.kib_devs, ibd_list) {
- if (kiblnd_dev_can_failover(dev)) {
+ if (kiblnd_dev_can_failover(dev)) {
list_add_tail(&dev->ibd_fail_list,
- &kiblnd_data.kib_failed_devs);
- }
- }
- }
+ &kiblnd_data.kib_failed_devs);
+ }
+ }
+ }
write_unlock_irqrestore(glock, flags);
- kiblnd_thread_fini();
- return 0;
+ kiblnd_thread_fini();
+ return 0;
}