*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lnet/klnds/o2iblnd/o2iblnd_cb.c
*
{
struct kib_tx *tx;
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct kib_tx, tx_list);
-
+
+ while ((tx = list_first_entry_or_null(txlist,
+ struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
/* complete now */
tx->tx_waiting = 0;
static struct kib_tx *
kiblnd_find_waiting_tx_locked(struct kib_conn *conn, int txtype, u64 cookie)
{
- struct list_head *tmp;
-
- list_for_each(tmp, &conn->ibc_active_txs) {
- struct kib_tx *tx = list_entry(tmp, struct kib_tx, tx_list);
+ struct kib_tx *tx;
+
+ list_for_each_entry(tx, &conn->ibc_active_txs, tx_list) {
LASSERT(!tx->tx_queued);
LASSERT(tx->tx_sending != 0 || tx->tx_waiting);
int rc;
int err = -EIO;
- LASSERT (net != NULL);
- LASSERT (rx->rx_nob < 0); /* was posted */
- rx->rx_nob = 0; /* isn't now */
+ LASSERT(net);
+ LASSERT(rx->rx_nob < 0); /* was posted */
+ rx->rx_nob = 0; /* isn't now */
- if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
- goto ignore;
+ if (conn->ibc_state > IBLND_CONN_ESTABLISHED)
+ goto ignore;
- if (status != IB_WC_SUCCESS) {
- CNETERR("Rx from %s failed: %d\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
- goto failed;
- }
+ if (status != IB_WC_SUCCESS) {
+ CNETERR("Rx from %s failed: %d\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid), status);
+ goto failed;
+ }
- LASSERT (nob >= 0);
- rx->rx_nob = nob;
+ LASSERT(nob >= 0);
+ rx->rx_nob = nob;
- rc = kiblnd_unpack_msg(msg, rx->rx_nob);
- if (rc != 0) {
- CERROR ("Error %d unpacking rx from %s\n",
- rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
- goto failed;
- }
+ rc = kiblnd_unpack_msg(msg, rx->rx_nob);
+ if (rc != 0) {
+ CERROR("Error %d unpacking rx from %s\n",
+ rc, libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ goto failed;
+ }
- if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
- msg->ibm_dstnid != ni->ni_nid ||
- msg->ibm_srcstamp != conn->ibc_incarnation ||
- msg->ibm_dststamp != net->ibn_incarnation) {
- CERROR ("Stale rx from %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- err = -ESTALE;
- goto failed;
- }
+ if (msg->ibm_srcnid != conn->ibc_peer->ibp_nid ||
+ msg->ibm_dstnid != lnet_nid_to_nid4(&ni->ni_nid) ||
+ msg->ibm_srcstamp != conn->ibc_incarnation ||
+ msg->ibm_dststamp != net->ibn_incarnation) {
+ CERROR("Stale rx from %s\n",
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ err = -ESTALE;
+ goto failed;
+ }
- /* set time last known alive */
- kiblnd_peer_alive(conn->ibc_peer);
+ /* set time last known alive */
+ kiblnd_peer_alive(conn->ibc_peer);
- /* racing with connection establishment/teardown! */
+ /* racing with connection establishment/teardown! */
- if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
+ if (conn->ibc_state < IBLND_CONN_ESTABLISHED) {
rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
unsigned long flags;
return;
}
write_unlock_irqrestore(g_lock, flags);
- }
- kiblnd_handle_rx(rx);
- return;
+ }
+ kiblnd_handle_rx(rx);
+ return;
- failed:
- CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
- kiblnd_close_conn(conn, err);
- ignore:
- kiblnd_drop_rx(rx); /* Don't re-post rx. */
+failed:
+ CDEBUG(D_NET, "rx %p conn %p\n", rx, conn);
+ kiblnd_close_conn(conn, err);
+ignore:
+ kiblnd_drop_rx(rx); /* Don't re-post rx. */
}
static int
kiblnd_fmr_pool_unmap(&tx->tx_fmr, tx->tx_status);
if (tx->tx_nfrags != 0) {
- kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
+ kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev,
tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir);
tx->tx_nfrags = 0;
}
tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
tx->tx_nfrags = nfrags;
- rd->rd_nfrags = kiblnd_dma_map_sg(hdev->ibh_ibdev, tx->tx_frags,
+ rd->rd_nfrags = kiblnd_dma_map_sg(hdev, tx->tx_frags,
tx->tx_nfrags, tx->tx_dmadir);
for (i = 0, nob = 0; i < rd->rd_nfrags; i++) {
{
struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
- int fragnob;
- int max_nkiov;
+ int fragnob;
+ int max_nkiov;
+ int sg_count = 0;
CDEBUG(D_NET, "niov %d offset %d nob %d\n", nkiov, offset, nob);
do {
LASSERT(nkiov > 0);
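+ /* sg may be NULL here if sg_next() below exhausted the
+ * scatterlist on a previous iteration; fail before
+ * sg_set_page() dereferences it */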
+ if (!sg) {
+ CERROR("lacking enough sg entries to map tx\n");
+ return -EFAULT;
+ }
+ sg_count++;
+
fragnob = min((int)(kiov->bv_len - offset), nob);
/*
sg_set_page(sg, kiov->bv_page, fragnob,
kiov->bv_offset + offset);
sg = sg_next(sg);
- if (!sg) {
- CERROR("lacking enough sg entries to map tx\n");
- return -EFAULT;
- }
offset = 0;
kiov++;
nob -= fragnob;
} while (nob > 0);
- return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags);
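+ /* return an explicit entry count: pointer arithmetic on sg
+ * is not valid once the scatterlist may be chained */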
+ return kiblnd_map_tx(ni, tx, rd, sg_count);
}
static int
struct kib_msg *msg = tx->tx_msg;
struct kib_peer_ni *peer_ni = conn->ibc_peer;
struct lnet_ni *ni = peer_ni->ibp_ni;
+ struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
int ver = conn->ibc_version;
int rc;
int done;
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- struct kib_fast_reg_descriptor *frd = tx->tx_fmr.fmr_frd;
struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
struct ib_send_wr *wr = &tx->tx_wrq[0].wr;
- if (frd != NULL) {
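+ /* skip the invalidate/fastreg WR chain if this FRD is
+ * still posted from an earlier send attempt */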
+ if (frd != NULL && !frd->frd_posted) {
if (!frd->frd_valid) {
wr = &frd->frd_inv_wr.wr;
wr->next = &frd->frd_fastreg_wr.wr;
conn->ibc_last_send = ktime_get();
- if (rc == 0)
- return 0;
+ if (rc == 0) {
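+ /* posted successfully: remember the FRD is on the QP so a
+ * resend won't post it a second time */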
+ if (frd != NULL)
+ frd->frd_posted = true;
+ return 0;
+ }
/* NB credits are transferred in the actual
* message, which can only be the last work item */
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- struct kib_tx, tx_list);
+ (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd,
+ struct kib_tx, tx_list)) != NULL) {
list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue_nocred,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT (!IBLND_OOB_CAPABLE(ver));
credit = 1;
- tx = list_entry(conn->ibc_tx_noops.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_noops,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue,
+ struct kib_tx, tx_list);
} else
break;
#endif
LASSERT(tx->tx_nwrq >= 0);
- LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1);
+ LASSERT(tx->tx_nwrq <= IBLND_MAX_RDMA_FRAGS);
LASSERT(nob <= IBLND_MSG_SIZE);
#ifdef HAVE_IB_GET_DMA_MR
LASSERT(mr != NULL);
/* Thread context */
LASSERT (!in_interrupt());
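+ /* grab a tx descriptor first; every message type handled
+ * below needs one, including the IMMEDIATE fallback */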
+ tx = kiblnd_get_idle_tx(ni, target.nid);
+ if (tx == NULL) {
+ CERROR("Can't allocate %s txd for %s\n",
+ lnet_msgtyp2str(type),
+ libcfs_nid2str(target.nid));
+ return -ENOMEM;
+ }
+ ibmsg = tx->tx_msg;
+
switch (type) {
default:
LBUG();
/* is the REPLY message too small for RDMA? */
nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[lntmsg->msg_md->md_length]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate txd for GET to %s\n",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
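+ /* send inline unless the payload is too large or the
+ * caller forced the RDMA path */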
+ if (nob <= IBLND_MSG_SIZE && !lntmsg->msg_rdma_force)
+ break; /* send IMMEDIATE */
- ibmsg = tx->tx_msg;
rd = &ibmsg->ibm_u.get.ibgm_rd;
rc = kiblnd_setup_rd_kiov(ni, tx, rd,
lntmsg->msg_md->md_niov,
return -EIO;
}
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */
- tx->tx_waiting = 1; /* waiting for GET_DONE */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
+ /* finalise lntmsg[0,1] on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+ tx->tx_waiting = 1; /* waiting for GET_DONE */
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
- case LNET_MSG_REPLY:
- case LNET_MSG_PUT:
- /* Is the payload small enough not to need RDMA? */
+ case LNET_MSG_REPLY:
+ case LNET_MSG_PUT:
+ /* Is the payload small enough not to need RDMA? */
nob = offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob]);
- if (nob <= IBLND_MSG_SIZE)
- break; /* send IMMEDIATE */
-
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR("Can't allocate %s txd for %s\n",
- type == LNET_MSG_PUT ? "PUT" : "REPLY",
- libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
+ if (nob <= IBLND_MSG_SIZE && !lntmsg->msg_rdma_force)
+ break; /* send IMMEDIATE */
rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd,
payload_niov, payload_kiov,
return -EIO;
}
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
- ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
+ ibmsg->ibm_u.putreq.ibprm_hdr = *hdr;
+ ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie;
kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ,
sizeof(struct kib_putreq_msg));
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
- }
+ /* finalise lntmsg[0,1] on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+ tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
+ }
/* send IMMEDIATE */
LASSERT(offsetof(struct kib_msg, ibm_u.immediate.ibim_payload[payload_nob])
<= IBLND_MSG_SIZE);
- tx = kiblnd_get_idle_tx(ni, target.nid);
- if (tx == NULL) {
- CERROR ("Can't send %d to %s: tx descs exhausted\n",
- type, libcfs_nid2str(target.nid));
- return -ENOMEM;
- }
-
- ibmsg = tx->tx_msg;
- ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
+ ibmsg = tx->tx_msg;
+ ibmsg->ibm_u.immediate.ibim_hdr = *hdr;
lnet_copy_kiov2flat(IBLND_MSG_SIZE, ibmsg,
offsetof(struct kib_msg,
payload_offset, payload_nob);
nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]);
- kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
- tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */
- kiblnd_launch_tx(ni, tx, target.nid);
- return 0;
+ kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob);
+
+ /* finalise lntmsg on completion */
+ tx->tx_lntmsg[0] = lntmsg;
+
+ kiblnd_launch_tx(ni, tx, target.nid);
+ return 0;
}
static void
lnet_finalize(lntmsg, -EIO);
}
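+
+/* report the priority of this NI's underlying DMA device to
+ * LNet (forwarded to lnet_get_dev_prio()) */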
+unsigned int
+kiblnd_get_dev_prio(struct lnet_ni *ni, unsigned int dev_idx)
+{
+ struct kib_net *net = ni->ni_data;
+ struct device *dev = NULL;
+
+ if (net)
+ dev = net->ibn_dev->ibd_hdev->ibh_ibdev->dma_device;
+
+ return lnet_get_dev_prio(dev, dev_idx);
+}
+
int
kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov, struct bio_vec *kiov,
return rc;
}
-int
-kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
-{
- struct task_struct *task = kthread_run(fn, arg, "%s", name);
-
- if (IS_ERR(task))
- return PTR_ERR(task);
-
- atomic_inc(&kiblnd_data.kib_nthreads);
- return 0;
-}
-
static void
kiblnd_thread_fini (void)
{
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!list_empty(&conn->ibc_early_rxs)) {
- rx = list_entry(conn->ibc_early_rxs.next,
- struct kib_rx, rx_list);
+ while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs,
+ struct kib_rx,
+ rx_list)) != NULL) {
list_del(&rx->rx_list);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
/* connection established */
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- /* reset retry count */
- peer_ni->ibp_retries = 0;
-
conn->ibc_last_send = ktime_get();
kiblnd_set_conn_state(conn, IBLND_CONN_ESTABLISHED);
kiblnd_peer_alive(peer_ni);
* scheduled. We won't be using round robin on this first batch.
*/
spin_lock(&conn->ibc_lock);
- while (!list_empty(&txs)) {
- tx = list_entry(txs.next, struct kib_tx, tx_list);
+ while ((tx = list_first_entry_or_null(&txs, struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
kiblnd_queue_tx_locked(tx, conn);
rej.ibr_incarnation = net->ibn_incarnation;
}
- if (ni == NULL || /* no matching net */
- ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
- net->ibn_dev != ibdev) { /* wrong device */
+ if (ni == NULL || /* no matching net */
+ lnet_nid_to_nid4(&ni->ni_nid) !=
+ reqmsg->ibm_dstnid || /* right NET, wrong NID! */
+ net->ibn_dev != ibdev) { /* wrong device */
CERROR("Can't accept conn from %s on %s (%s:%d:%pI4h): bad dst nid %s\n", libcfs_nid2str(nid),
- ni ? libcfs_nid2str(ni->ni_nid) : "NA",
+ ni ? libcfs_nidstr(&ni->ni_nid) : "NA",
ibdev->ibd_ifname, ibdev->ibd_nnets,
&ibdev->ibd_ifip,
libcfs_nid2str(reqmsg->ibm_dstnid));
* the lower NID connection win so we can move forward.
*/
if (peer2->ibp_connecting != 0 &&
- nid < ni->ni_nid && peer2->ibp_races <
- MAX_CONN_RACES_BEFORE_ABORT) {
+ nid < lnet_nid_to_nid4(&ni->ni_nid) &&
+ peer2->ibp_races < MAX_CONN_RACES_BEFORE_ABORT) {
peer2->ibp_races++;
write_unlock_irqrestore(g_lock, flags);
goto out;
}
- if (peer_ni->ibp_retries > *kiblnd_tunables.kib_retry_count) {
- reason = "retry count exceeded due to no listener";
- goto out;
- }
-
switch (why) {
default:
reason = "Unknown";
break;
case IBLND_REJECT_RDMA_FRAGS: {
- struct lnet_ioctl_config_o2iblnd_tunables *tunables;
-
if (!cp) {
reason = "can't negotiate max frags";
goto out;
}
- tunables = &peer_ni->ibp_ni->ni_lnd_tunables.lnd_tun_u.lnd_o2ib;
-#ifdef HAVE_IB_GET_DMA_MR
- /*
- * This check only makes sense if the kernel supports global
- * memory registration. Otherwise, map_on_demand will never == 0
- */
- if (!tunables->lnd_map_on_demand) {
- reason = "map_on_demand must be enabled";
- goto out;
- }
-#endif
+
if (conn->ibc_max_frags <= frag_num) {
reason = "unsupported max frags";
goto out;
case IBLND_REJECT_CONN_UNCOMPAT:
reason = "version negotiation";
break;
-
- case IBLND_REJECT_INVALID_SRV_ID:
- reason = "invalid service id";
- break;
}
conn->ibc_reconnect = 1;
case IB_CM_REJ_INVALID_SERVICE_ID:
status = -EHOSTUNREACH;
- peer_ni->ibp_retries++;
- kiblnd_check_reconnect(conn, IBLND_MSG_VERSION, 0,
- IBLND_REJECT_INVALID_SRV_ID, NULL);
CNETERR("%s rejected: no listener at %d\n",
libcfs_nid2str(peer_ni->ibp_nid),
*kiblnd_tunables.kib_service);
}
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (msg->ibm_dstnid == ni->ni_nid &&
+ if (msg->ibm_dstnid == lnet_nid_to_nid4(&ni->ni_nid) &&
msg->ibm_dststamp == net->ibn_incarnation)
rc = 0;
else
kiblnd_check_txs_locked(struct kib_conn *conn, struct list_head *txs)
{
struct kib_tx *tx;
- struct list_head *ttmp;
-
- list_for_each(ttmp, txs) {
- tx = list_entry(ttmp, struct kib_tx, tx_list);
+
+ list_for_each_entry(tx, txs, tx_list) {
if (txs != &conn->ibc_active_txs) {
LASSERT(tx->tx_queued);
} else {
struct kib_peer_ni *peer_ni;
struct kib_conn *conn;
struct kib_tx *tx, *tx_tmp;
- struct list_head *ctmp;
unsigned long flags;
/* NB. We expect to have a look at all the peers and not find any
}
}
- list_for_each(ctmp, &peer_ni->ibp_conns) {
+ list_for_each_entry(conn, &peer_ni->ibp_conns, ibc_list) {
int timedout;
int sendnoop;
- conn = list_entry(ctmp, struct kib_conn, ibc_list);
-
LASSERT(conn->ibc_state == IBLND_CONN_ESTABLISHED);
spin_lock(&conn->ibc_lock);
* connection. We can only be sure RDMA activity
* has ceased once the QP has been modified.
*/
- while (!list_empty(&closes)) {
- conn = list_entry(closes.next,
- struct kib_conn, ibc_connd_list);
+ while ((conn = list_first_entry_or_null(&closes,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
kiblnd_close_conn(conn, -ETIMEDOUT);
kiblnd_conn_decref(conn);
* NOOP, but there were no non-blocking tx descs
* free to do it last time...
*/
- while (!list_empty(&checksends)) {
- conn = list_entry(checksends.next,
- struct kib_conn, ibc_connd_list);
+ while ((conn = list_first_entry_or_null(&checksends,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
spin_lock(&conn->ibc_lock);
dropped_lock = false;
- if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies,
+ struct kib_conn, ibc_list);
+ if (conn) {
struct kib_peer_ni *peer_ni = NULL;
- conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
if (conn->ibc_reconnect) {
peer_ni = conn->ibc_peer;
&kiblnd_data.kib_reconn_wait);
}
- if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns,
+ struct kib_conn, ibc_list);
+ if (conn) {
int wait;
- conn = list_entry(kiblnd_data.kib_connd_conns.next,
- struct kib_conn, ibc_list);
+
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
&kiblnd_data.kib_reconn_list);
}
- if (list_empty(&kiblnd_data.kib_reconn_list))
+ conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list,
+ struct kib_conn, ibc_list);
+ if (!conn)
break;
- conn = list_entry(kiblnd_data.kib_reconn_list.next,
- struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
spin_lock_irqsave(lock, flags);
}
- if (!list_empty(&kiblnd_data.kib_connd_waits)) {
- conn = list_entry(kiblnd_data.kib_connd_waits.next,
- struct kib_conn, ibc_list);
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits,
+ struct kib_conn, ibc_list);
+ if (conn) {
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
case IB_EVENT_PORT_ERR:
case IB_EVENT_DEVICE_FATAL:
CERROR("Fatal device error for NI %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 1);
return;
case IB_EVENT_PORT_ACTIVE:
CERROR("Port reactivated for NI %s\n",
- libcfs_nid2str(conn->ibc_peer->ibp_ni->ni_nid));
+ libcfs_nidstr(&conn->ibc_peer->ibp_ni->ni_nid));
atomic_set(&conn->ibc_peer->ibp_ni->ni_fatal_error_on, 0);
return;
did_something = false;
- if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next,
- struct kib_conn, ibc_sched_list);
+ conn = list_first_entry_or_null(&sched->ibs_conns,
+ struct kib_conn,
+ ibc_sched_list);
+ if (conn) {
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
list_del(&conn->ibc_sched_list);