static void
kiblnd_fail_fmr_poolset(struct kib_fmr_poolset *fps, struct list_head *zombies)
{
+ struct kib_fmr_pool *fpo;
+
if (fps->fps_net == NULL) /* intialized? */
return;
spin_lock(&fps->fps_lock);
- while (!list_empty(&fps->fps_pool_list)) {
- struct kib_fmr_pool *fpo = list_entry(fps->fps_pool_list.next,
- struct kib_fmr_pool,
- fpo_list);
-
+ while ((fpo = list_first_entry_or_null(&fps->fps_pool_list,
+ struct kib_fmr_pool,
+ fpo_list)) != NULL) {
fpo->fpo_failed = 1;
if (fpo->fpo_map_count == 0)
list_move(&fpo->fpo_list, zombies);
{
struct kib_pool *pool;
- while (!list_empty(head)) {
- pool = list_entry(head->next, struct kib_pool, po_list);
+ while ((pool = list_first_entry_or_null(head,
+ struct kib_pool,
+ po_list)) != NULL) {
list_del(&pool->po_list);
LASSERT(pool->po_owner != NULL);
static void
kiblnd_fail_poolset(struct kib_poolset *ps, struct list_head *zombies)
{
+ struct kib_pool *po;
+
if (ps->ps_net == NULL) /* intialized? */
return;
spin_lock(&ps->ps_lock);
- while (!list_empty(&ps->ps_pool_list)) {
- struct kib_pool *po = list_entry(ps->ps_pool_list.next,
- struct kib_pool, po_list);
-
+ while ((po = list_first_entry_or_null(&ps->ps_pool_list,
+                                       struct kib_pool,
+                                       po_list)) != NULL) {
po->po_failed = 1;
if (po->po_allocated == 0)
list_move(&po->po_list, zombies);
{
struct kib_tx *tx;
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct kib_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(txlist,
+ struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
/* complete now */
tx->tx_waiting = 0;
LASSERT (conn->ibc_reserved_credits >= 0);
while (conn->ibc_reserved_credits > 0 &&
- !list_empty(&conn->ibc_tx_queue_rsrvd)) {
- tx = list_entry(conn->ibc_tx_queue_rsrvd.next,
- struct kib_tx, tx_list);
+ (tx = list_first_entry_or_null(&conn->ibc_tx_queue_rsrvd,
+ struct kib_tx, tx_list)) != NULL) {
list_move_tail(&tx->tx_list, &conn->ibc_tx_queue);
conn->ibc_reserved_credits--;
}
if (!list_empty(&conn->ibc_tx_queue_nocred)) {
credit = 0;
- tx = list_entry(conn->ibc_tx_queue_nocred.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue_nocred,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_noops)) {
LASSERT (!IBLND_OOB_CAPABLE(ver));
credit = 1;
- tx = list_entry(conn->ibc_tx_noops.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_noops,
+ struct kib_tx, tx_list);
} else if (!list_empty(&conn->ibc_tx_queue)) {
credit = 1;
- tx = list_entry(conn->ibc_tx_queue.next,
- struct kib_tx, tx_list);
+ tx = list_first_entry(&conn->ibc_tx_queue,
+ struct kib_tx, tx_list);
} else
break;
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
write_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- while (!list_empty(&conn->ibc_early_rxs)) {
- rx = list_entry(conn->ibc_early_rxs.next,
- struct kib_rx, rx_list);
+ while ((rx = list_first_entry_or_null(&conn->ibc_early_rxs,
+ struct kib_rx,
+ rx_list)) != NULL) {
list_del(&rx->rx_list);
write_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
* scheduled. We won't be using round robin on this first batch.
*/
spin_lock(&conn->ibc_lock);
- while (!list_empty(&txs)) {
- tx = list_entry(txs.next, struct kib_tx, tx_list);
+ while ((tx = list_first_entry_or_null(&txs, struct kib_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
kiblnd_queue_tx_locked(tx, conn);
* connection. We can only be sure RDMA activity
* has ceased once the QP has been modified.
*/
- while (!list_empty(&closes)) {
- conn = list_entry(closes.next,
- struct kib_conn, ibc_connd_list);
+ while ((conn = list_first_entry_or_null(&closes,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
kiblnd_close_conn(conn, -ETIMEDOUT);
kiblnd_conn_decref(conn);
* NOOP, but there were no non-blocking tx descs
* free to do it last time...
*/
- while (!list_empty(&checksends)) {
- conn = list_entry(checksends.next,
- struct kib_conn, ibc_connd_list);
+ while ((conn = list_first_entry_or_null(&checksends,
+ struct kib_conn,
+ ibc_connd_list)) != NULL) {
list_del(&conn->ibc_connd_list);
spin_lock(&conn->ibc_lock);
dropped_lock = false;
- if (!list_empty(&kiblnd_data.kib_connd_zombies)) {
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_zombies,
+ struct kib_conn, ibc_list);
+ if (conn) {
struct kib_peer_ni *peer_ni = NULL;
- conn = list_entry(kiblnd_data.kib_connd_zombies.next,
- struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
if (conn->ibc_reconnect) {
peer_ni = conn->ibc_peer;
&kiblnd_data.kib_reconn_wait);
}
- if (!list_empty(&kiblnd_data.kib_connd_conns)) {
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_conns,
+ struct kib_conn, ibc_list);
+ if (conn) {
int wait;
- conn = list_entry(kiblnd_data.kib_connd_conns.next,
- struct kib_conn, ibc_list);
+
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
&kiblnd_data.kib_reconn_list);
}
- if (list_empty(&kiblnd_data.kib_reconn_list))
+ conn = list_first_entry_or_null(&kiblnd_data.kib_reconn_list,
+ struct kib_conn, ibc_list);
+ if (!conn)
break;
- conn = list_entry(kiblnd_data.kib_reconn_list.next,
- struct kib_conn, ibc_list);
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
spin_lock_irqsave(lock, flags);
}
- if (!list_empty(&kiblnd_data.kib_connd_waits)) {
- conn = list_entry(kiblnd_data.kib_connd_waits.next,
- struct kib_conn, ibc_list);
+ conn = list_first_entry_or_null(&kiblnd_data.kib_connd_waits,
+                                 struct kib_conn,
+                                 ibc_list);
+ if (conn) {
list_del(&conn->ibc_list);
spin_unlock_irqrestore(lock, flags);
did_something = false;
- if (!list_empty(&sched->ibs_conns)) {
- conn = list_entry(sched->ibs_conns.next,
- struct kib_conn, ibc_sched_list);
+ conn = list_first_entry_or_null(&sched->ibs_conns,
+ struct kib_conn,
+ ibc_sched_list);
+ if (conn) {
/* take over kib_sched_conns' ref on conn... */
LASSERT(conn->ibc_scheduled);
list_del(&conn->ibc_sched_list);
spin_unlock(&peer_ni->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
-
+ while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
+ tx_zc_list)) != NULL) {
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}
list_splice_init(&ksocknal_data.ksnd_idle_noop_txs, &zlist);
spin_unlock(&ksocknal_data.ksnd_tx_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, struct ksock_tx, tx_list);
+ while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
+ tx_list)) != NULL) {
list_del(&tx->tx_list);
LIBCFS_FREE(tx, tx->tx_desc_size);
}
/* searching for a noop tx in free list */
spin_lock(&ksocknal_data.ksnd_tx_lock);
- if (!list_empty(&ksocknal_data.ksnd_idle_noop_txs)) {
- tx = list_entry(ksocknal_data.ksnd_idle_noop_txs.next,
- struct ksock_tx, tx_list);
+ tx = list_first_entry_or_null(&ksocknal_data.ksnd_idle_noop_txs,
+ struct ksock_tx, tx_list);
+ if (tx) {
LASSERT(tx->tx_desc_size == size);
list_del(&tx->tx_list);
}
{
struct ksock_tx *tx;
- while (!list_empty(txlist)) {
- tx = list_entry(txlist->next, struct ksock_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(txlist, struct ksock_tx,
+ tx_list)) != NULL) {
if (error && tx->tx_lnetmsg != NULL) {
CNETERR("Deleting packet type %d len %d %s->%s\n",
le32_to_cpu(tx->tx_lnetmsg->msg_hdr.type),
bool did_something = false;
/* Ensure I progress everything semi-fairly */
-
- if (!list_empty(&sched->kss_rx_conns)) {
- conn = list_entry(sched->kss_rx_conns.next,
- struct ksock_conn, ksnc_rx_list);
+ conn = list_first_entry_or_null(&sched->kss_rx_conns,
+ struct ksock_conn,
+ ksnc_rx_list);
+ if (conn) {
list_del(&conn->ksnc_rx_list);
LASSERT(conn->ksnc_rx_scheduled);
list_splice_init(&sched->kss_zombie_noop_txs, &zlist);
- conn = list_entry(sched->kss_tx_conns.next,
- struct ksock_conn, ksnc_tx_list);
+ conn = list_first_entry(&sched->kss_tx_conns,
+ struct ksock_conn,
+ ksnc_tx_list);
list_del(&conn->ksnc_tx_list);
LASSERT(conn->ksnc_tx_scheduled);
LASSERT(conn->ksnc_tx_ready);
LASSERT(!list_empty(&conn->ksnc_tx_queue));
- tx = list_entry(conn->ksnc_tx_queue.next,
- struct ksock_tx, tx_list);
+ tx = list_first_entry(&conn->ksnc_tx_queue,
+ struct ksock_tx, tx_list);
if (conn->ksnc_tx_carrier == tx)
ksocknal_next_tx_carrier(conn);
/* ksnp_tx_queue is queued on a conn on successful
* connection for V1.x and V2.x
*/
- if (!list_empty(&peer_ni->ksnp_conns)) {
- conn = list_entry(peer_ni->ksnp_conns.next,
- struct ksock_conn, ksnc_list);
+ conn = list_first_entry_or_null(&peer_ni->ksnp_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn)
LASSERT(conn->ksnc_proto == &ksocknal_protocol_v3x);
- }
/* take all the blocked packets while I've got the lock and
* complete below...
dropped_lock = true;
}
- if (!list_empty(&ksocknal_data.ksnd_connd_connreqs)) {
+ cr = list_first_entry_or_null(&ksocknal_data.ksnd_connd_connreqs,
+ struct ksock_connreq, ksncr_list);
+ if (cr) {
/* Connection accepted by the listener */
- cr = list_entry(ksocknal_data.ksnd_connd_connreqs.next,
- struct ksock_connreq, ksncr_list);
-
list_del(&cr->ksncr_list);
spin_unlock_bh(connd_lock);
dropped_lock = true;
write_lock_bh(&ksocknal_data.ksnd_global_lock);
- while (!list_empty(&peer_ni->ksnp_tx_queue)) {
- tx = list_entry(peer_ni->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
-
+ while ((tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
+ struct ksock_tx,
+ tx_list)) != NULL) {
if (ktime_get_seconds() < tx->tx_deadline)
break;
/* we can't process stale txs right here because we're
* holding only shared lock
*/
- if (!list_empty(&peer_ni->ksnp_tx_queue)) {
- struct ksock_tx *tx;
-
- tx = list_entry(peer_ni->ksnp_tx_queue.next,
- struct ksock_tx, tx_list);
- if (ktime_get_seconds() >= tx->tx_deadline) {
- ksocknal_peer_addref(peer_ni);
- read_unlock(&ksocknal_data.ksnd_global_lock);
+ tx = list_first_entry_or_null(&peer_ni->ksnp_tx_queue,
+ struct ksock_tx, tx_list);
+ if (tx && ktime_get_seconds() >= tx->tx_deadline) {
+ ksocknal_peer_addref(peer_ni);
+ read_unlock(&ksocknal_data.ksnd_global_lock);
- ksocknal_flush_stale_txs(peer_ni);
+ ksocknal_flush_stale_txs(peer_ni);
- ksocknal_peer_decref(peer_ni);
- goto again;
- }
+ ksocknal_peer_decref(peer_ni);
+ goto again;
}
if (list_empty(&peer_ni->ksnp_zc_req_list))
spin_lock_bh(&ksocknal_data.ksnd_reaper_lock);
while (!ksocknal_data.ksnd_shuttingdown) {
- if (!list_empty(&ksocknal_data.ksnd_deathrow_conns)) {
- conn = list_entry(ksocknal_data.ksnd_deathrow_conns.next,
- struct ksock_conn, ksnc_list);
+ conn = list_first_entry_or_null(&ksocknal_data.ksnd_deathrow_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn) {
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
continue;
}
- if (!list_empty(&ksocknal_data.ksnd_zombie_conns)) {
- conn = list_entry(ksocknal_data.ksnd_zombie_conns.next,
- struct ksock_conn, ksnc_list);
+ conn = list_first_entry_or_null(&ksocknal_data.ksnd_zombie_conns,
+ struct ksock_conn, ksnc_list);
+ if (conn) {
list_del(&conn->ksnc_list);
spin_unlock_bh(&ksocknal_data.ksnd_reaper_lock);
/* reschedule all the connections that stalled with ENOMEM... */
nenomem_conns = 0;
- while (!list_empty(&enomem_conns)) {
- conn = list_entry(enomem_conns.next,
- struct ksock_conn, ksnc_tx_list);
+ while ((conn = list_first_entry_or_null(&enomem_conns,
+ struct ksock_conn,
+ ksnc_tx_list)) != NULL) {
list_del(&conn->ksnc_tx_list);
sched = conn->ksnc_scheduler;
spin_unlock(&peer_ni->ksnp_lock);
- while (!list_empty(&zlist)) {
- tx = list_entry(zlist.next, struct ksock_tx, tx_zc_list);
+ while ((tx = list_first_entry_or_null(&zlist, struct ksock_tx,
+ tx_zc_list)) != NULL) {
list_del(&tx->tx_zc_list);
ksocknal_tx_decref(tx);
}