static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
{
LASSERT(plist->pl_nr > 0);
- return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
+ return list_first_entry(&plist->pl_pages, struct cl_page, cp_batch);
}
/**
static inline struct ll_inode_info *
agl_first_entry(struct ll_statahead_info *sai)
{
- return list_entry(sai->sai_agls.next, struct ll_inode_info,
- lli_agl_list);
+ return list_first_entry(&sai->sai_agls, struct ll_inode_info,
+ lli_agl_list);
}
/* statahead window is full */
while (({set_current_state(TASK_IDLE);
!kthread_should_stop(); })) {
spin_lock(&plli->lli_agl_lock);
- if (!agl_list_empty(sai)) {
+ clli = list_first_entry_or_null(&sai->sai_agls,
+ struct ll_inode_info,
+ lli_agl_list);
+ if (clli) {
__set_current_state(TASK_RUNNING);
- clli = agl_first_entry(sai);
list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
ll_agl_trigger(&clli->lli_vfs_inode, sai);
kthread_stop(agl_task);
spin_lock(&plli->lli_agl_lock);
- while (!agl_list_empty(sai)) {
- clli = agl_first_entry(sai);
+ while ((clli = list_first_entry_or_null(&sai->sai_agls,
+ struct ll_inode_info,
+ lli_agl_list)) != NULL) {
list_del_init(&clli->lli_agl_list);
spin_unlock(&plli->lli_agl_lock);
clli->lli_agl_index = 0;
{
struct lov_io *lio = cl2lov_io(env, ios);
struct lov_object *lov = cl2lov(ios->cis_obj);
+ struct lov_io_sub *sub;
ENTRY;
-
LASSERT(list_empty(&lio->lis_active));
- while (!list_empty(&lio->lis_subios)) {
- struct lov_io_sub *sub = list_entry(lio->lis_subios.next,
- struct lov_io_sub,
- sub_list);
-
+ while ((sub = list_first_entry_or_null(&lio->lis_subios,
+ struct lov_io_sub,
+ sub_list)) != NULL) {
list_del_init(&sub->sub_list);
lio->lis_nr_subios--;
/* Import management functions */
static void obd_zombie_import_free(struct obd_import *imp)
{
- ENTRY;
+ struct obd_import_conn *imp_conn;
+ ENTRY;
CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
imp->imp_obd->obd_name);
ptlrpc_connection_put(imp->imp_connection);
- while (!list_empty(&imp->imp_conn_list)) {
- struct obd_import_conn *imp_conn;
-
- imp_conn = list_first_entry(&imp->imp_conn_list,
- struct obd_import_conn, oic_item);
+ while ((imp_conn = list_first_entry_or_null(&imp->imp_conn_list,
+ struct obd_import_conn,
+ oic_item)) != NULL) {
list_del_init(&imp_conn->oic_item);
ptlrpc_connection_put(imp_conn->oic_conn);
OBD_FREE(imp_conn, sizeof(*imp_conn));
ENTRY;
/* It's possible that an export may disconnect itself, but
- * nothing else will be added to this list. */
- while (!list_empty(list)) {
- exp = list_first_entry(list, struct obd_export,
- exp_obd_chain);
+ * nothing else will be added to this list.
+ */
+ while ((exp = list_first_entry_or_null(list, struct obd_export,
+ exp_obd_chain)) != NULL) {
/* need for safe call CDEBUG after obd_disconnect */
class_export_get(exp);
/* We increase the max_rpcs_in_flight, then wakeup some waiters. */
for (i = 0; i < diff; i++) {
- if (list_empty(&cli->cl_flight_waiters))
+ orsw = list_first_entry_or_null(&cli->cl_flight_waiters,
+ struct obd_request_slot_waiter,
+ orsw_entry);
+ if (!orsw)
break;
- orsw = list_first_entry(&cli->cl_flight_waiters,
- struct obd_request_slot_waiter,
- orsw_entry);
list_del_init(&orsw->orsw_entry);
cli->cl_rpcs_in_flight++;
wake_up(&orsw->orsw_waitq);
return -EINVAL;
}
- while (!list_empty(&deathrow)) {
- data = list_entry(deathrow.next, struct uuid_nid_data,
- un_list);
+ while ((data = list_first_entry_or_null(&deathrow, struct uuid_nid_data,
+ un_list)) != NULL) {
list_del(&data->un_list);
CDEBUG(D_INFO, "del uuid %s %s/%d\n",
};
assert_osc_object_is_locked(obj);
- while (!list_empty(&obj->oo_hp_exts)) {
- ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
- oe_link);
+ while ((ext = list_first_entry_or_null(&obj->oo_hp_exts,
+ struct osc_extent,
+ oe_link)) != NULL) {
if (!try_to_add_extent_for_io(cli, ext, &data))
return data.erd_page_count;
EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext);
if (data.erd_page_count == data.erd_max_pages)
return data.erd_page_count;
- while (!list_empty(&obj->oo_urgent_exts)) {
- ext = list_entry(obj->oo_urgent_exts.next,
- struct osc_extent, oe_link);
+ while ((ext = list_first_entry_or_null(&obj->oo_urgent_exts,
+ struct osc_extent,
+ oe_link)) != NULL) {
if (!try_to_add_extent_for_io(cli, ext, &data))
return data.erd_page_count;
}
* extents can usually only be added if the rpclist was empty, so if we
* can't add one, we continue on to trying to add normal extents. This
* is so we don't miss adding extra extents to an RPC containing high
- * priority or urgent extents. */
- while (!list_empty(&obj->oo_full_exts)) {
- ext = list_entry(obj->oo_full_exts.next,
- struct osc_extent, oe_link);
+ * priority or urgent extents.
+ */
+ while ((ext = list_first_entry_or_null(&obj->oo_full_exts,
+ struct osc_extent,
+ oe_link)) != NULL) {
if (!try_to_add_extent_for_io(cli, ext, &data))
break;
}
osc_list_maint(cli, obj);
- while (!list_empty(&list)) {
+ while ((ext = list_first_entry_or_null(&list,
+ struct osc_extent,
+ oe_link)) != NULL) {
int rc;
- ext = list_entry(list.next, struct osc_extent, oe_link);
list_del_init(&ext->oe_link);
/* extent may be in OES_ACTIVE state because inode mutex
void osc_lock_wake_waiters(const struct lu_env *env, struct osc_object *osc,
struct osc_lock *oscl)
{
+ struct osc_lock *scan;
+
spin_lock(&osc->oo_ol_spin);
list_del_init(&oscl->ols_nextlock_oscobj);
spin_unlock(&osc->oo_ol_spin);
spin_lock(&oscl->ols_lock);
- while (!list_empty(&oscl->ols_waiting_list)) {
- struct osc_lock *scan;
-
- scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
- ols_wait_entry);
+ while ((scan = list_first_entry_or_null(&oscl->ols_waiting_list,
+ struct osc_lock,
+ ols_wait_entry)) != NULL) {
list_del_init(&scan->ols_wait_entry);
cl_sync_io_note(env, scan->ols_owner, 0);
if (--maxscan < 0)
break;
- opg = list_entry(cli->cl_lru_list.next, struct osc_page,
- ops_lru);
+ opg = list_first_entry(&cli->cl_lru_list, struct osc_page,
+ ops_lru);
page = opg->ops_cl.cpl_page;
if (lru_page_busy(cli, page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
max_scans = atomic_read(&cache->ccc_users) - 2;
- while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
- cli = list_entry(cache->ccc_lru.next, struct client_obd,
- cl_lru_osc);
-
+ while (--max_scans > 0 &&
+ (cli = list_first_entry_or_null(&cache->ccc_lru,
+ struct client_obd,
+ cl_lru_osc)) != NULL) {
CDEBUG(D_CACHE, "%s: cli %p LRU pages: %ld, busy: %ld.\n",
cli_name(cli), cli,
atomic_long_read(&cli->cl_lru_in_list),
return SHRINK_STOP;
spin_lock(&osc_shrink_lock);
- while (!list_empty(&osc_shrink_list)) {
- cli = list_entry(osc_shrink_list.next, struct client_obd,
- cl_shrink_list);
-
+ while ((cli = list_first_entry_or_null(&osc_shrink_list,
+ struct client_obd,
+ cl_shrink_list)) != NULL) {
if (stop_anchor == NULL)
stop_anchor = cli;
else if (cli == stop_anchor)
}
/* first page in the list */
- oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
+ oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);
crattr = &osc_env_info(env)->oti_req_attr;
memset(crattr, 0, sizeof(*crattr));
osc_release_ppga(pga, page_count);
}
/* this should happen rarely and is pretty bad, it makes the
- * pending list not follow the dirty order */
- while (!list_empty(ext_list)) {
- ext = list_entry(ext_list->next, struct osc_extent,
- oe_link);
+ * pending list not follow the dirty order
+ */
+ while ((ext = list_first_entry_or_null(ext_list,
+ struct osc_extent,
+ oe_link)) != NULL) {
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 0, rc);
}
return NULL;
}
- request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
- rq_list);
+ request = list_first_entry(&pool->prp_req_list, struct ptlrpc_request,
+ rq_list);
list_del_init(&request->rq_list);
spin_unlock(&pool->prp_lock);
if (list_empty(&imp->imp_unreplied_list))
return 0;
- req = list_entry(imp->imp_unreplied_list.next, struct ptlrpc_request,
- rq_unreplied_list);
+ req = list_first_entry(&imp->imp_unreplied_list, struct ptlrpc_request,
+ rq_unreplied_list);
LASSERTF(req->rq_xid >= 1, "XID:%llu\n", req->rq_xid);
if (imp->imp_known_replied_xid < req->rq_xid - 1)
struct nrs_fifo_head *head = policy->pol_private;
struct ptlrpc_nrs_request *nrq;
- nrq = unlikely(list_empty(&head->fh_list)) ? NULL :
- list_entry(head->fh_list.next, struct ptlrpc_nrs_request,
- nr_u.fifo.fr_list);
+ nrq = list_first_entry_or_null(&head->fh_list,
+ struct ptlrpc_nrs_request,
+ nr_u.fifo.fr_list);
if (likely(!peek && nrq != NULL)) {
struct ptlrpc_request *req = container_of(nrq,
spin_lock(&svcpt->scp_rep_lock);
}
- rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state, rs_list);
+ rs = list_first_entry(&svcpt->scp_rep_idle,
+ struct ptlrpc_reply_state, rs_list);
list_del(&rs->rs_list);
spin_unlock(&svcpt->scp_rep_lock);
spin_lock(&sec_gc_ctx_list_lock);
- while (!list_empty(&sec_gc_ctx_list)) {
- ctx = list_entry(sec_gc_ctx_list.next,
- struct ptlrpc_cli_ctx, cc_gc_chain);
+ while ((ctx = list_first_entry_or_null(&sec_gc_ctx_list,
+ struct ptlrpc_cli_ctx,
+ cc_gc_chain)) != NULL) {
list_del_init(&ctx->cc_gc_chain);
spin_unlock(&sec_gc_ctx_list_lock);
return posted;
}
- rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ rqbd = list_first_entry(&svcpt->scp_rqbd_idle,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
/* assume we will post successfully */
svcpt->scp_nrqbds_posted++;
* I expect only about 1 or 2 rqbds need to be recycled here
*/
while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
- rqbd = list_entry(svcpt->scp_hist_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ rqbd = list_first_entry(&svcpt->scp_hist_rqbds,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
list_del(&rqbd->rqbd_list);
svcpt->scp_hist_nrqbds--;
* we took additional refcount so entries can't be deleted from list, no
* locking is needed
*/
- while (!list_empty(&work_list)) {
- rq = list_entry(work_list.next, struct ptlrpc_request,
- rq_timed_list);
+ while ((rq = list_first_entry_or_null(&work_list,
+ struct ptlrpc_request,
+ rq_timed_list)) != NULL) {
list_del_init(&rq->rq_timed_list);
if (ptlrpc_at_send_early_reply(rq) == 0)
RETURN(0);
}
- req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
+ req = list_first_entry(&svcpt->scp_req_incoming,
+ struct ptlrpc_request, rq_list);
list_del_init(&req->rq_list);
svcpt->scp_nreqs_incoming--;
/*
wake_up_all(&svcpt->scp_waitq);
- while (!list_empty(&svcpt->scp_threads)) {
- thread = list_entry(svcpt->scp_threads.next,
- struct ptlrpc_thread, t_link);
+ while ((thread = list_first_entry_or_null(&svcpt->scp_threads,
+ struct ptlrpc_thread,
+ t_link)) != NULL) {
if (thread_is_stopped(thread)) {
list_move(&thread->t_link, &zombie);
continue;
spin_unlock(&svcpt->scp_lock);
- while (!list_empty(&zombie)) {
- thread = list_entry(zombie.next,
- struct ptlrpc_thread, t_link);
+ while ((thread = list_first_entry_or_null(&zombie,
+ struct ptlrpc_thread,
+ t_link)) != NULL) {
list_del(&thread->t_link);
OBD_FREE_PTR(thread);
}
break;
spin_lock(&svcpt->scp_rep_lock);
- while (!list_empty(&svcpt->scp_rep_active)) {
- rs = list_entry(svcpt->scp_rep_active.next,
- struct ptlrpc_reply_state, rs_list);
+ while ((rs = list_first_entry_or_null(&svcpt->scp_rep_active,
+ struct ptlrpc_reply_state,
+ rs_list)) != NULL) {
spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
spin_unlock(&rs->rs_lock);
* all unlinked) and no service threads, so I'm the only
* thread noodling the request queue now
*/
- while (!list_empty(&svcpt->scp_req_incoming)) {
- req = list_entry(svcpt->scp_req_incoming.next,
- struct ptlrpc_request, rq_list);
-
+ while ((req = list_first_entry_or_null(&svcpt->scp_req_incoming,
+ struct ptlrpc_request,
+ rq_list)) != NULL) {
list_del(&req->rq_list);
svcpt->scp_nreqs_incoming--;
ptlrpc_server_finish_request(svcpt, req);
* Now free all the request buffers since nothing
* references them any more...
*/
-
- while (!list_empty(&svcpt->scp_rqbd_idle)) {
- rqbd = list_entry(svcpt->scp_rqbd_idle.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ while ((rqbd = list_first_entry_or_null(&svcpt->scp_rqbd_idle,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list)) != NULL)
ptlrpc_free_rqbd(rqbd);
- }
+
ptlrpc_wait_replies(svcpt);
- while (!list_empty(&svcpt->scp_rep_idle)) {
- rs = list_entry(svcpt->scp_rep_idle.next,
- struct ptlrpc_reply_state,
- rs_list);
+ while ((rs = list_first_entry_or_null(&svcpt->scp_rep_idle,
+ struct ptlrpc_reply_state,
+ rs_list)) != NULL) {
list_del(&rs->rs_list);
OBD_FREE_LARGE(rs, svc->srv_max_reply_size);
}