struct lu_fld_target,
ft_chain);
else
- target = list_entry(target->ft_chain.next,
+ target = list_first_entry(&target->ft_chain,
struct lu_fld_target,
ft_chain);
spin_unlock(&fld->lcf_lock);
struct ldlm_lock *lock;
LASSERT(!list_empty(&node->li_group));
- lock = list_entry(node->li_group.next, struct ldlm_lock,
- l_sl_policy);
+ lock = list_first_entry(&node->li_group, struct ldlm_lock,
+ l_sl_policy);
return &lock->l_policy_data.l_extent;
}
spin_lock(&exp->exp_lock);
while (!list_empty(&exp->exp_outstanding_replies)) {
struct ptlrpc_reply_state *rs =
- list_entry(exp->exp_outstanding_replies.next,
- struct ptlrpc_reply_state, rs_exp_list);
+ list_first_entry(&exp->exp_outstanding_replies,
+ struct ptlrpc_reply_state,
+ rs_exp_list);
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
spin_lock(&svcpt->scp_rep_lock);
spin_lock(&obd->obd_recovery_task_lock);
if (!list_empty(&obd->obd_req_replay_queue)) {
- req = list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
+ req = list_first_entry(&obd->obd_req_replay_queue,
+ struct ptlrpc_request, rq_list);
req_transno = lustre_msg_get_transno(req->rq_reqmsg);
}
spin_lock(&obd->obd_recovery_task_lock);
if (!list_empty(&obd->obd_lock_replay_queue)) {
- req = list_entry(obd->obd_lock_replay_queue.next,
- struct ptlrpc_request, rq_list);
+ req = list_first_entry(&obd->obd_lock_replay_queue,
+ struct ptlrpc_request, rq_list);
list_del_init(&req->rq_list);
spin_unlock(&obd->obd_recovery_task_lock);
} else {
spin_lock(&obd->obd_recovery_task_lock);
if (!list_empty(&obd->obd_final_req_queue)) {
- req = list_entry(obd->obd_final_req_queue.next,
- struct ptlrpc_request, rq_list);
+ req = list_first_entry(&obd->obd_final_req_queue,
+ struct ptlrpc_request, rq_list);
list_del_init(&req->rq_list);
spin_unlock(&obd->obd_recovery_task_lock);
if (req->rq_export->exp_in_recovery) {
if (!list_empty(&obd->obd_req_replay_queue)) {
struct ptlrpc_request *req;
- req = list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
+ req = list_first_entry(&obd->obd_req_replay_queue,
+ struct ptlrpc_request, rq_list);
transno = lustre_msg_get_transno(req->rq_reqmsg);
}
* replay has been executed by update with the
* same transno
*/
- req = list_entry(obd->obd_req_replay_queue.next,
- struct ptlrpc_request, rq_list);
+ req = list_first_entry(&obd->obd_req_replay_queue,
+ struct ptlrpc_request, rq_list);
list_del_init(&req->rq_list);
obd->obd_requests_queued_for_recovery--;
spin_lock_bh(&exp->exp_bl_list_lock);
if (!list_empty(&exp->exp_bl_list)) {
- lock = list_entry(exp->exp_bl_list.next,
- struct ldlm_lock, l_exp_list);
+ lock = list_first_entry(&exp->exp_bl_list,
+ struct ldlm_lock, l_exp_list);
LDLM_LOCK_GET(lock);
list_del_init(&lock->l_exp_list);
} else {
spin_lock_bh(&waiting_locks_spinlock);
while (!list_empty(&waiting_locks_list)) {
- lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
- l_pending_chain);
+ lock = list_first_entry(&waiting_locks_list, struct ldlm_lock,
+ l_pending_chain);
if (lock->l_callback_timestamp > ktime_get_seconds() ||
lock->l_req_mode == LCK_GROUP)
break;
time64_t now = ktime_get_seconds();
timeout_t delta = 0;
- lock = list_entry(waiting_locks_list.next, struct ldlm_lock,
- l_pending_chain);
+ lock = list_first_entry(&waiting_locks_list, struct ldlm_lock,
+ l_pending_chain);
if (lock->l_callback_timestamp - now > 0)
delta = lock->l_callback_timestamp - now;
mod_timer(&waiting_locks_timer,
struct lfsck_lmv_unit *llu;
spin_lock(&lfsck->li_lock);
- llu = list_entry(lfsck->li_list_lmv.next,
- struct lfsck_lmv_unit, llu_link);
+ llu = list_first_entry(&lfsck->li_list_lmv,
+ struct lfsck_lmv_unit, llu_link);
list_del_init(&llu->llu_link);
spin_unlock(&lfsck->li_lock);
struct lfsck_lmv_unit *llu;
spin_lock(&lfsck->li_lock);
- llu = list_entry(lfsck->li_list_lmv.next,
- struct lfsck_lmv_unit, llu_link);
+ llu = list_first_entry(&lfsck->li_list_lmv,
+ struct lfsck_lmv_unit, llu_link);
list_del_init(&llu->llu_link);
spin_unlock(&lfsck->li_lock);
__u32 *gen;
if (com->lc_type == LFSCK_TYPE_LAYOUT) {
- ltd = list_entry(lad->lad_mdt_list.next,
- struct lfsck_tgt_desc,
- ltd_layout_list);
+ ltd = list_first_entry(&lad->lad_mdt_list,
+ struct lfsck_tgt_desc,
+ ltd_layout_list);
list = &ltd->ltd_layout_list;
gen = &ltd->ltd_layout_gen;
} else {
struct lfsck_namespace *ns = com->lc_file_ram;
- ltd = list_entry(lad->lad_mdt_list.next,
- struct lfsck_tgt_desc,
- ltd_namespace_list);
+ ltd = list_first_entry(&lad->lad_mdt_list,
+ struct lfsck_tgt_desc,
+ ltd_namespace_list);
list = &ltd->ltd_namespace_list;
gen = &ltd->ltd_namespace_gen;
lr->lr_flags2 = ns->ln_flags & ~LF_INCOMPLETE;
!thread_is_running(mthread)))
GOTO(cleanup, rc = lad->lad_post_result);
- lar = list_entry(lad->lad_req_list.next,
- struct lfsck_assistant_req,
- lar_list);
+ lar = list_first_entry(&lad->lad_req_list,
+ struct lfsck_assistant_req,
+ lar_list);
/* Only the lfsck_assistant_engine thread itself can
* remove the "lar" from the head of the list, LFSCK
* engine thread only inserts other new "lar" at the
thread_set_flags(athread, SVC_STOPPING);
while (!list_empty(&lad->lad_req_list)) {
- lar = list_entry(lad->lad_req_list.next,
- struct lfsck_assistant_req,
- lar_list);
+ lar = list_first_entry(&lad->lad_req_list,
+ struct lfsck_assistant_req,
+ lar_list);
list_del_init(&lar->lar_list);
lad->lad_prefetched--;
spin_unlock(&lad->lad_lock);
spin_lock(&ltds->ltd_lock);
while (!list_empty(&lad->lad_ost_phase2_list)) {
- ltd = list_entry(lad->lad_ost_phase2_list.next,
- struct lfsck_tgt_desc,
- ltd_layout_phase_list);
+ ltd = list_first_entry(&lad->lad_ost_phase2_list,
+ struct lfsck_tgt_desc,
+ ltd_layout_phase_list);
list_del_init(&ltd->ltd_layout_phase_list);
if (bk->lb_param & LPF_OST_ORPHAN) {
spin_unlock(&ltds->ltd_lock);
llsd->llsd_touch_gen++;
spin_lock(&llsd->llsd_lock);
while (!list_empty(&llsd->llsd_master_list)) {
- llst = list_entry(llsd->llsd_master_list.next,
- struct lfsck_layout_slave_target,
- llst_list);
+ llst = list_first_entry(&llsd->llsd_master_list,
+ struct lfsck_layout_slave_target,
+ llst_list);
if (llst->llst_gen == llsd->llsd_touch_gen)
break;
llsd->llsd_touch_gen++;
spin_lock(&llsd->llsd_lock);
while (!list_empty(&llsd->llsd_master_list)) {
- llst = list_entry(llsd->llsd_master_list.next,
- struct lfsck_layout_slave_target,
- llst_list);
+ llst = list_first_entry(&llsd->llsd_master_list,
+ struct lfsck_layout_slave_target,
+ llst_list);
if (llst->llst_gen == llsd->llsd_touch_gen)
break;
spin_lock(&llsd->llsd_lock);
while (!list_empty(&llsd->llsd_master_list)) {
- llst = list_entry(llsd->llsd_master_list.next,
- struct lfsck_layout_slave_target, llst_list);
+ llst = list_first_entry(&llsd->llsd_master_list,
+ struct lfsck_layout_slave_target,
+ llst_list);
list_del_init(&llst->llst_list);
spin_unlock(&llsd->llsd_lock);
lfsck_layout_llst_put(llst);
if (list_empty(&lad->lad_req_list))
return;
- llr = list_entry(lad->lad_req_list.next,
- struct lfsck_layout_req,
- llr_lar.lar_list);
+ llr = list_first_entry(&lad->lad_req_list,
+ struct lfsck_layout_req,
+ llr_lar.lar_list);
pos->lp_oit_cookie = llr->llr_lar.lar_parent->lso_oit_cookie - 1;
}
down_write(&ltds->ltd_rw_sem);
while (!list_empty(&ltds->ltd_orphan)) {
- ltd = list_entry(ltds->ltd_orphan.next,
- struct lfsck_tgt_desc,
- ltd_orphan_list);
+ ltd = list_first_entry(&ltds->ltd_orphan,
+ struct lfsck_tgt_desc,
+ ltd_orphan_list);
list_del_init(&ltd->ltd_orphan_list);
rc = __lfsck_add_target(env, lfsck, ltd, for_ost, true);
/* Do not hold the semaphore for too long time. */
struct lfsck_lmv_unit *llu;
struct lfsck_lmv *llmv;
- llu = list_entry(lfsck->li_list_lmv.next,
- struct lfsck_lmv_unit, llu_link);
+ llu = list_first_entry(&lfsck->li_list_lmv,
+ struct lfsck_lmv_unit, llu_link);
llmv = &llu->llu_lmv;
LASSERTF(atomic_read(&llmv->ll_ref) == 1,
RETURN(1);
spin_lock(&lad->lad_lock);
- lar = list_entry(lad->lad_req_list.next, struct lfsck_assistant_req,
- lar_list);
+ lar = list_first_entry(&lad->lad_req_list, struct lfsck_assistant_req,
+ lar_list);
list_del_init(&lar->lar_list);
spin_unlock(&lad->lad_lock);
struct lfsck_lmv_unit *llu;
spin_lock(&lfsck->li_lock);
- llu = list_entry(lfsck->li_list_lmv.next,
- struct lfsck_lmv_unit, llu_link);
+ llu = list_first_entry(&lfsck->li_list_lmv,
+ struct lfsck_lmv_unit, llu_link);
list_del_init(&llu->llu_link);
spin_unlock(&lfsck->li_lock);
if (list_empty(&lad->lad_req_list))
return;
- lnr = list_entry(lad->lad_req_list.next,
- struct lfsck_namespace_req,
- lnr_lar.lar_list);
+ lnr = list_first_entry(&lad->lad_req_list,
+ struct lfsck_namespace_req,
+ lnr_lar.lar_list);
pos->lp_oit_cookie = lnr->lnr_lar.lar_parent->lso_oit_cookie;
pos->lp_dir_cookie = lnr->lnr_dir_cookie - 1;
pos->lp_dir_parent = lnr->lnr_lar.lar_parent->lso_fid;
list_add_tail(&work->gl_list, &gl_list);
}
- work = list_entry(gl_list.next, struct ldlm_glimpse_work, gl_list);
+ work = list_first_entry(&gl_list, struct ldlm_glimpse_work, gl_list);
lock_res(res);
list_for_each(pos, &res->lr_granted) {
while (!list_empty(&head)) {
struct mgs_nidtbl_target *tgt;
- tgt = list_entry(head.next, struct mgs_nidtbl_target, mnt_list);
+ tgt = list_first_entry(&head, struct mgs_nidtbl_target,
+ mnt_list);
list_del(&tgt->mnt_list);
OBD_FREE_PTR(tgt);
}
/* we need extra list - because hash_exit called to early */
/* not need locking because all clients is died */
while (!list_empty(&obd->obd_nid_stats)) {
- stat = list_entry(obd->obd_nid_stats.next,
- struct nid_stat, nid_list);
+ stat = list_first_entry(&obd->obd_nid_stats,
+ struct nid_stat, nid_list);
list_del_init(&stat->nid_list);
cfs_hash_del(hash, &stat->nid, &stat->nid_hash);
lprocfs_free_client_stats(stat);
lprocfs_nid_stats_clear_write_cb, &free_list);
while (!list_empty(&free_list)) {
- client_stat = list_entry(free_list.next, struct nid_stat,
- nid_list);
+ client_stat = list_first_entry(&free_list, struct nid_stat,
+ nid_list);
list_del_init(&client_stat->nid_list);
lprocfs_free_client_stats(client_stat);
}
while (!list_empty(&ofd->ofd_inconsistency_list)) {
__set_current_state(TASK_RUNNING);
- oii = list_entry(ofd->ofd_inconsistency_list.next,
- struct ofd_inconsistency_item,
- oii_list);
+ oii = list_first_entry(&ofd->ofd_inconsistency_list,
+ struct ofd_inconsistency_item,
+ oii_list);
list_del_init(&oii->oii_list);
spin_unlock(&ofd->ofd_inconsistency_lock);
ofd_inconsistency_verify_one(env, ofd, oii, lrl);
while (!list_empty(&ofd->ofd_inconsistency_list)) {
struct ofd_object *fo;
- oii = list_entry(ofd->ofd_inconsistency_list.next,
- struct ofd_inconsistency_item,
- oii_list);
+ oii = list_first_entry(&ofd->ofd_inconsistency_list,
+ struct ofd_inconsistency_item,
+ oii_list);
list_del_init(&oii->oii_list);
fo = oii->oii_obj;
spin_unlock(&ofd->ofd_inconsistency_lock);
if (likely(!list_empty(&scrub->os_inconsistent_items))) {
struct osd_inconsistent_item *oii;
- oii = list_entry(scrub->os_inconsistent_items.next,
- struct osd_inconsistent_item, oii_list);
+ oii = list_first_entry(&scrub->os_inconsistent_items,
+ struct osd_inconsistent_item,
+ oii_list);
*oic = &oii->oii_cache;
scrub->os_in_prior = 1;
if (list_empty(&dev->od_ios_list))
break;
- item = list_entry(dev->od_ios_list.next,
- struct osd_ios_item, oii_list);
+ item = list_first_entry(&dev->od_ios_list,
+ struct osd_ios_item, oii_list);
list_del_init(&item->oii_list);
LASSERT(item->oii_scandir != NULL);
while (!list_empty(&dev->od_index_restore_list)) {
struct lustre_index_restore_unit *liru;
- liru = list_entry(dev->od_index_restore_list.next,
- struct lustre_index_restore_unit,
- liru_link);
+ liru = list_first_entry(&dev->od_index_restore_list,
+ struct lustre_index_restore_unit,
+ liru_link);
list_del(&liru->liru_link);
if (buf)
osd_index_restore(info->oti_env, dev, liru,
struct osd_object *obj;
while (!list_empty(&oh->ot_sa_list)) {
- obj = list_entry(oh->ot_sa_list.next,
- struct osd_object, oo_sa_linkage);
+ obj = list_first_entry(&oh->ot_sa_list,
+ struct osd_object, oo_sa_linkage);
write_lock(&obj->oo_attr_lock);
list_del_init(&obj->oo_sa_linkage);
write_unlock(&obj->oo_attr_lock);
GOTO(out, rc = val);
if (scrub->os_in_prior)
- oii = list_entry(scrub->os_inconsistent_items.next,
- struct osd_inconsistent_item, oii_list);
+ oii = list_first_entry(&scrub->os_inconsistent_items,
+ struct osd_inconsistent_item, oii_list);
if (oid < sf->sf_pos_latest_start && !oii)
GOTO(out, rc = 0);
if (likely(!list_empty(&scrub->os_inconsistent_items))) {
struct osd_inconsistent_item *oii;
- oii = list_entry(scrub->os_inconsistent_items.next,
- struct osd_inconsistent_item, oii_list);
+ oii = list_first_entry(&scrub->os_inconsistent_items,
+ struct osd_inconsistent_item,
+ oii_list);
*fid = oii->oii_cache.oic_fid;
*oid = oii->oii_cache.oic_dnode;
scrub->os_in_prior = 1;
while (!list_empty(&scrub->os_inconsistent_items)) {
struct osd_inconsistent_item *oii;
- oii = list_entry(scrub->os_inconsistent_items.next,
- struct osd_inconsistent_item, oii_list);
+ oii = list_first_entry(&scrub->os_inconsistent_items,
+ struct osd_inconsistent_item, oii_list);
list_del_init(&oii->oii_list);
OBD_FREE_PTR(oii);
}
while (!list_empty(&dev->od_ios_list)) {
struct osd_ios_item *item;
- item = list_entry(dev->od_ios_list.next,
- struct osd_ios_item, oii_list);
+ item = list_first_entry(&dev->od_ios_list,
+ struct osd_ios_item, oii_list);
list_del_init(&item->oii_list);
item->oii_scan_dir(env, dev, item->oii_parent,
item->oii_handle_dirent, item->oii_flags);
while (!list_empty(&dev->od_index_restore_list)) {
struct lustre_index_restore_unit *liru;
- liru = list_entry(dev->od_index_restore_list.next,
- struct lustre_index_restore_unit,
- liru_link);
+ liru = list_first_entry(&dev->od_index_restore_list,
+ struct lustre_index_restore_unit,
+ liru_link);
list_del(&liru->liru_link);
if (buf)
osd_index_restore(env, dev, liru, buf,
while (!list_empty(&list)) {
struct osp_job_req_args *jra;
- jra = list_entry(list.next, struct osp_job_req_args,
- jra_committed_link);
+ jra = list_first_entry(&list, struct osp_job_req_args,
+ jra_committed_link);
LASSERT(jra->jra_magic == OSP_JOB_MAGIC);
list_del_init(&jra->jra_committed_link);
__u32 update_req_size;
int rc;
- ours = list_entry(our->our_req_list.next,
- struct osp_update_request_sub, ours_list);
+ ours = list_first_entry(&our->our_req_list,
+ struct osp_update_request_sub, ours_list);
update_req_size = object_update_request_size(ours->ours_req);
req_capsule_set_size(&req->rq_pill, &RMF_OUT_UPDATE_HEADER, RCL_CLIENT,
update_req_size + sizeof(*ouh));
RETURN(-ENOMEM);
if (buf_count == 1) {
- ours = list_entry(our->our_req_list.next,
- struct osp_update_request_sub, ours_list);
+ ours = list_first_entry(&our->our_req_list,
+ struct osp_update_request_sub,
+ ours_list);
/* Let's check if it can be packed inline */
if (object_update_request_size(ours->ours_req) +
while (cfs_hash_bd_count_get(&bd) > hw) {
if (unlikely(list_empty(&bkt->ntb_lru)))
break;
- cli = list_entry(bkt->ntb_lru.next,
- struct nrs_tbf_client,
- tc_lru);
+ cli = list_first_entry(&bkt->ntb_lru,
+ struct nrs_tbf_client,
+ tc_lru);
LASSERT(atomic_read(&cli->tc_ref) == 0);
cfs_hash_bd_del_locked(hs, &bd, &cli->tc_hnode);
list_move(&cli->tc_lru, &zombies);
while (cfs_hash_bd_count_get(&bd) > hw) {
if (unlikely(list_empty(&bkt->ntb_lru)))
break;
- cli = list_entry(bkt->ntb_lru.next,
- struct nrs_tbf_client,
- tc_lru);
+ cli = list_first_entry(&bkt->ntb_lru,
+ struct nrs_tbf_client,
+ tc_lru);
LASSERT(atomic_read(&cli->tc_ref) == 0);
cfs_hash_bd_del_locked(hs, &bd, &cli->tc_hnode);
list_move(&cli->tc_lru, &zombies);
cli = container_of(node, struct nrs_tbf_client, tc_node);
LASSERT(cli->tc_in_heap);
if (unlikely(peek)) {
- nrq = list_entry(cli->tc_list.next,
- struct ptlrpc_nrs_request,
- nr_u.tbf.tr_list);
+ nrq = list_first_entry(&cli->tc_list,
+ struct ptlrpc_nrs_request,
+ nr_u.tbf.tr_list);
} else {
struct nrs_tbf_rule *rule = cli->tc_rule;
__u64 now = ktime_to_ns(ktime_get());
ntoken = 1;
if (ntoken > 0) {
- nrq = list_entry(cli->tc_list.next,
+ nrq = list_first_entry(&cli->tc_list,
struct ptlrpc_nrs_request,
nr_u.tbf.tr_list);
ntoken--;
* is not strictly necessary.
*/
spin_lock(&pet_lock);
- obd = list_entry(pet_list.next, struct obd_device,
- obd_evict_list);
+ obd = list_first_entry(&pet_list, struct obd_device,
+ obd_evict_list);
spin_unlock(&pet_lock);
expire_time = ktime_get_real_seconds() - PING_EVICT_TIMEOUT;
*/
spin_lock(&obd->obd_dev_lock);
while (!list_empty(&obd->obd_exports_timed)) {
- exp = list_entry(obd->obd_exports_timed.next,
- struct obd_export,
- exp_obd_chain_timed);
+ exp = list_first_entry(&obd->obd_exports_timed,
+ struct obd_export,
+ exp_obd_chain_timed);
if (expire_time > exp->exp_last_request_time) {
struct obd_uuid *client_uuid;
RETURN_EXIT;
}
- newest_exp = list_entry(exp->exp_obd->obd_exports_timed.prev,
- struct obd_export, exp_obd_chain_timed);
+ newest_exp = list_last_entry(&exp->exp_obd->obd_exports_timed,
+ struct obd_export, exp_obd_chain_timed);
list_move_tail(&exp->exp_obd_chain_timed,
&exp->exp_obd->obd_exports_timed);
spin_lock(&qsd->qsd_adjust_lock);
if (!list_empty(&qsd->qsd_adjust_list)) {
struct lquota_entry *lqe;
- lqe = list_entry(qsd->qsd_adjust_list.next,
- struct lquota_entry, lqe_link);
+ lqe = list_first_entry(&qsd->qsd_adjust_list,
+ struct lquota_entry, lqe_link);
if (ktime_get_seconds() >= lqe->lqe_adjust_time)
job_pending = true;
}
spin_lock(&qsd->qsd_adjust_lock);
cur_time = ktime_get_seconds();
while (!list_empty(&qsd->qsd_adjust_list)) {
- lqe = list_entry(qsd->qsd_adjust_list.next,
- struct lquota_entry, lqe_link);
+ lqe = list_first_entry(&qsd->qsd_adjust_list,
+ struct lquota_entry, lqe_link);
/* deferred items are sorted by time */
if (lqe->lqe_adjust_time > cur_time)
break;
spin_lock(&qsd->qsd_adjust_lock);
while (!list_empty(&qsd->qsd_adjust_list)) {
- lqe = list_entry(qsd->qsd_adjust_list.next,
- struct lquota_entry, lqe_link);
+ lqe = list_first_entry(&qsd->qsd_adjust_list,
+ struct lquota_entry, lqe_link);
list_del_init(&lqe->lqe_link);
lqe_putref(lqe);
}
ENTRY;
mutex_lock(&lsi->lsi_lwp_mutex);
while (!list_empty(&lsi->lsi_lwp_list)) {
- lwp = list_entry(lsi->lsi_lwp_list.next, struct obd_device,
- obd_lwp_list);
+ lwp = list_first_entry(&lsi->lsi_lwp_list, struct obd_device,
+ obd_lwp_list);
list_del_init(&lwp->obd_lwp_list);
lwp->obd_force = 1;
mutex_unlock(&lsi->lsi_lwp_mutex);
spin_lock(&tdtd->tdtd_replay_list_lock);
if (!list_empty(&tdtd->tdtd_replay_list)) {
- dtrq = list_entry(tdtd->tdtd_replay_list.next,
- struct distribute_txn_replay_req, dtrq_list);
+ dtrq = list_first_entry(&tdtd->tdtd_replay_list,
+ struct distribute_txn_replay_req,
+ dtrq_list);
list_del_init(&dtrq->dtrq_list);
}
spin_unlock(&tdtd->tdtd_replay_list_lock);
spin_lock(&tdtd->tdtd_replay_list_lock);
if (!list_empty(&tdtd->tdtd_replay_list)) {
- dtrq = list_entry(tdtd->tdtd_replay_list.next,
- struct distribute_txn_replay_req, dtrq_list);
+ dtrq = list_first_entry(&tdtd->tdtd_replay_list,
+ struct distribute_txn_replay_req,
+ dtrq_list);
transno = dtrq->dtrq_master_transno;
}
spin_unlock(&tdtd->tdtd_replay_list_lock);