From dcc8b9c00d5de4bf4580e74bea47e3f99b2ad5da Mon Sep 17 00:00:00 2001 From: Mr NeilBrown Date: Sun, 7 Jun 2020 19:24:24 -0400 Subject: [PATCH] LU-9679 ptlrpc: list_for_each improvements. 1/ use list_for_each_entry_safe() instead of list_for_each_safe() and similar. 2/ use list_first_entry() and list_last_entry() where appropriate. 3/ When removing everything from a list, use while ((x = list_first_entry_or_null()) { as it makes the intent clear Linux-commit: ef8e5dbbb09035a0c41aa47a328e6248702d4d2b Signed-off-by: Mr NeilBrown Change-Id: I490afcb70d18170e4ecc5d062d327d141668d3db Reviewed-on: https://review.whamcloud.com/39133 Tested-by: jenkins Tested-by: Maloo Reviewed-by: James Simmons Reviewed-by: Shaun Tancheff Reviewed-by: Oleg Drokin --- lustre/ptlrpc/client.c | 101 ++++++++++++++---------------------------------- lustre/ptlrpc/import.c | 37 +++++++----------- lustre/ptlrpc/pinger.c | 6 +-- lustre/ptlrpc/ptlrpcd.c | 16 +++----- lustre/ptlrpc/recover.c | 48 +++++++++++------------ lustre/ptlrpc/service.c | 13 ++----- 6 files changed, 77 insertions(+), 144 deletions(-) diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c index 22c831c..6f9bfbf 100644 --- a/lustre/ptlrpc/client.c +++ b/lustre/ptlrpc/client.c @@ -551,14 +551,14 @@ void ptlrpc_request_cache_free(struct ptlrpc_request *req) */ void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool) { - struct list_head *l, *tmp; struct ptlrpc_request *req; LASSERT(pool != NULL); spin_lock(&pool->prp_lock); - list_for_each_safe(l, tmp, &pool->prp_req_list) { - req = list_entry(l, struct ptlrpc_request, rq_list); + while ((req = list_first_entry_or_null(&pool->prp_req_list, + struct ptlrpc_request, + rq_list))) { list_del(&req->rq_list); LASSERT(req->rq_reqbuf); LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); @@ -702,17 +702,14 @@ static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request) void ptlrpc_add_unreplied(struct ptlrpc_request *req) { struct obd_import *imp = req->rq_import; - struct 
list_head *tmp; struct ptlrpc_request *iter; assert_spin_locked(&imp->imp_lock); LASSERT(list_empty(&req->rq_unreplied_list)); /* unreplied list is sorted by xid in ascending order */ - list_for_each_prev(tmp, &imp->imp_unreplied_list) { - iter = list_entry(tmp, struct ptlrpc_request, - rq_unreplied_list); - + list_for_each_entry_reverse(iter, &imp->imp_unreplied_list, + rq_unreplied_list) { LASSERT(req->rq_xid != iter->rq_xid); if (req->rq_xid < iter->rq_xid) continue; @@ -1106,8 +1103,7 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, */ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) { - struct list_head *tmp; - struct list_head *next; + struct ptlrpc_request *req; int expected_phase; int n = 0; @@ -1116,11 +1112,7 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) /* Requests on the set should either all be completed, or all be new */ expected_phase = (atomic_read(&set->set_remaining) == 0) ? RQ_PHASE_COMPLETE : RQ_PHASE_NEW; - list_for_each(tmp, &set->set_requests) { - struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - + list_for_each_entry(req, &set->set_requests, rq_set_chain) { LASSERT(req->rq_phase == expected_phase); n++; } @@ -1129,10 +1121,9 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set) atomic_read(&set->set_remaining) == n, "%d / %d\n", atomic_read(&set->set_remaining), n); - list_for_each_safe(tmp, next, &set->set_requests) { - struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + while ((req = list_first_entry_or_null(&set->set_requests, + struct ptlrpc_request, + rq_set_chain))) { list_del_init(&req->rq_set_chain); LASSERT(req->rq_phase == expected_phase); @@ -1786,7 +1777,7 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set) */ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) { - struct list_head *tmp, *next; + struct ptlrpc_request *req, *next; LIST_HEAD(comp_reqs); 
int force_timer_recalc = 0; @@ -1794,10 +1785,8 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set) if (atomic_read(&set->set_remaining) == 0) RETURN(1); - list_for_each_safe(tmp, next, &set->set_requests) { - struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_for_each_entry_safe(req, next, &set->set_requests, + rq_set_chain) { struct obd_import *imp = req->rq_import; int unregistered = 0; int async = 1; @@ -2338,7 +2327,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) */ void ptlrpc_expired_set(struct ptlrpc_request_set *set) { - struct list_head *tmp; + struct ptlrpc_request *req; time64_t now = ktime_get_real_seconds(); ENTRY; @@ -2347,11 +2336,7 @@ void ptlrpc_expired_set(struct ptlrpc_request_set *set) /* * A timeout expired. See which reqs it applies to... */ - list_for_each(tmp, &set->set_requests) { - struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, - rq_set_chain); - + list_for_each_entry(req, &set->set_requests, rq_set_chain) { /* don't expire request waiting for context */ if (req->rq_wait_ctx) continue; @@ -2387,15 +2372,12 @@ void ptlrpc_expired_set(struct ptlrpc_request_set *set) */ static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set) { - struct list_head *tmp; + struct ptlrpc_request *req; LASSERT(set != NULL); CDEBUG(D_RPCTRACE, "INTERRUPTED SET %p\n", set); - list_for_each(tmp, &set->set_requests) { - struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, rq_set_chain); - + list_for_each_entry(req, &set->set_requests, rq_set_chain) { if (req->rq_intr) continue; @@ -2415,16 +2397,13 @@ static void ptlrpc_interrupted_set(struct ptlrpc_request_set *set) */ time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) { - struct list_head *tmp; time64_t now = ktime_get_real_seconds(); int timeout = 0; struct ptlrpc_request *req; time64_t deadline; ENTRY; - list_for_each(tmp, &set->set_requests) { 
- req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); - + list_for_each_entry(req, &set->set_requests, rq_set_chain) { /* Request in-flight? */ if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) || (req->rq_phase == RQ_PHASE_BULK) || @@ -2462,7 +2441,6 @@ time64_t ptlrpc_set_next_timeout(struct ptlrpc_request_set *set) */ int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set) { - struct list_head *tmp; struct ptlrpc_request *req; time64_t timeout; int rc; @@ -2471,9 +2449,7 @@ int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set) if (set->set_producer) (void)ptlrpc_set_producer(set); else - list_for_each(tmp, &set->set_requests) { - req = list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_for_each_entry(req, &set->set_requests, rq_set_chain) { if (req->rq_phase == RQ_PHASE_NEW) (void)ptlrpc_send_new_req(req); } @@ -2567,9 +2543,8 @@ int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set) * the error cases -eeb. 
*/ if (rc == 0 && atomic_read(&set->set_remaining) == 0) { - list_for_each(tmp, &set->set_requests) { - req = list_entry(tmp, struct ptlrpc_request, - rq_set_chain); + list_for_each_entry(req, &set->set_requests, + rq_set_chain) { spin_lock(&req->rq_lock); req->rq_invalid_rqset = 1; spin_unlock(&req->rq_lock); @@ -2580,9 +2555,7 @@ int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *set) LASSERT(atomic_read(&set->set_remaining) == 0); rc = set->set_rc; /* rq_status of already freed requests if any */ - list_for_each(tmp, &set->set_requests) { - req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); - + list_for_each_entry(req, &set->set_requests, rq_set_chain) { LASSERT(req->rq_phase == RQ_PHASE_COMPLETE); if (req->rq_status != 0) rc = req->rq_status; @@ -3026,7 +2999,7 @@ EXPORT_SYMBOL(ptlrpc_request_addref); void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, struct obd_import *imp) { - struct list_head *tmp; + struct ptlrpc_request *iter; assert_spin_locked(&imp->imp_lock); @@ -3054,11 +3027,8 @@ void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, LASSERT(imp->imp_replayable); /* Balanced in ptlrpc_free_committed, usually. */ ptlrpc_request_addref(req); - list_for_each_prev(tmp, &imp->imp_replay_list) { - struct ptlrpc_request *iter = list_entry(tmp, - struct ptlrpc_request, - rq_replay_list); - + list_for_each_entry_reverse(iter, &imp->imp_replay_list, + rq_replay_list) { /* * We may have duplicate transnos if we create and then * open a file, or for closes retained if to match creating @@ -3306,7 +3276,7 @@ int ptlrpc_replay_req(struct ptlrpc_request *req) */ void ptlrpc_abort_inflight(struct obd_import *imp) { - struct list_head *tmp, *n; + struct ptlrpc_request *req, *n; ENTRY; /* @@ -3321,11 +3291,7 @@ void ptlrpc_abort_inflight(struct obd_import *imp) * locked? Also, how do we know if the requests on the list are * being freed at this time? 
*/ - list_for_each_safe(tmp, n, &imp->imp_sending_list) { - struct ptlrpc_request *req = list_entry(tmp, - struct ptlrpc_request, - rq_list); - + list_for_each_entry_safe(req, n, &imp->imp_sending_list, rq_list) { DEBUG_REQ(D_RPCTRACE, req, "inflight"); spin_lock(&req->rq_lock); @@ -3337,10 +3303,7 @@ void ptlrpc_abort_inflight(struct obd_import *imp) spin_unlock(&req->rq_lock); } - list_for_each_safe(tmp, n, &imp->imp_delayed_list) { - struct ptlrpc_request *req = - list_entry(tmp, struct ptlrpc_request, rq_list); - + list_for_each_entry_safe(req, n, &imp->imp_delayed_list, rq_list) { DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req"); spin_lock(&req->rq_lock); @@ -3367,15 +3330,11 @@ void ptlrpc_abort_inflight(struct obd_import *imp) */ void ptlrpc_abort_set(struct ptlrpc_request_set *set) { - struct list_head *tmp, *pos; + struct ptlrpc_request *req, *tmp; LASSERT(set != NULL); - list_for_each_safe(pos, tmp, &set->set_requests) { - struct ptlrpc_request *req = - list_entry(pos, struct ptlrpc_request, - rq_set_chain); - + list_for_each_entry_safe(req, tmp, &set->set_requests, rq_set_chain) { spin_lock(&req->rq_lock); if (req->rq_phase != RQ_PHASE_RPC) { spin_unlock(&req->rq_lock); diff --git a/lustre/ptlrpc/import.c b/lustre/ptlrpc/import.c index 845846e..b7c07e2 100644 --- a/lustre/ptlrpc/import.c +++ b/lustre/ptlrpc/import.c @@ -278,15 +278,12 @@ static time64_t ptlrpc_inflight_deadline(struct ptlrpc_request *req, static time64_t ptlrpc_inflight_timeout(struct obd_import *imp) { time64_t now = ktime_get_real_seconds(); - struct list_head *tmp, *n; - struct ptlrpc_request *req; + struct ptlrpc_request *req, *n; time64_t timeout = 0; spin_lock(&imp->imp_lock); - list_for_each_safe(tmp, n, &imp->imp_sending_list) { - req = list_entry(tmp, struct ptlrpc_request, rq_list); + list_for_each_entry_safe(req, n, &imp->imp_sending_list, rq_list) timeout = max(ptlrpc_inflight_deadline(req, now), timeout); - } spin_unlock(&imp->imp_lock); return timeout; } @@ -299,8 +296,7 
@@ static time64_t ptlrpc_inflight_timeout(struct obd_import *imp) */ void ptlrpc_invalidate_import(struct obd_import *imp) { - struct list_head *tmp, *n; - struct ptlrpc_request *req; + struct ptlrpc_request *req, *n; time64_t timeout; int rc; @@ -376,19 +372,15 @@ void ptlrpc_invalidate_import(struct obd_import *imp) * this point. */ rc = 1; } else { - list_for_each_safe(tmp, n, - &imp->imp_sending_list) { - req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + list_for_each_entry_safe(req, n, + &imp->imp_sending_list, + rq_list) { DEBUG_REQ(D_ERROR, req, "still on sending list"); } - list_for_each_safe(tmp, n, - &imp->imp_delayed_list) { - req = list_entry(tmp, - struct ptlrpc_request, - rq_list); + list_for_each_entry_safe(req, n, + &imp->imp_delayed_list, + rq_list) { DEBUG_REQ(D_ERROR, req, "still on delayed list"); } @@ -634,14 +626,13 @@ out_unlock: */ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) { - struct ptlrpc_request *req; - struct list_head *tmp; + struct ptlrpc_request *req; /* The requests in committed_list always have smaller transnos than * the requests in replay_list */ if (!list_empty(&imp->imp_committed_list)) { - tmp = imp->imp_committed_list.next; - req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); + req = list_first_entry(&imp->imp_committed_list, + struct ptlrpc_request, rq_replay_list); *transno = req->rq_transno; if (req->rq_transno == 0) { DEBUG_REQ(D_ERROR, req, @@ -651,8 +642,8 @@ static int ptlrpc_first_transno(struct obd_import *imp, __u64 *transno) return 1; } if (!list_empty(&imp->imp_replay_list)) { - tmp = imp->imp_replay_list.next; - req = list_entry(tmp, struct ptlrpc_request, rq_replay_list); + req = list_first_entry(&imp->imp_replay_list, + struct ptlrpc_request, rq_replay_list); *transno = req->rq_transno; if (req->rq_transno == 0) { DEBUG_REQ(D_ERROR, req, "zero transno in replay_list"); diff --git a/lustre/ptlrpc/pinger.c b/lustre/ptlrpc/pinger.c index f6f536c..1c47ab7
100644 --- a/lustre/ptlrpc/pinger.c +++ b/lustre/ptlrpc/pinger.c @@ -281,17 +281,13 @@ static void ptlrpc_pinger_main(struct work_struct *ws) time64_t this_ping, time_after_ping; timeout_t time_to_next_wake; struct obd_import *imp; - struct list_head *iter; do { this_ping = ktime_get_seconds(); mutex_lock(&pinger_mutex); - list_for_each(iter, &pinger_imports) { - imp = list_entry(iter, struct obd_import, - imp_pinger_chain); - + list_for_each_entry(imp, &pinger_imports, imp_pinger_chain) { ptlrpc_pinger_process_import(imp, this_ping); /* obd_timeout might have changed */ if (imp->imp_pingable && imp->imp_next_ping && diff --git a/lustre/ptlrpc/ptlrpcd.c b/lustre/ptlrpc/ptlrpcd.c index 42cb328..11803a8 100644 --- a/lustre/ptlrpc/ptlrpcd.c +++ b/lustre/ptlrpc/ptlrpcd.c @@ -240,17 +240,15 @@ void ptlrpcd_add_rqset(struct ptlrpc_request_set *set) static int ptlrpcd_steal_rqset(struct ptlrpc_request_set *des, struct ptlrpc_request_set *src) { - struct list_head *tmp, *pos; - struct ptlrpc_request *req; + struct ptlrpc_request *req, *tmp; int rc = 0; spin_lock(&src->set_new_req_lock); if (likely(!list_empty(&src->set_new_requests))) { - list_for_each_safe(pos, tmp, &src->set_new_requests) { - req = list_entry(pos, struct ptlrpc_request, - rq_set_chain); + list_for_each_entry_safe(req, tmp, &src->set_new_requests, + rq_set_chain) req->rq_set = des; - } + list_splice_init(&src->set_new_requests, &des->set_requests); rc = atomic_read(&src->set_new_count); @@ -318,8 +316,7 @@ static inline void ptlrpc_reqset_get(struct ptlrpc_request_set *set) */ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) { - struct list_head *tmp, *pos; - struct ptlrpc_request *req; + struct ptlrpc_request *req, *tmp; struct ptlrpc_request_set *set = pc->pc_set; int rc = 0; int rc2; @@ -369,8 +366,7 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc) * NB: ptlrpc_check_set has already moved complted request at the * head of seq::set_requests */ - 
list_for_each_safe(pos, tmp, &set->set_requests) { - req = list_entry(pos, struct ptlrpc_request, rq_set_chain); + list_for_each_entry_safe(req, tmp, &set->set_requests, rq_set_chain) { if (req->rq_phase != RQ_PHASE_COMPLETE) break; diff --git a/lustre/ptlrpc/recover.c b/lustre/ptlrpc/recover.c index d2da4a4..e5fae61 100644 --- a/lustre/ptlrpc/recover.c +++ b/lustre/ptlrpc/recover.c @@ -67,17 +67,16 @@ void ptlrpc_initiate_recovery(struct obd_import *imp) */ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) { - int rc = 0; - struct list_head *tmp, *pos; - struct ptlrpc_request *req = NULL; - __u64 last_transno; - ENTRY; + int rc = 0; + struct ptlrpc_request *req = NULL, *pos; + __u64 last_transno; + ENTRY; - *inflight = 0; + *inflight = 0; - /* It might have committed some after we last spoke, so make sure we - * get rid of them now. - */ + /* It might have committed some after we last spoke, so make sure we + * get rid of them now. + */ spin_lock(&imp->imp_lock); imp->imp_last_transno_checked = 0; ptlrpc_free_committed(imp); @@ -89,9 +88,8 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) /* Replay all the committed open requests on committed_list first */ if (!list_empty(&imp->imp_committed_list)) { - tmp = imp->imp_committed_list.prev; - req = list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + req = list_last_entry(&imp->imp_committed_list, + struct ptlrpc_request, rq_replay_list); /* The last request on committed_list hasn't been replayed */ if (req->rq_transno > last_transno) { @@ -103,8 +101,8 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) while (imp->imp_replay_cursor != &imp->imp_committed_list) { req = list_entry(imp->imp_replay_cursor, - struct ptlrpc_request, - rq_replay_list); + struct ptlrpc_request, + rq_replay_list); if (req->rq_transno > last_transno) break; @@ -123,13 +121,14 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) /* All the requests in committed list have been 
replayed, let's replay * the imp_replay_list */ if (req == NULL) { - list_for_each_safe(tmp, pos, &imp->imp_replay_list) { - req = list_entry(tmp, struct ptlrpc_request, - rq_replay_list); + struct ptlrpc_request *tmp; - if (req->rq_transno > last_transno) + list_for_each_entry_safe(tmp, pos, &imp->imp_replay_list, + rq_replay_list) { + if (tmp->rq_transno > last_transno) { + req = tmp; break; - req = NULL; + } } } @@ -159,8 +158,8 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight) rc = ptlrpc_replay_req(req); if (rc) { - CERROR("recovery replay error %d for req " - "%llu\n", rc, req->rq_xid); + CERROR("recovery replay error %d for req %llu\n", + rc, req->rq_xid); RETURN(rc); } *inflight = 1; @@ -213,13 +212,10 @@ int ptlrpc_resend(struct obd_import *imp) */ void ptlrpc_wake_delayed(struct obd_import *imp) { - struct list_head *tmp, *pos; - struct ptlrpc_request *req; + struct ptlrpc_request *req, *pos; spin_lock(&imp->imp_lock); - list_for_each_safe(tmp, pos, &imp->imp_delayed_list) { - req = list_entry(tmp, struct ptlrpc_request, rq_list); - + list_for_each_entry_safe(req, pos, &imp->imp_delayed_list, rq_list) { DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set); ptlrpc_client_wake_req(req); } diff --git a/lustre/ptlrpc/service.c b/lustre/ptlrpc/service.c index 05514cf..49cdbba 100644 --- a/lustre/ptlrpc/service.c +++ b/lustre/ptlrpc/service.c @@ -905,8 +905,6 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req) struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt; struct ptlrpc_service *svc = svcpt->scp_service; int refcount; - struct list_head *tmp; - struct list_head *nxt; if (!atomic_dec_and_test(&req->rq_refcount)) return; @@ -961,9 +959,7 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req) * remove rqbd's reqs from svc's req history while * I've got the service lock */ - list_for_each(tmp, &rqbd->rqbd_reqs) { - req = list_entry(tmp, struct ptlrpc_request, - rq_list); + list_for_each_entry(req, &rqbd->rqbd_reqs, rq_list) 
{ /* Track the highest culled req seq */ if (req->rq_history_seq > svcpt->scp_hist_seq_culled) { @@ -975,10 +971,9 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req) spin_unlock(&svcpt->scp_lock); - list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) { - req = list_entry(rqbd->rqbd_reqs.next, - struct ptlrpc_request, - rq_list); + while ((req = list_first_entry_or_null( + &rqbd->rqbd_reqs, + struct ptlrpc_request, rq_list))) { list_del(&req->rq_list); ptlrpc_server_free_request(req); } -- 1.8.3.1