Some lock/unlock sequences on the service's at_lock are unnecessary;
this patch cleans them up to reduce needless lock/unlock overhead.
Signed-off-by: Liang Zhen <liang@whamcloud.com>
Change-Id: I004729692254d1200a8b18c0c4494ff437233caf
Reviewed-on: http://review.whamcloud.com/2911
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: wangdi <di.wang@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
if (!cfs_atomic_dec_and_test(&req->rq_refcount))
return;
if (!cfs_atomic_dec_and_test(&req->rq_refcount))
return;
- cfs_spin_lock(&svcpt->scp_at_lock);
if (req->rq_at_linked) {
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
__u32 index = req->rq_at_index;
if (req->rq_at_linked) {
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
__u32 index = req->rq_at_index;
+ cfs_spin_lock(&svcpt->scp_at_lock);
+
LASSERT(!cfs_list_empty(&req->rq_timed_list));
cfs_list_del_init(&req->rq_timed_list);
cfs_spin_lock(&req->rq_lock);
LASSERT(!cfs_list_empty(&req->rq_timed_list));
cfs_list_del_init(&req->rq_timed_list);
cfs_spin_lock(&req->rq_lock);
cfs_spin_unlock(&req->rq_lock);
array->paa_reqs_count[index]--;
array->paa_count--;
cfs_spin_unlock(&req->rq_lock);
array->paa_reqs_count[index]--;
array->paa_count--;
- } else
- LASSERT(cfs_list_empty(&req->rq_timed_list));
- cfs_spin_unlock(&svcpt->scp_at_lock);
+ cfs_spin_unlock(&svcpt->scp_at_lock);
+ } else {
+ LASSERT(cfs_list_empty(&req->rq_timed_list));
+ }
/* finalize request */
if (req->rq_export) {
/* finalize request */
if (req->rq_export) {
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
__s32 next;
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
__s32 next;
- cfs_spin_lock(&svcpt->scp_at_lock);
if (array->paa_count == 0) {
cfs_timer_disarm(&svcpt->scp_at_timer);
if (array->paa_count == 0) {
cfs_timer_disarm(&svcpt->scp_at_timer);
- cfs_spin_unlock(&svcpt->scp_at_lock);
CDEBUG(D_INFO, "armed %s at %+ds\n",
svcpt->scp_service->srv_name, next);
}
CDEBUG(D_INFO, "armed %s at %+ds\n",
svcpt->scp_service->srv_name, next);
}
- cfs_spin_unlock(&svcpt->scp_at_lock);
}
/* Add rpc to early reply check list */
}
/* Add rpc to early reply check list */
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
struct ptlrpc_request *rq = NULL;
__u32 index;
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
struct ptlrpc_request *rq = NULL;
__u32 index;
array->paa_count++;
if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
array->paa_deadline = req->rq_deadline;
array->paa_count++;
if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) {
array->paa_deadline = req->rq_deadline;
- found = 1;
- }
- cfs_spin_unlock(&svcpt->scp_at_lock);
-
- if (found)
ptlrpc_at_set_timer(svcpt);
ptlrpc_at_set_timer(svcpt);
+ }
+ cfs_spin_unlock(&svcpt->scp_at_lock);
first = array->paa_deadline - now;
if (first > at_early_margin) {
/* We've still got plenty of time. Reset the timer. */
first = array->paa_deadline - now;
if (first > at_early_margin) {
/* We've still got plenty of time. Reset the timer. */
- cfs_spin_unlock(&svcpt->scp_at_lock);
ptlrpc_at_set_timer(svcpt);
ptlrpc_at_set_timer(svcpt);
+ cfs_spin_unlock(&svcpt->scp_at_lock);
+ RETURN(0);
+ }
/* We're close to a timeout, and we don't know how much longer the
server will take. Send early replies to everyone expiring soon. */
/* We're close to a timeout, and we don't know how much longer the
server will take. Send early replies to everyone expiring soon. */
index = 0;
}
array->paa_deadline = deadline;
index = 0;
}
array->paa_deadline = deadline;
- cfs_spin_unlock(&svcpt->scp_at_lock);
-
/* we have a new earliest deadline, restart the timer */
ptlrpc_at_set_timer(svcpt);
/* we have a new earliest deadline, restart the timer */
ptlrpc_at_set_timer(svcpt);
+ cfs_spin_unlock(&svcpt->scp_at_lock);
+
CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
"replies\n", first, at_extra, counter);
if (first < 0) {
CDEBUG(D_ADAPTTO, "timeout in %+ds, asking for %d secs on %d early "
"replies\n", first, at_extra, counter);
if (first < 0) {