From 3f848f85ba3d33030e903bbdf8a9a9f21cd47303 Mon Sep 17 00:00:00 2001 From: Mr NeilBrown Date: Sun, 7 Jun 2020 19:24:29 -0400 Subject: [PATCH] LU-12678 socklnd: use need_resched() Rather than using a counter to decide when to drop the lock and see if we need to reschedule we can use need_resched(), which is a precise test instead of a guess. Test-Parameters: trivial testlist=sanity-lnet Signed-off-by: Mr NeilBrown Change-Id: If13871a4a4c57ca87cbb1e22af85cb7fd24ab006 Reviewed-on: https://review.whamcloud.com/39128 Tested-by: jenkins Tested-by: Maloo Reviewed-by: James Simmons Reviewed-by: Shaun Tancheff Reviewed-by: Chris Horn Reviewed-by: Oleg Drokin --- lnet/klnds/socklnd/socklnd.h | 1 - lnet/klnds/socklnd/socklnd_cb.c | 12 +++--------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/lnet/klnds/socklnd/socklnd.h b/lnet/klnds/socklnd/socklnd.h index abd733b..b5b3425 100644 --- a/lnet/klnds/socklnd/socklnd.h +++ b/lnet/klnds/socklnd/socklnd.h @@ -64,7 +64,6 @@ #define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1) #define SOCKNAL_PEER_HASH_BITS 7 /* log2 of # peer_ni lists */ -#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */ #define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */ #define SOCKNAL_ENOMEM_RETRY 1 /* seconds between retries */ diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c index aa9f747..a7441fa 100644 --- a/lnet/klnds/socklnd/socklnd_cb.c +++ b/lnet/klnds/socklnd/socklnd_cb.c @@ -1446,7 +1446,6 @@ int ksocknal_scheduler(void *arg) struct ksock_conn *conn; struct ksock_tx *tx; int rc; - int nloops = 0; long id = (long)arg; struct page **rx_scratch_pgs; struct kvec *scratch_iov; @@ -1592,12 +1591,10 @@ int ksocknal_scheduler(void *arg) did_something = 1; } - if (!did_something || /* nothing to do */ - ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */ + if (!did_something || /* nothing to do */ + need_resched()) { /* hogging CPU? 
*/ spin_unlock_bh(&sched->kss_lock); - nloops = 0; - if (!did_something) { /* wait for something to do */ rc = wait_event_interruptible_exclusive( sched->kss_waitq, @@ -2196,7 +2193,6 @@ ksocknal_connd(void *arg) spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock; struct ksock_connreq *cr; wait_queue_entry_t wait; - int nloops = 0; int cons_retry = 0; init_waitqueue_entry(&wait, current); @@ -2273,10 +2269,9 @@ ksocknal_connd(void *arg) } if (dropped_lock) { - if (++nloops < SOCKNAL_RESCHED) + if (!need_resched()) continue; spin_unlock_bh(connd_lock); - nloops = 0; cond_resched(); spin_lock_bh(connd_lock); continue; @@ -2287,7 +2282,6 @@ ksocknal_connd(void *arg) add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait); spin_unlock_bh(connd_lock); - nloops = 0; schedule_timeout(timeout); remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait); -- 1.8.3.1