Rather than using a counter to decide when to drop the lock and see if
we need to reschedule, we can use need_resched(), which is a precise
test rather than a guess.
Test-Parameters: trivial testlist=sanity-lnet
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: If13871a4a4c57ca87cbb1e22af85cb7fd24ab006
Reviewed-on: https://review.whamcloud.com/39128
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Chris Horn <chris.horn@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
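For context, a minimal sketch of the generic pattern this patch applies
(worker_loop() and loop_lock are hypothetical names, not the Lustre
sources): instead of counting iterations and guessing when to yield,
ask the scheduler directly with need_resched().

#include <linux/sched.h>
#include <linux/spinlock.h>

static void worker_loop(spinlock_t *loop_lock)
{
	spin_lock_bh(loop_lock);
	for (;;) {
		/* ... perform one unit of work under the lock ... */

		/* Old style: if (++nloops == SOCKNAL_RESCHED) { nloops = 0; ... }
		 * New style: yield only when the scheduler asks us to. */
		if (need_resched()) {
			spin_unlock_bh(loop_lock);
			cond_resched();
			spin_lock_bh(loop_lock);
		}
	}
}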
#define SOCKNAL_NSCHEDS_HIGH (SOCKNAL_NSCHEDS << 1)
#define SOCKNAL_PEER_HASH_BITS 7 /* log2 of # peer_ni lists */
-#define SOCKNAL_RESCHED 100 /* # scheduler loops before reschedule */
#define SOCKNAL_INSANITY_RECONN 5000 /* connd is trying on reconn infinitely */
#define SOCKNAL_ENOMEM_RETRY 1 /* seconds between retries */
struct ksock_conn *conn;
struct ksock_tx *tx;
int rc;
long id = (long)arg;
struct page **rx_scratch_pgs;
struct kvec *scratch_iov;
- if (!did_something || /* nothing to do */
- ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
+ if (!did_something || /* nothing to do */
+ need_resched()) { /* hogging CPU? */
spin_unlock_bh(&sched->kss_lock);
if (!did_something) { /* wait for something to do */
rc = wait_event_interruptible_exclusive(
sched->kss_waitq,
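The wake condition of the wait_event call is truncated in the hunk
above; a sketch of the exclusive wait it uses, with "work_pending" as a
hypothetical stand-in for the real condition:

#include <linux/wait.h>

static int wait_for_work(wait_queue_head_t *waitq, const bool *work_pending)
{
	/* Sleeps until *work_pending is true or a signal arrives;
	 * "exclusive" waiters are woken one at a time by wake_up(). */
	return wait_event_interruptible_exclusive(*waitq, *work_pending);
}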
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
struct ksock_connreq *cr;
wait_queue_entry_t wait;
int cons_retry = 0;
init_waitqueue_entry(&wait, current);
- if (++nloops < SOCKNAL_RESCHED)
+ if (!need_resched())
continue;
spin_unlock_bh(connd_lock);
cond_resched();
spin_lock_bh(connd_lock);
continue;
add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_unlock_bh(connd_lock);
schedule_timeout(timeout);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
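For reference, the connd idle path above follows the classic manual
wait-queue pattern; a sketch with hypothetical names (idle_wait(),
waitq, lock), mirroring the ordering in the code, not the patched
sources verbatim:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static void idle_wait(wait_queue_head_t *waitq, spinlock_t *lock,
		      long timeout)
{
	wait_queue_entry_t wait;

	init_waitqueue_entry(&wait, current);
	/* Mark ourselves sleepy before queueing so a wake_up() after
	 * the unlock is not lost; exclusive queueing means one
	 * wake_up() rouses a single waiter. */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(waitq, &wait);
	spin_unlock_bh(lock);

	schedule_timeout(timeout);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(waitq, &wait);
	spin_lock_bh(lock);
}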