Whamcloud - gitweb
LU-12678 socklnd: use need_resched() 28/39128/2
author: Mr NeilBrown <neilb@suse.de>
Sun, 7 Jun 2020 23:24:29 +0000 (19:24 -0400)
committer: Oleg Drokin <green@whamcloud.com>
Sat, 4 Jul 2020 03:05:33 +0000 (03:05 +0000)
Rather than using a counter to decide when to drop the lock and see if
we need to reschedule, we can use need_resched(), which is a precise
test instead of a guess.

Test-Parameters: trivial testlist=sanity-lnet
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: If13871a4a4c57ca87cbb1e22af85cb7fd24ab006
Reviewed-on: https://review.whamcloud.com/39128
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: Chris Horn <chris.horn@hpe.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lnet/klnds/socklnd/socklnd.h
lnet/klnds/socklnd/socklnd_cb.c

index abd733b..b5b3425 100644 (file)
@@ -64,7 +64,6 @@
 #define SOCKNAL_NSCHEDS_HIGH   (SOCKNAL_NSCHEDS << 1)
 
 #define SOCKNAL_PEER_HASH_BITS 7       /* log2 of # peer_ni lists */
-#define SOCKNAL_RESCHED                100     /* # scheduler loops before reschedule */
 #define SOCKNAL_INSANITY_RECONN        5000    /* connd is trying on reconn infinitely */
 #define SOCKNAL_ENOMEM_RETRY   1       /* seconds between retries */
 
index aa9f747..a7441fa 100644 (file)
@@ -1446,7 +1446,6 @@ int ksocknal_scheduler(void *arg)
        struct ksock_conn *conn;
        struct ksock_tx *tx;
        int rc;
-       int nloops = 0;
        long id = (long)arg;
        struct page **rx_scratch_pgs;
        struct kvec *scratch_iov;
@@ -1592,12 +1591,10 @@ int ksocknal_scheduler(void *arg)
 
                        did_something = 1;
                }
-               if (!did_something ||           /* nothing to do */
-                   ++nloops == SOCKNAL_RESCHED) { /* hogging CPU? */
+               if (!did_something ||   /* nothing to do */
+                   need_resched()) {   /* hogging CPU? */
                        spin_unlock_bh(&sched->kss_lock);
 
-                       nloops = 0;
-
                        if (!did_something) {   /* wait for something to do */
                                rc = wait_event_interruptible_exclusive(
                                        sched->kss_waitq,
@@ -2196,7 +2193,6 @@ ksocknal_connd(void *arg)
        spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
        struct ksock_connreq *cr;
        wait_queue_entry_t wait;
-       int nloops = 0;
        int cons_retry = 0;
 
        init_waitqueue_entry(&wait, current);
@@ -2273,10 +2269,9 @@ ksocknal_connd(void *arg)
                }
 
                if (dropped_lock) {
-                       if (++nloops < SOCKNAL_RESCHED)
+                       if (!need_resched())
                                continue;
                        spin_unlock_bh(connd_lock);
-                       nloops = 0;
                        cond_resched();
                        spin_lock_bh(connd_lock);
                        continue;
@@ -2287,7 +2282,6 @@ ksocknal_connd(void *arg)
                add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_unlock_bh(connd_lock);
 
-               nloops = 0;
                schedule_timeout(timeout);
 
                remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);