Whamcloud - gitweb
LU-12930 various: use schedule_timeout_*interruptible 56/36656/6
author: Mr NeilBrown <neilb@suse.de>
Mon, 4 Nov 2019 01:05:32 +0000 (12:05 +1100)
committer: Oleg Drokin <green@whamcloud.com>
Fri, 14 Feb 2020 05:50:24 +0000 (05:50 +0000)
The construct:

  set_current_state(TASK_UNINTERRUPTIBLE);
  schedule_timeout(time);

Is more clearly expressed as

  schedule_timeout_uninterruptible(time);

And similarly with TASK_INTERRUPTIBLE /
schedule_timeout_interruptible()

Establishing this practice makes it harder to forget to call
set_current_state() as has happened a couple of times - in
lnet_peer_discovery() and mdd_changelog_fini().

Also, there is no need to set_current_state(TASK_RUNNING) after
calling schedule*().  That state is guaranteed to have been set.

In mdd_changelog_fini() there was an attempt to sleep for
10 microseconds.  This will always round up to 1 jiffy, so
just make it schedule_timeout_uninterruptible(1).

Finally a few places where the number of seconds was multiplied
by 1, have had the '1 *' removed.

Test-Parameters: trivial
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: I01b37039de0bf7e07480de372c1a4cfe78a8cdd8
Reviewed-on: https://review.whamcloud.com/36656
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
36 files changed:
libcfs/libcfs/fail.c
libcfs/libcfs/tracefile.c
libcfs/libcfs/workitem.c
lnet/klnds/gnilnd/gnilnd.c
lnet/klnds/gnilnd/gnilnd_stack.c
lnet/klnds/gnilnd/gnilnd_sysctl.c
lnet/klnds/o2iblnd/o2iblnd.c
lnet/klnds/o2iblnd/o2iblnd_cb.c
lnet/klnds/socklnd/socklnd.c
lnet/klnds/socklnd/socklnd_cb.c
lnet/lnet/acceptor.c
lnet/lnet/api-ni.c
lnet/lnet/peer.c
lnet/lnet/router.c
lnet/selftest/conrpc.c
lnet/selftest/rpc.c
lnet/selftest/selftest.h
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_resource.c
lustre/lfsck/lfsck_lib.c
lustre/llite/llite_lib.c
lustre/lov/lov_io.c
lustre/mdc/mdc_changelog.c
lustre/mdd/mdd_device.c
lustre/mdt/mdt_handler.c
lustre/obdclass/genops.c
lustre/obdclass/obd_mount_server.c
lustre/obdecho/echo_client.c
lustre/osp/osp_internal.h
lustre/osp/osp_sync.c
lustre/ptlrpc/gss/gss_svc_upcall.c
lustre/ptlrpc/sec.c
lustre/quota/lquota_entry.c
lustre/quota/qsd_handler.c
lustre/quota/qsd_lib.c

index 304faae..0478f4a 100644 (file)
@@ -129,8 +129,8 @@ int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
        if (ret && likely(ms > 0)) {
                CERROR("cfs_fail_timeout id %x sleeping for %dms\n", id, ms);
                while (ktime_before(ktime_get(), till)) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1) / 10);
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1)
+                                                        / 10);
                        set_current_state(TASK_RUNNING);
                        if (!cfs_fail_loc) {
                                CERROR("cfs_fail_timeout interrupted\n");
index 0fe7c7f..555a5bb 100644 (file)
@@ -1002,8 +1002,7 @@ end_loop:
                }
                init_waitqueue_entry(&__wait, current);
                add_wait_queue(&tctl->tctl_waitq, &__wait);
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_interruptible(cfs_time_seconds(1));
                remove_wait_queue(&tctl->tctl_waitq, &__wait);
         }
        complete(&tctl->tctl_stop);
index 7768e5c..be11e32 100644 (file)
@@ -315,8 +315,8 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
                               i / 20, sched->ws_nthreads, sched->ws_name);
 
                        spin_unlock(&cfs_wi_data.wi_glock);
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1) / 20);
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1)
+                                                        / 20);
                        spin_lock(&cfs_wi_data.wi_glock);
                }
        }
@@ -446,8 +446,8 @@ cfs_wi_shutdown (void)
 
                while (sched->ws_nthreads != 0) {
                        spin_unlock(&cfs_wi_data.wi_glock);
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1) / 20);
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1)
+                                                        / 20);
                        spin_lock(&cfs_wi_data.wi_glock);
                }
                spin_unlock(&cfs_wi_data.wi_glock);
index 8b5bc2e..14fb5d6 100644 (file)
@@ -1598,8 +1598,7 @@ kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
               atomic_read(&kgnilnd_data.kgn_npending_detach)  ||
               atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
 
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
                i++;
 
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
@@ -2464,8 +2463,7 @@ kgnilnd_base_shutdown(void)
 
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                        "Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
        }
        /* Peer state all cleaned up BEFORE setting shutdown, so threads don't
         * have to worry about shutdown races.  NB connections may be created
@@ -2484,8 +2482,7 @@ kgnilnd_base_shutdown(void)
                i++;
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                       "Waiting for ruhroh thread to terminate\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
        }
 
        /* Flag threads to terminate */
@@ -2517,8 +2514,7 @@ kgnilnd_base_shutdown(void)
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                       "Waiting for %d threads to terminate\n",
                       atomic_read(&kgnilnd_data.kgn_nthreads));
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
        }
 
        LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
@@ -2769,8 +2765,7 @@ kgnilnd_shutdown(struct lnet_ni *ni)
                                "Waiting for %d references to clear on net %d\n",
                                atomic_read(&net->gnn_refcount),
                                net->gnn_netnum);
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1));
                }
 
                /* release ref from kgnilnd_startup */
index bdec685..456d181 100644 (file)
@@ -133,8 +133,7 @@ kgnilnd_quiesce_wait(char *reason)
                                 atomic_read(&kgnilnd_data.kgn_nthreads) -
                                 atomic_read(&kgnilnd_data.kgn_nquiesce));
                        CFS_RACE(CFS_FAIL_GNI_QUIESCE_RACE);
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1 * i));
+                       schedule_timeout_uninterruptible(cfs_time_seconds(i));
 
                        LASSERTF(quiesce_deadline > jiffies,
                                 "couldn't quiesce threads in %lu seconds, falling over now\n",
@@ -159,8 +158,7 @@ kgnilnd_quiesce_wait(char *reason)
                                 "%s: Waiting for %d threads to wake up\n",
                                  reason,
                                  atomic_read(&kgnilnd_data.kgn_nquiesce));
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1 * i));
+                       schedule_timeout_uninterruptible(cfs_time_seconds(i));
                }
 
                CDEBUG(D_INFO, "%s: All threads awake!\n", reason);
@@ -422,8 +420,8 @@ kgnilnd_ruhroh_thread(void *arg)
                                i++;
                                CDEBUG(D_INFO, "Waiting for hardware quiesce "
                                               "flag to clear\n");
-                               set_current_state(TASK_UNINTERRUPTIBLE);
-                               schedule_timeout(cfs_time_seconds(1 * i));
+                               schedule_timeout_uninterruptible(
+                                       cfs_time_seconds(i));
 
                                /* If we got a quiesce event with bump info, DO THE BUMP!. */
                                if (kgnilnd_data.kgn_bump_info_rdy) {
index 56241b1..4b472f1 100644 (file)
@@ -133,8 +133,7 @@ proc_trigger_stack_reset(struct ctl_table *table, int write,
                i++;
                LCONSOLE((((i) & (-i)) == i) ? D_WARNING : D_NET,
                                "Waiting for stack reset request to clear\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1 * i));
+               schedule_timeout_uninterruptible(cfs_time_seconds(i));
        }
 
        RETURN(rc);
index 68de736..aaa9ad3 100644 (file)
@@ -1368,8 +1368,7 @@ kiblnd_current_hdev(struct kib_dev *dev)
                if (i++ % 50 == 0)
                        CDEBUG(D_NET, "%s: Wait for failover\n",
                               dev->ibd_ifname);
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1) / 100);
+               schedule_timeout_interruptible(cfs_time_seconds(1) / 100);
 
                read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
        }
@@ -2177,8 +2176,7 @@ again:
                       "trips = %d\n",
                       ps->ps_name, interval, trips);
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(interval);
+               schedule_timeout_interruptible(interval);
                if (interval < cfs_time_seconds(1))
                        interval *= 2;
 
@@ -2989,8 +2987,7 @@ kiblnd_base_shutdown(void)
                        CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
                               "Waiting for %d threads to terminate\n",
                               atomic_read(&kiblnd_data.kib_nthreads));
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1));
                }
 
                 /* fall through */
@@ -3052,8 +3049,7 @@ kiblnd_shutdown(struct lnet_ni *ni)
                               "%s: waiting for %d peers to disconnect\n",
                               libcfs_nid2str(ni->ni_nid),
                               atomic_read(&net->ibn_npeers));
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1));
                }
 
                kiblnd_net_fini_pools(net);
index a370ed5..674f66c 100644 (file)
@@ -3629,7 +3629,6 @@ kiblnd_connd (void *arg)
 
                schedule_timeout(timeout);
 
-               set_current_state(TASK_RUNNING);
                remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
                spin_lock_irqsave(lock, flags);
        }
index 17115c1..96c16fd 100644 (file)
@@ -2216,8 +2216,7 @@ ksocknal_base_shutdown(void)
                                "waiting for %d threads to terminate\n",
                                ksocknal_data.ksnd_nthreads);
                        read_unlock(&ksocknal_data.ksnd_global_lock);
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1));
                        read_lock(&ksocknal_data.ksnd_global_lock);
                }
                read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2425,8 +2424,7 @@ ksocknal_shutdown(struct lnet_ni *ni)
                CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
                       "waiting for %d peers to disconnect\n",
                       atomic_read(&net->ksnn_npeers) - SOCKNAL_SHUTDOWN_BIAS);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
 
                ksocknal_debug_peerhash(ni);
        }
index 134aaff..bf62b22 100644 (file)
@@ -194,10 +194,9 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
        int     rc;
        int     bufnob;
 
-       if (ksocknal_data.ksnd_stall_tx != 0) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
-       }
+       if (ksocknal_data.ksnd_stall_tx != 0)
+               schedule_timeout_uninterruptible(
+                       cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
 
        LASSERT(tx->tx_resid != 0);
 
@@ -354,10 +353,9 @@ ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
        int     rc;
        ENTRY;
 
-       if (ksocknal_data.ksnd_stall_rx != 0) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
-       }
+       if (ksocknal_data.ksnd_stall_rx != 0)
+               schedule_timeout_uninterruptible(
+                       cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
 
        rc = ksocknal_connsock_addref(conn);
        if (rc != 0) {
@@ -2315,7 +2313,6 @@ ksocknal_connd(void *arg)
                nloops = 0;
                schedule_timeout(timeout);
 
-               set_current_state(TASK_RUNNING);
                remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
                spin_lock_bh(connd_lock);
        }
index 94d16fa..3659d0c 100644 (file)
@@ -377,8 +377,8 @@ lnet_acceptor(void *arg)
                if (rc != 0) {
                        if (rc != -EAGAIN) {
                                CWARN("Accept error %d: pausing...\n", rc);
-                               set_current_state(TASK_UNINTERRUPTIBLE);
-                               schedule_timeout(cfs_time_seconds(1));
+                               schedule_timeout_uninterruptible(
+                                       cfs_time_seconds(1));
                        }
                        continue;
                }
index f09bc5f..d39e6b9 100644 (file)
@@ -1749,8 +1749,7 @@ lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
        /* NB the MD could be busy; this just starts the unlink */
        while (atomic_read(&pbuf->pb_refcnt) > 1) {
                CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
        }
 
        cfs_restore_sigs(blocked);
@@ -1983,8 +1982,7 @@ static void lnet_push_target_fini(void)
        /* Wait for the unlink to complete. */
        while (atomic_read(&the_lnet.ln_push_target->pb_refcnt) > 1) {
                CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
        }
 
        lnet_ping_buffer_decref(the_lnet.ln_push_target);
@@ -2060,8 +2058,7 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net)
                                       "Waiting for zombie LNI %s\n",
                                       libcfs_nid2str(ni->ni_nid));
                        }
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
+                       schedule_timeout_uninterruptible(cfs_time_seconds(1));
                        lnet_net_lock(LNET_LOCK_EX);
                        continue;
                }
index 4e6a3e5..8570b5d 100644 (file)
@@ -587,8 +587,7 @@ lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
                               "Waiting for %d zombies on peer table\n",
                               ptable->pt_zombies);
                }
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1) >> 1);
+               schedule_timeout_uninterruptible(cfs_time_seconds(1) >> 1);
                spin_lock(&ptable->pt_zombie_lock);
        }
        spin_unlock(&ptable->pt_zombie_lock);
@@ -3389,7 +3388,7 @@ static int lnet_peer_discovery(void *arg)
 
        /* Queue cleanup 2: wait for the expired queue to clear. */
        while (!list_empty(&the_lnet.ln_dc_expired))
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
 
        /* Queue cleanup 3: clear the request queue. */
        lnet_net_lock(LNET_LOCK_EX);
index cae8da6..4ab65f7 100644 (file)
@@ -940,8 +940,7 @@ lnet_wait_known_routerstate(void)
                if (all_known)
                        return;
 
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
        }
 }
 
index 268b2ff..8efacc0 100644 (file)
@@ -1349,8 +1349,7 @@ lstcon_rpc_cleanup_wait(void)
 
                CWARN("Session is shutting down, "
                      "waiting for termination of transactions\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
 
                mutex_lock(&console_session.ses_mutex);
        }
index 47a89fa..b3ef75a 100644 (file)
@@ -1608,8 +1608,7 @@ srpc_startup (void)
        spin_lock_init(&srpc_data.rpc_glock);
 
        /* 1 second pause to avoid timestamp reuse */
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       schedule_timeout(cfs_time_seconds(1));
+       schedule_timeout_uninterruptible(cfs_time_seconds(1));
        srpc_data.rpc_matchbits = ((__u64) ktime_get_real_seconds()) << 48;
 
        srpc_data.rpc_state = SRPC_STATE_NONE;
index 1a39cd7..a1574b2 100644 (file)
@@ -578,8 +578,8 @@ do {                                                                        \
                       fmt, ## __VA_ARGS__);                            \
                spin_unlock(&(lock));                                   \
                                                                        \
-               set_current_state(TASK_UNINTERRUPTIBLE);                \
-               schedule_timeout(cfs_time_seconds(1) / 10);             \
+               schedule_timeout_uninterruptible(                       \
+                       cfs_time_seconds(1) / 10);                      \
                                                                        \
                spin_lock(&(lock));                                     \
        }                                                               \
@@ -597,8 +597,7 @@ srpc_wait_service_shutdown(struct srpc_service *sv)
                CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
                       "Waiting for %s service to shutdown...\n",
                       sv->sv_name);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1) / 10);
+               schedule_timeout_uninterruptible(cfs_time_seconds(1) / 10);
        }
 }
 
index feacd50..87a04f7 100644 (file)
@@ -2861,8 +2861,8 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
                                cfs_fail_val = 0;
                                wake_up(&cfs_race_waitq);
 
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(cfs_time_seconds(1));
+                               schedule_timeout_interruptible(
+                                       cfs_time_seconds(1));
                        }
                }
 
index 5c7088a..178976b 100644 (file)
@@ -1921,8 +1921,7 @@ static int ldlm_handle_cp_callback(struct ptlrpc_request *req,
                ldlm_callback_reply(req, 0);
 
                while (to > 0) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(to);
+                       schedule_timeout_interruptible(to);
                        if (ldlm_is_granted(lock) ||
                            ldlm_is_destroyed(lock))
                                break;
index 5b2ce0c..490e7c5 100644 (file)
@@ -1056,11 +1056,10 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
                         */
                        unlock_res(res);
                        LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
-                       if (lock->l_flags & LDLM_FL_FAIL_LOC) {
-                               set_current_state(TASK_UNINTERRUPTIBLE);
-                               schedule_timeout(cfs_time_seconds(4));
-                               set_current_state(TASK_RUNNING);
-                       }
+                       if (lock->l_flags & LDLM_FL_FAIL_LOC)
+                               schedule_timeout_uninterruptible(
+                                       cfs_time_seconds(4));
+
                        if (lock->l_completion_ast)
                                lock->l_completion_ast(lock,
                                                       LDLM_FL_FAILED, NULL);
index 72be4f5..9fe3a73 100644 (file)
@@ -3054,8 +3054,7 @@ again:
 
        if (unlikely(rc == -EINPROGRESS)) {
                retry = true;
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_interruptible(cfs_time_seconds(1));
                set_current_state(TASK_RUNNING);
                if (!signal_pending(current) &&
                    thread_is_running(&lfsck->li_thread))
index e8e240b..e384d64 100644 (file)
@@ -807,10 +807,9 @@ void ll_kill_super(struct super_block *sb)
                sb->s_dev = sbi->ll_sdev_orig;
 
                /* wait running statahead threads to quit */
-               while (atomic_read(&sbi->ll_sa_running) > 0) {
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1) >> 3);
-               }
+               while (atomic_read(&sbi->ll_sa_running) > 0)
+                       schedule_timeout_uninterruptible(
+                               cfs_time_seconds(1) >> 3);
        }
 
        EXIT;
index e699603..92c84d9 100644 (file)
@@ -438,8 +438,7 @@ static int lov_io_mirror_init(struct lov_io *lio, struct lov_object *obj,
         */
        if (io->ci_ndelay && io->ci_ndelay_tried > 0 &&
            (io->ci_ndelay_tried % comp->lo_mirror_count == 0)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1) / 100); /* 10ms */
+               schedule_timeout_interruptible(cfs_time_seconds(1) / 100);
                if (signal_pending(current))
                        RETURN(-EINTR);
 
index 5486047..fb9fa54 100644 (file)
@@ -299,7 +299,7 @@ again:
                llog_cat_close(NULL, llh);
                llog_ctxt_put(ctx);
                class_decref(obd, "changelog", crs);
-               schedule_timeout_interruptible(HZ);
+               schedule_timeout_interruptible(cfs_time_seconds(1));
                goto again;
        }
 
index a00a240..acb5d37 100644 (file)
@@ -616,7 +616,8 @@ again:
                         * and to have set mc_gc_task to itself
                         */
                        spin_unlock(&mdd->mdd_cl.mc_lock);
-                       schedule_timeout(usecs_to_jiffies(10));
+                       /* Add a tiny sleep */
+                       schedule_timeout_uninterruptible(1);
                        /* go back to fully check if GC-thread has started or
                         * even already exited or if a new one is starting...
                         */
index b2b29fd..17a5e44 100644 (file)
@@ -3796,10 +3796,8 @@ static int mdt_tgt_connect(struct tgt_session_info *tsi)
 {
        if (OBD_FAIL_CHECK(OBD_FAIL_TGT_DELAY_CONDITIONAL) &&
            cfs_fail_val ==
-           tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(3));
-       }
+           tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id)
+               schedule_timeout_uninterruptible(cfs_time_seconds(3));
 
        return tgt_connect(tsi);
 }
index aca1fec..092ed77 100644 (file)
@@ -1773,8 +1773,7 @@ void obd_exports_barrier(struct obd_device *obd)
        spin_lock(&obd->obd_dev_lock);
        while (!list_empty(&obd->obd_unlinked_exports)) {
                spin_unlock(&obd->obd_dev_lock);
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(waited));
+               schedule_timeout_uninterruptible(cfs_time_seconds(waited));
                if (waited > 5 && is_power_of_2(waited)) {
                        LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
                                      "more than %d seconds. "
index d3ec2f8..d3854db 100644 (file)
@@ -476,8 +476,7 @@ void lustre_deregister_lwp_item(struct obd_export **exp)
                CDEBUG(D_MOUNT, "lri reference count %u, repeat: %d\n",
                       atomic_read(&lri->lri_ref), repeat);
                repeat++;
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_interruptible(cfs_time_seconds(1));
        }
        lustre_put_lwp_item(lri);
 }
@@ -1265,9 +1264,8 @@ again:
                        if ((rc == -ESHUTDOWN || rc == -EIO) && ++tried < 5) {
                                /* The connection with MGS is not established.
                                 * Try again after 2 seconds. Interruptable. */
-                               set_current_state(TASK_INTERRUPTIBLE);
-                               schedule_timeout(cfs_time_seconds(2));
-                               set_current_state(TASK_RUNNING);
+                               schedule_timeout_interruptible(
+                                       cfs_time_seconds(2));
                                if (!signal_pending(current))
                                        goto again;
                        }
index 7609490..b54261b 100644 (file)
@@ -1079,8 +1079,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
                spin_unlock(&ec->ec_lock);
                CERROR(
                       "echo_client still has objects at cleanup time, wait for 1 second\n");
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_uninterruptible(cfs_time_seconds(1));
                lu_site_purge(env, ed->ed_site, -1);
                spin_lock(&ec->ec_lock);
        }
index 93c4e61..c78694d 100644 (file)
@@ -560,7 +560,7 @@ static inline void osp_get_rpc_lock(struct osp_device *osp)
         */
        if (unlikely(lck->rpcl_fakes)) {
                mutex_unlock(&lck->rpcl_mutex);
-               schedule_timeout(cfs_time_seconds(1) / 4);
+               schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
 
                goto again;
        }
index 6e430f8..1e82b9c 100644 (file)
@@ -1295,7 +1295,7 @@ next:
                        rc = llog_cleanup(&env, ctxt);
                        if (rc)
                                GOTO(out, rc);
-                       schedule_timeout_interruptible(HZ * 5);
+                       schedule_timeout_interruptible(cfs_time_seconds(5));
                        goto again;
                }
 
index 1c643ec..4b82735 100644 (file)
@@ -1139,8 +1139,7 @@ int __init gss_init_svc_upcall(void)
        for (i = 0; i < 6; i++) {
                if (channel_users(&rsi_cache) > 0)
                        break;
-               set_current_state(TASK_UNINTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1) / 4);
+               schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
        }
 
        if (channel_users(&rsi_cache) == 0)
index a11ab09..5ead685 100644 (file)
@@ -589,8 +589,7 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
                       "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
                       newctx, newctx->cc_flags);
 
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_interruptible(cfs_time_seconds(1));
        } else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
                            == 0)) {
                /*
index ee73c16..e94cd66 100644 (file)
@@ -176,8 +176,7 @@ retry:
                        "freed:%lu, repeat:%u\n", hash,
                        d.lid_inuse, d.lid_freed, repeat);
                repeat++;
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_interruptible(cfs_time_seconds(1));
                goto retry;
        }
        EXIT;
index 3fff49a..2119271 100644 (file)
@@ -654,10 +654,8 @@ static bool qsd_acquire(const struct lu_env *env, struct lquota_entry *lqe,
 
                /* if we have gotten some quota and stil wait more quota,
                 * it's better to give QMT some time to reclaim from clients */
-               if (count > 0) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       schedule_timeout(cfs_time_seconds(1));
-               }
+               if (count > 0)
+                       schedule_timeout_interruptible(cfs_time_seconds(1));
 
                /* need to acquire more quota space from master */
                rc = qsd_acquire_remote(env, lqe);
index cbdd74e..f50ca84 100644 (file)
@@ -369,8 +369,7 @@ static void qsd_qtype_fini(const struct lu_env *env, struct qsd_instance *qsd,
                CDEBUG(D_QUOTA, "qqi reference count %u, repeat: %d\n",
                       atomic_read(&qqi->qqi_ref), repeat);
                repeat++;
-               set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(cfs_time_seconds(1));
+               schedule_timeout_interruptible(cfs_time_seconds(1));
        }
 
        /* by now, all qqi users should have gone away */