From 5c883ea2748ae9e430a9cd863a9b630b2a74440a Mon Sep 17 00:00:00 2001
From: Mr NeilBrown
Date: Mon, 4 Nov 2019 12:05:32 +1100
Subject: [PATCH 1/1] LU-12930 various: use schedule_timeout_*interruptible

The construct:

	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(time);

is more clearly expressed as:

	schedule_timeout_uninterruptible(time);

and similarly with TASK_INTERRUPTIBLE / schedule_timeout_interruptible().

Establishing this practice makes it harder to forget to call
set_current_state(), as has happened a couple of times - in
lnet_peer_discovery() and mdd_changelog_fini().

Also, there is no need to set_current_state(TASK_RUNNING) after calling
schedule*(); that state is guaranteed to have been set already.

In mdd_changelog_fini() there was an attempt to sleep for 10
microseconds.  This will always round up to 1 jiffy, so just make it
schedule_timeout_uninterruptible(1).

Finally, a few places where the number of seconds was multiplied by 1
have had the '1 *' removed.
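For illustration, the shape of the conversion (a minimal sketch; the
one-second delay is invented for the example and is not taken from any
one file below):

	/* before: open-coded sleep; if the set_current_state() call is
	 * forgotten, the task stays in TASK_RUNNING and
	 * schedule_timeout() returns almost immediately */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(cfs_time_seconds(1));

	/* after: one call sets the state and sleeps, and returns with
	 * the task back in TASK_RUNNING */
	schedule_timeout_uninterruptible(cfs_time_seconds(1));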
Test-Parameters: trivial
Signed-off-by: Mr NeilBrown
Change-Id: I01b37039de0bf7e07480de372c1a4cfe78a8cdd8
Reviewed-on: https://review.whamcloud.com/36656
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Shaun Tancheff
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---
 libcfs/libcfs/fail.c               |  4 ++--
 libcfs/libcfs/tracefile.c          |  3 +--
 libcfs/libcfs/workitem.c           |  8 ++++----
 lnet/klnds/gnilnd/gnilnd.c         | 15 +++++----------
 lnet/klnds/gnilnd/gnilnd_stack.c   | 10 ++++------
 lnet/klnds/gnilnd/gnilnd_sysctl.c  |  3 +--
 lnet/klnds/o2iblnd/o2iblnd.c       | 12 ++++--------
 lnet/klnds/o2iblnd/o2iblnd_cb.c    |  1 -
 lnet/klnds/socklnd/socklnd.c       |  6 ++----
 lnet/klnds/socklnd/socklnd_cb.c    | 15 ++++++---------
 lnet/lnet/acceptor.c               |  4 ++--
 lnet/lnet/api-ni.c                 |  9 +++------
 lnet/lnet/peer.c                   |  5 ++---
 lnet/lnet/router.c                 |  3 +--
 lnet/selftest/conrpc.c             |  3 +--
 lnet/selftest/rpc.c                |  3 +--
 lnet/selftest/selftest.h           |  7 +++----
 lustre/ldlm/ldlm_lib.c             |  4 ++--
 lustre/ldlm/ldlm_lockd.c           |  3 +--
 lustre/ldlm/ldlm_resource.c        |  9 ++++-----
 lustre/lfsck/lfsck_lib.c           |  3 +--
 lustre/llite/llite_lib.c           |  7 +++----
 lustre/lov/lov_io.c                |  3 +--
 lustre/mdc/mdc_changelog.c         |  2 +-
 lustre/mdd/mdd_device.c            |  3 ++-
 lustre/mdt/mdt_handler.c           |  6 ++----
 lustre/obdclass/genops.c           |  3 +--
 lustre/obdclass/obd_mount_server.c |  8 +++-----
 lustre/obdecho/echo_client.c       |  3 +--
 lustre/osp/osp_internal.h          |  2 +-
 lustre/osp/osp_sync.c              |  2 +-
 lustre/ptlrpc/gss/gss_svc_upcall.c |  3 +--
 lustre/ptlrpc/sec.c                |  3 +--
 lustre/quota/lquota_entry.c        |  3 +--
 lustre/quota/qsd_handler.c         |  6 ++----
 lustre/quota/qsd_lib.c             |  3 +--
 36 files changed, 72 insertions(+), 115 deletions(-)

diff --git a/libcfs/libcfs/fail.c b/libcfs/libcfs/fail.c
index 304faae..0478f4a 100644
--- a/libcfs/libcfs/fail.c
+++ b/libcfs/libcfs/fail.c
@@ -129,8 +129,8 @@ int __cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
 	if (ret && likely(ms > 0)) {
 		CERROR("cfs_fail_timeout id %x sleeping for %dms\n", id, ms);
 		while (ktime_before(ktime_get(), till)) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1) / 10);
+			schedule_timeout_uninterruptible(cfs_time_seconds(1)
+							 / 10);
 			set_current_state(TASK_RUNNING);
 			if (!cfs_fail_loc) {
 				CERROR("cfs_fail_timeout interrupted\n");
diff --git a/libcfs/libcfs/tracefile.c b/libcfs/libcfs/tracefile.c
index 0fe7c7f..555a5bb 100644
--- a/libcfs/libcfs/tracefile.c
+++ b/libcfs/libcfs/tracefile.c
@@ -1002,8 +1002,7 @@ end_loop:
 		}
 		init_waitqueue_entry(&__wait, current);
 		add_wait_queue(&tctl->tctl_waitq, &__wait);
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_interruptible(cfs_time_seconds(1));
 		remove_wait_queue(&tctl->tctl_waitq, &__wait);
 	}
 	complete(&tctl->tctl_stop);
diff --git a/libcfs/libcfs/workitem.c b/libcfs/libcfs/workitem.c
index 7768e5c..be11e32 100644
--- a/libcfs/libcfs/workitem.c
+++ b/libcfs/libcfs/workitem.c
@@ -315,8 +315,8 @@ cfs_wi_sched_destroy(struct cfs_wi_sched *sched)
 			       i / 20, sched->ws_nthreads, sched->ws_name);
 			spin_unlock(&cfs_wi_data.wi_glock);
 
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1) / 20);
+			schedule_timeout_uninterruptible(cfs_time_seconds(1)
+							 / 20);
 			spin_lock(&cfs_wi_data.wi_glock);
 		}
 	}
@@ -446,8 +446,8 @@ cfs_wi_shutdown (void)
 
 		while (sched->ws_nthreads != 0) {
 			spin_unlock(&cfs_wi_data.wi_glock);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1) / 20);
+			schedule_timeout_uninterruptible(cfs_time_seconds(1)
+							 / 20);
 			spin_lock(&cfs_wi_data.wi_glock);
 		}
 		spin_unlock(&cfs_wi_data.wi_glock);
diff --git a/lnet/klnds/gnilnd/gnilnd.c b/lnet/klnds/gnilnd/gnilnd.c
index 8b5bc2e..14fb5d6 100644
--- a/lnet/klnds/gnilnd/gnilnd.c
+++ b/lnet/klnds/gnilnd/gnilnd.c
@@ -1598,8 +1598,7 @@ kgnilnd_del_conn_or_peer(kgn_net_t *net, lnet_nid_t nid, int command,
 	       atomic_read(&kgnilnd_data.kgn_npending_detach) ||
 	       atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
 
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 		i++;
 		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 			"Waiting on %d peers %d closes %d detaches\n",
@@ -2464,8 +2463,7 @@ kgnilnd_base_shutdown(void)
 
 		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 			"Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 	}
 	/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
 	 * have to worry about shutdown races. NB connections may be created
@@ -2484,8 +2482,7 @@ kgnilnd_base_shutdown(void)
 		i++;
 		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 		       "Waiting for ruhroh thread to terminate\n");
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 	}
 
 	/* Flag threads to terminate */
@@ -2517,8 +2514,7 @@ kgnilnd_base_shutdown(void)
*/ "Waiting for %d threads to terminate\n", atomic_read(&kgnilnd_data.kgn_nthreads)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); + schedule_timeout_uninterruptible(cfs_time_seconds(1)); } LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0, @@ -2769,8 +2765,7 @@ kgnilnd_shutdown(struct lnet_ni *ni) "Waiting for %d references to clear on net %d\n", atomic_read(&net->gnn_refcount), net->gnn_netnum); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1)); + schedule_timeout_uninterruptible(cfs_time_seconds(1)); } /* release ref from kgnilnd_startup */ diff --git a/lnet/klnds/gnilnd/gnilnd_stack.c b/lnet/klnds/gnilnd/gnilnd_stack.c index bdec685..456d181 100644 --- a/lnet/klnds/gnilnd/gnilnd_stack.c +++ b/lnet/klnds/gnilnd/gnilnd_stack.c @@ -133,8 +133,7 @@ kgnilnd_quiesce_wait(char *reason) atomic_read(&kgnilnd_data.kgn_nthreads) - atomic_read(&kgnilnd_data.kgn_nquiesce)); CFS_RACE(CFS_FAIL_GNI_QUIESCE_RACE); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1 * i)); + schedule_timeout_uninterruptible(cfs_time_seconds(i)); LASSERTF(quiesce_deadline > jiffies, "couldn't quiesce threads in %lu seconds, falling over now\n", @@ -159,8 +158,7 @@ kgnilnd_quiesce_wait(char *reason) "%s: Waiting for %d threads to wake up\n", reason, atomic_read(&kgnilnd_data.kgn_nquiesce)); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1 * i)); + schedule_timeout_uninterruptible(cfs_time_seconds(i)); } CDEBUG(D_INFO, "%s: All threads awake!\n", reason); @@ -422,8 +420,8 @@ kgnilnd_ruhroh_thread(void *arg) i++; CDEBUG(D_INFO, "Waiting for hardware quiesce " "flag to clear\n"); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1 * i)); + schedule_timeout_uninterruptible( + cfs_time_seconds(i)); /* If we got a quiesce event with bump info, DO THE BUMP!. */ if (kgnilnd_data.kgn_bump_info_rdy) { diff --git a/lnet/klnds/gnilnd/gnilnd_sysctl.c b/lnet/klnds/gnilnd/gnilnd_sysctl.c index 56241b1..4b472f1 100644 --- a/lnet/klnds/gnilnd/gnilnd_sysctl.c +++ b/lnet/klnds/gnilnd/gnilnd_sysctl.c @@ -133,8 +133,7 @@ proc_trigger_stack_reset(struct ctl_table *table, int write, i++; LCONSOLE((((i) & (-i)) == i) ? D_WARNING : D_NET, "Waiting for stack reset request to clear\n"); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1 * i)); + schedule_timeout_uninterruptible(cfs_time_seconds(i)); } RETURN(rc); diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c index 68de736..aaa9ad39 100644 --- a/lnet/klnds/o2iblnd/o2iblnd.c +++ b/lnet/klnds/o2iblnd/o2iblnd.c @@ -1368,8 +1368,7 @@ kiblnd_current_hdev(struct kib_dev *dev) if (i++ % 50 == 0) CDEBUG(D_NET, "%s: Wait for failover\n", dev->ibd_ifname); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(cfs_time_seconds(1) / 100); + schedule_timeout_interruptible(cfs_time_seconds(1) / 100); read_lock_irqsave(&kiblnd_data.kib_global_lock, flags); } @@ -2177,8 +2176,7 @@ again: "trips = %d\n", ps->ps_name, interval, trips); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(interval); + schedule_timeout_interruptible(interval); if (interval < cfs_time_seconds(1)) interval *= 2; @@ -2989,8 +2987,7 @@ kiblnd_base_shutdown(void) CDEBUG(((i & (-i)) == i) ? 
 			CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
 			       "Waiting for %d threads to terminate\n",
 			       atomic_read(&kiblnd_data.kib_nthreads));
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout_uninterruptible(cfs_time_seconds(1));
 		}
 
 		/* fall through */
@@ -3052,8 +3049,7 @@ kiblnd_shutdown(struct lnet_ni *ni)
 			       "%s: waiting for %d peers to disconnect\n",
 			       libcfs_nid2str(ni->ni_nid),
 			       atomic_read(&net->ibn_npeers));
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout_uninterruptible(cfs_time_seconds(1));
 		}
 
 		kiblnd_net_fini_pools(net);
diff --git a/lnet/klnds/o2iblnd/o2iblnd_cb.c b/lnet/klnds/o2iblnd/o2iblnd_cb.c
index a370ed5..674f66c0 100644
--- a/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -3629,7 +3629,6 @@ kiblnd_connd (void *arg)
 
 		schedule_timeout(timeout);
 
-		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
 		spin_lock_irqsave(lock, flags);
 	}
diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c
index 17115c1..96c16fd 100644
--- a/lnet/klnds/socklnd/socklnd.c
+++ b/lnet/klnds/socklnd/socklnd.c
@@ -2216,8 +2216,7 @@ ksocknal_base_shutdown(void)
 				       "waiting for %d threads to terminate\n",
 				       ksocknal_data.ksnd_nthreads);
 			read_unlock(&ksocknal_data.ksnd_global_lock);
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout_uninterruptible(cfs_time_seconds(1));
 			read_lock(&ksocknal_data.ksnd_global_lock);
 		}
 		read_unlock(&ksocknal_data.ksnd_global_lock);
@@ -2425,8 +2424,7 @@ ksocknal_shutdown(struct lnet_ni *ni)
 		CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
 		       "waiting for %d peers to disconnect\n",
 		       atomic_read(&net->ksnn_npeers) - SOCKNAL_SHUTDOWN_BIAS);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 
 		ksocknal_debug_peerhash(ni);
 	}
diff --git a/lnet/klnds/socklnd/socklnd_cb.c b/lnet/klnds/socklnd/socklnd_cb.c
index 134aaff..bf62b22 100644
--- a/lnet/klnds/socklnd/socklnd_cb.c
+++ b/lnet/klnds/socklnd/socklnd_cb.c
@@ -194,10 +194,9 @@ ksocknal_transmit(struct ksock_conn *conn, struct ksock_tx *tx,
 	int rc;
 	int bufnob;
 
-	if (ksocknal_data.ksnd_stall_tx != 0) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
-	}
+	if (ksocknal_data.ksnd_stall_tx != 0)
+		schedule_timeout_uninterruptible(
+			cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
 
 	LASSERT(tx->tx_resid != 0);
 
@@ -354,10 +353,9 @@ ksocknal_receive(struct ksock_conn *conn, struct page **rx_scratch_pgs,
 	int rc;
 	ENTRY;
 
-	if (ksocknal_data.ksnd_stall_rx != 0) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
-	}
+	if (ksocknal_data.ksnd_stall_rx != 0)
+		schedule_timeout_uninterruptible(
+			cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
 
 	rc = ksocknal_connsock_addref(conn);
 	if (rc != 0) {
@@ -2315,7 +2313,6 @@ ksocknal_connd(void *arg)
 			nloops = 0;
 
 			schedule_timeout(timeout);
-			set_current_state(TASK_RUNNING);
 			remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
 			spin_lock_bh(connd_lock);
 		}
diff --git a/lnet/lnet/acceptor.c b/lnet/lnet/acceptor.c
index 94d16fa..3659d0c 100644
--- a/lnet/lnet/acceptor.c
+++ b/lnet/lnet/acceptor.c
@@ -377,8 +377,8 @@ lnet_acceptor(void *arg)
 		if (rc != 0) {
 			if (rc != -EAGAIN) {
 				CWARN("Accept error %d: pausing...\n", rc);
-				set_current_state(TASK_UNINTERRUPTIBLE);
-				schedule_timeout(cfs_time_seconds(1));
+				schedule_timeout_uninterruptible(
+					cfs_time_seconds(1));
 			}
 			continue;
 		}
diff --git a/lnet/lnet/api-ni.c b/lnet/lnet/api-ni.c
index f09bc5f..d39e6b9 100644
--- a/lnet/lnet/api-ni.c
+++ b/lnet/lnet/api-ni.c
@@ -1749,8 +1749,7 @@ lnet_ping_md_unlink(struct lnet_ping_buffer *pbuf,
 	/* NB the MD could be busy; this just starts the unlink */
 	while (atomic_read(&pbuf->pb_refcnt) > 1) {
 		CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 	}
 
 	cfs_restore_sigs(blocked);
@@ -1983,8 +1982,7 @@ static void lnet_push_target_fini(void)
 	/* Wait for the unlink to complete. */
 	while (atomic_read(&the_lnet.ln_push_target->pb_refcnt) > 1) {
 		CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 	}
 
 	lnet_ping_buffer_decref(the_lnet.ln_push_target);
@@ -2060,8 +2058,7 @@ lnet_clear_zombies_nis_locked(struct lnet_net *net)
 				"Waiting for zombie LNI %s\n",
 				libcfs_nid2str(ni->ni_nid));
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 		lnet_net_lock(LNET_LOCK_EX);
 		continue;
 	}
diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c
index 4e6a3e5..8570b5d 100644
--- a/lnet/lnet/peer.c
+++ b/lnet/lnet/peer.c
@@ -587,8 +587,7 @@ lnet_peer_ni_finalize_wait(struct lnet_peer_table *ptable)
 				"Waiting for %d zombies on peer table\n",
 				ptable->pt_zombies);
 		}
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1) >> 1);
+		schedule_timeout_uninterruptible(cfs_time_seconds(1) >> 1);
 		spin_lock(&ptable->pt_zombie_lock);
 	}
 	spin_unlock(&ptable->pt_zombie_lock);
@@ -3389,7 +3388,7 @@ static int lnet_peer_discovery(void *arg)
 
 	/* Queue cleanup 2: wait for the expired queue to clear. */
 	while (!list_empty(&the_lnet.ln_dc_expired))
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 
 	/* Queue cleanup 3: clear the request queue. */
 	lnet_net_lock(LNET_LOCK_EX);
diff --git a/lnet/lnet/router.c b/lnet/lnet/router.c
index cae8da6..4ab65f7 100644
--- a/lnet/lnet/router.c
+++ b/lnet/lnet/router.c
@@ -940,8 +940,7 @@ lnet_wait_known_routerstate(void)
 		if (all_known)
 			return;
 
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 	}
 }
 
diff --git a/lnet/selftest/conrpc.c b/lnet/selftest/conrpc.c
index 268b2ff..8efacc0 100644
--- a/lnet/selftest/conrpc.c
+++ b/lnet/selftest/conrpc.c
@@ -1349,8 +1349,7 @@ lstcon_rpc_cleanup_wait(void)
 		CWARN("Session is shutting down, "
 		      "waiting for termination of transactions\n");
 
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 
 		mutex_lock(&console_session.ses_mutex);
 	}
diff --git a/lnet/selftest/rpc.c b/lnet/selftest/rpc.c
index 47a89fa..b3ef75a 100644
--- a/lnet/selftest/rpc.c
+++ b/lnet/selftest/rpc.c
@@ -1608,8 +1608,7 @@ srpc_startup (void)
 	spin_lock_init(&srpc_data.rpc_glock);
 
 	/* 1 second pause to avoid timestamp reuse */
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	schedule_timeout(cfs_time_seconds(1));
+	schedule_timeout_uninterruptible(cfs_time_seconds(1));
 	srpc_data.rpc_matchbits = ((__u64) ktime_get_real_seconds()) << 48;
 
 	srpc_data.rpc_state = SRPC_STATE_NONE;
diff --git a/lnet/selftest/selftest.h b/lnet/selftest/selftest.h
index 1a39cd7..a1574b2 100644
--- a/lnet/selftest/selftest.h
+++ b/lnet/selftest/selftest.h
@@ -578,8 +578,8 @@ do {									\
 			fmt, ## __VA_ARGS__);				\
 		spin_unlock(&(lock));					\
 									\
-		set_current_state(TASK_UNINTERRUPTIBLE);		\
-		schedule_timeout(cfs_time_seconds(1) / 10);		\
+		schedule_timeout_uninterruptible(			\
+			cfs_time_seconds(1) / 10);			\
 									\
 		spin_lock(&(lock));					\
 	}								\
@@ -597,8 +597,7 @@ srpc_wait_service_shutdown(struct srpc_service *sv)
 		CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
 		       "Waiting for %s service to shutdown...\n",
 		       sv->sv_name);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1) / 10);
+		schedule_timeout_uninterruptible(cfs_time_seconds(1) / 10);
 	}
 }
 
diff --git a/lustre/ldlm/ldlm_lib.c b/lustre/ldlm/ldlm_lib.c
index feacd50..87a04f7 100644
--- a/lustre/ldlm/ldlm_lib.c
+++ b/lustre/ldlm/ldlm_lib.c
@@ -2861,8 +2861,8 @@ int target_queue_recovery_request(struct ptlrpc_request *req,
 
 			cfs_fail_val = 0;
 			wake_up(&cfs_race_waitq);
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
+			schedule_timeout_interruptible(
+				cfs_time_seconds(1));
 		}
 	}
 
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index 5c7088a..178976b 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -1921,8 +1921,7 @@ static int ldlm_handle_cp_callback(struct ptlrpc_request *req,
 		ldlm_callback_reply(req, 0);
 
 		while (to > 0) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(to);
+			schedule_timeout_interruptible(to);
 			if (ldlm_is_granted(lock) ||
 			    ldlm_is_destroyed(lock))
 				break;
diff --git a/lustre/ldlm/ldlm_resource.c b/lustre/ldlm/ldlm_resource.c
index 5b2ce0c..490e7c5 100644
--- a/lustre/ldlm/ldlm_resource.c
+++ b/lustre/ldlm/ldlm_resource.c
@@ -1056,11 +1056,10 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
 			 */
 			unlock_res(res);
 			LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
-			if (lock->l_flags & LDLM_FL_FAIL_LOC) {
-				set_current_state(TASK_UNINTERRUPTIBLE);
-				schedule_timeout(cfs_time_seconds(4));
-				set_current_state(TASK_RUNNING);
-			}
+			if (lock->l_flags & LDLM_FL_FAIL_LOC)
+				schedule_timeout_uninterruptible(
+					cfs_time_seconds(4));
+
 			if (lock->l_completion_ast)
 				lock->l_completion_ast(lock, LDLM_FL_FAILED,
 						       NULL);
diff --git a/lustre/lfsck/lfsck_lib.c b/lustre/lfsck/lfsck_lib.c
index 72be4f5..9fe3a73 100644
--- a/lustre/lfsck/lfsck_lib.c
+++ b/lustre/lfsck/lfsck_lib.c
@@ -3054,8 +3054,7 @@ again:
 
 	if (unlikely(rc == -EINPROGRESS)) {
 		retry = true;
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_interruptible(cfs_time_seconds(1));
 		set_current_state(TASK_RUNNING);
 		if (!signal_pending(current) &&
 		    thread_is_running(&lfsck->li_thread))
diff --git a/lustre/llite/llite_lib.c b/lustre/llite/llite_lib.c
index e8e240b..e384d64 100644
--- a/lustre/llite/llite_lib.c
+++ b/lustre/llite/llite_lib.c
@@ -807,10 +807,9 @@ void ll_kill_super(struct super_block *sb)
 		sb->s_dev = sbi->ll_sdev_orig;
 
 		/* wait running statahead threads to quit */
-		while (atomic_read(&sbi->ll_sa_running) > 0) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1) >> 3);
-		}
+		while (atomic_read(&sbi->ll_sa_running) > 0)
+			schedule_timeout_uninterruptible(
+				cfs_time_seconds(1) >> 3);
 	}
 
 	EXIT;
diff --git a/lustre/lov/lov_io.c b/lustre/lov/lov_io.c
index e699603..92c84d9 100644
--- a/lustre/lov/lov_io.c
+++ b/lustre/lov/lov_io.c
@@ -438,8 +438,7 @@ static int lov_io_mirror_init(struct lov_io *lio, struct lov_object *obj,
 	 */
 	if (io->ci_ndelay && io->ci_ndelay_tried > 0 &&
 	    (io->ci_ndelay_tried % comp->lo_mirror_count == 0)) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1) / 100); /* 10ms */
+		schedule_timeout_interruptible(cfs_time_seconds(1) / 100);
 		if (signal_pending(current))
 			RETURN(-EINTR);
 
diff --git a/lustre/mdc/mdc_changelog.c b/lustre/mdc/mdc_changelog.c
index 5486047..fb9fa54 100644
--- a/lustre/mdc/mdc_changelog.c
+++ b/lustre/mdc/mdc_changelog.c
@@ -299,7 +299,7 @@ again:
 		llog_cat_close(NULL, llh);
 		llog_ctxt_put(ctx);
 		class_decref(obd, "changelog", crs);
-		schedule_timeout_interruptible(HZ);
+		schedule_timeout_interruptible(cfs_time_seconds(1));
 		goto again;
 	}
 
diff --git a/lustre/mdd/mdd_device.c b/lustre/mdd/mdd_device.c
index a00a240..acb5d37 100644
--- a/lustre/mdd/mdd_device.c
+++ b/lustre/mdd/mdd_device.c
@@ -616,7 +616,8 @@ again:
 		 * and to have set mc_gc_task to itself
 		 */
 		spin_unlock(&mdd->mdd_cl.mc_lock);
-		schedule_timeout(usecs_to_jiffies(10));
+		/* Add a tiny sleep */
+		schedule_timeout_uninterruptible(1);
 		/* go back to fully check if GC-thread has started or
 		 * even already exited or if a new one is starting...
 		 */
diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c
index b2b29fd..17a5e44 100644
--- a/lustre/mdt/mdt_handler.c
+++ b/lustre/mdt/mdt_handler.c
@@ -3796,10 +3796,8 @@ static int mdt_tgt_connect(struct tgt_session_info *tsi)
 {
 	if (OBD_FAIL_CHECK(OBD_FAIL_TGT_DELAY_CONDITIONAL) &&
 	    cfs_fail_val ==
-	    tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id) {
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(3));
-	}
+	    tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id)
+		schedule_timeout_uninterruptible(cfs_time_seconds(3));
 
 	return tgt_connect(tsi);
 }
diff --git a/lustre/obdclass/genops.c b/lustre/obdclass/genops.c
index aca1fec..092ed77 100644
--- a/lustre/obdclass/genops.c
+++ b/lustre/obdclass/genops.c
@@ -1773,8 +1773,7 @@ void obd_exports_barrier(struct obd_device *obd)
 	spin_lock(&obd->obd_dev_lock);
 	while (!list_empty(&obd->obd_unlinked_exports)) {
 		spin_unlock(&obd->obd_dev_lock);
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(waited));
+		schedule_timeout_uninterruptible(cfs_time_seconds(waited));
 		if (waited > 5 && is_power_of_2(waited)) {
 			LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
 				      "more than %d seconds. "
diff --git a/lustre/obdclass/obd_mount_server.c b/lustre/obdclass/obd_mount_server.c
index d3ec2f8..d3854db 100644
--- a/lustre/obdclass/obd_mount_server.c
+++ b/lustre/obdclass/obd_mount_server.c
@@ -476,8 +476,7 @@ void lustre_deregister_lwp_item(struct obd_export **exp)
 		CDEBUG(D_MOUNT, "lri reference count %u, repeat: %d\n",
 		       atomic_read(&lri->lri_ref), repeat);
 		repeat++;
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_interruptible(cfs_time_seconds(1));
 	}
 	lustre_put_lwp_item(lri);
 }
@@ -1265,9 +1264,8 @@ again:
 		if ((rc == -ESHUTDOWN || rc == -EIO) && ++tried < 5) {
 			/* The connection with MGS is not established.
 			 * Try again after 2 seconds. Interruptable. */
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(2));
-			set_current_state(TASK_RUNNING);
+			schedule_timeout_interruptible(
+				cfs_time_seconds(2));
 			if (!signal_pending(current))
 				goto again;
 		}
 	}
diff --git a/lustre/obdecho/echo_client.c b/lustre/obdecho/echo_client.c
index 7609490..b54261b 100644
--- a/lustre/obdecho/echo_client.c
+++ b/lustre/obdecho/echo_client.c
@@ -1079,8 +1079,7 @@ static struct lu_device *echo_device_free(const struct lu_env *env,
 		spin_unlock(&ec->ec_lock);
 		CERROR(
 		       "echo_client still has objects at cleanup time, wait for 1 second\n");
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_uninterruptible(cfs_time_seconds(1));
 		lu_site_purge(env, ed->ed_site, -1);
 		spin_lock(&ec->ec_lock);
 	}
diff --git a/lustre/osp/osp_internal.h b/lustre/osp/osp_internal.h
index 93c4e61..c78694d 100644
--- a/lustre/osp/osp_internal.h
+++ b/lustre/osp/osp_internal.h
@@ -560,7 +560,7 @@ static inline void osp_get_rpc_lock(struct osp_device *osp)
 	 */
 	if (unlikely(lck->rpcl_fakes)) {
 		mutex_unlock(&lck->rpcl_mutex);
-		schedule_timeout(cfs_time_seconds(1) / 4);
+		schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
 		goto again;
 	}
 
diff --git a/lustre/osp/osp_sync.c b/lustre/osp/osp_sync.c
index 6e430f8..1e82b9c 100644
--- a/lustre/osp/osp_sync.c
+++ b/lustre/osp/osp_sync.c
@@ -1295,7 +1295,7 @@ next:
 		rc = llog_cleanup(&env, ctxt);
 		if (rc)
 			GOTO(out, rc);
-		schedule_timeout_interruptible(HZ * 5);
+		schedule_timeout_interruptible(cfs_time_seconds(5));
 		goto again;
 	}
 
diff --git a/lustre/ptlrpc/gss/gss_svc_upcall.c b/lustre/ptlrpc/gss/gss_svc_upcall.c
index 1c643ec..4b82735 100644
--- a/lustre/ptlrpc/gss/gss_svc_upcall.c
+++ b/lustre/ptlrpc/gss/gss_svc_upcall.c
@@ -1139,8 +1139,7 @@ int __init gss_init_svc_upcall(void)
 	for (i = 0; i < 6; i++) {
 		if (channel_users(&rsi_cache) > 0)
 			break;
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1) / 4);
+		schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
 	}
 
 	if (channel_users(&rsi_cache) == 0)
diff --git a/lustre/ptlrpc/sec.c b/lustre/ptlrpc/sec.c
index a11ab09..5ead685 100644
--- a/lustre/ptlrpc/sec.c
+++ b/lustre/ptlrpc/sec.c
@@ -589,8 +589,7 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
 			  "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
 			  newctx, newctx->cc_flags);
 
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_interruptible(cfs_time_seconds(1));
 	} else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT,
 				     &newctx->cc_flags) == 0)) {
 		/*
diff --git a/lustre/quota/lquota_entry.c b/lustre/quota/lquota_entry.c
index ee73c16..e94cd66 100644
--- a/lustre/quota/lquota_entry.c
+++ b/lustre/quota/lquota_entry.c
@@ -176,8 +176,7 @@ retry:
 		       "freed:%lu, repeat:%u\n", hash,
 		       d.lid_inuse, d.lid_freed, repeat);
 		repeat++;
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_interruptible(cfs_time_seconds(1));
 		goto retry;
 	}
 	EXIT;
diff --git a/lustre/quota/qsd_handler.c b/lustre/quota/qsd_handler.c
index 3fff49a..2119271 100644
--- a/lustre/quota/qsd_handler.c
+++ b/lustre/quota/qsd_handler.c
@@ -654,10 +654,8 @@ static bool qsd_acquire(const struct lu_env *env, struct lquota_entry *lqe,
 		/* if we have gotten some quota and stil wait more quota,
 		 * it's better to give QMT some time to reclaim from clients
 		 */
-		if (count > 0) {
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(cfs_time_seconds(1));
-		}
+		if (count > 0)
+			schedule_timeout_interruptible(cfs_time_seconds(1));
 
 		/* need to acquire more quota space from master */
 		rc = qsd_acquire_remote(env, lqe);
diff --git a/lustre/quota/qsd_lib.c b/lustre/quota/qsd_lib.c
index cbdd74e..f50ca84 100644
--- a/lustre/quota/qsd_lib.c
+++ b/lustre/quota/qsd_lib.c
@@ -369,8 +369,7 @@ static void qsd_qtype_fini(const struct lu_env *env, struct qsd_instance *qsd,
 		CDEBUG(D_QUOTA, "qqi reference count %u, repeat: %d\n",
 		       atomic_read(&qqi->qqi_ref), repeat);
 		repeat++;
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(cfs_time_seconds(1));
+		schedule_timeout_interruptible(cfs_time_seconds(1));
 	}
 
 	/* by now, all qqi users should have gone away */
-- 
1.8.3.1