if (ret && likely(ms > 0)) {
CERROR("cfs_fail_timeout id %x sleeping for %dms\n", id, ms);
while (ktime_before(ktime_get(), till)) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 10);
+ schedule_timeout_uninterruptible(cfs_time_seconds(1)
+ / 10);
set_current_state(TASK_RUNNING);
if (!cfs_fail_loc) {
CERROR("cfs_fail_timeout interrupted\n");
break;
}
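
Throughout this patch, an explicit set_current_state() followed by schedule_timeout() collapses into a single call. For reference, the helpers being substituted are thin wrappers around schedule_timeout(); sketched here from memory of the mainline kernel/time/timer.c:

/* mainline helpers (kernel/time/timer.c), quoted from memory */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
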
init_waitqueue_entry(&__wait, current);
add_wait_queue(&tctl->tctl_waitq, &__wait);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_interruptible(cfs_time_seconds(1));
remove_wait_queue(&tctl->tctl_waitq, &__wait);
}
complete(&tctl->tctl_stop);
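
The tracefiled hunk above still open-codes the prepare/sleep/remove sequence around the new helper. Had the wake-up condition been visible in this hunk, the whole dance could shrink to one macro; a hypothetical rendering, where tctl_should_stop() is an invented stand-in for the loop's real condition, not a real symbol:

/* hypothetical: tctl_should_stop() is a placeholder condition */
wait_event_interruptible_timeout(tctl->tctl_waitq,
				 tctl_should_stop(tctl),
				 cfs_time_seconds(1));
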
i / 20, sched->ws_nthreads, sched->ws_name);
spin_unlock(&cfs_wi_data.wi_glock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 20);
+ schedule_timeout_uninterruptible(cfs_time_seconds(1)
+ / 20);
spin_lock(&cfs_wi_data.wi_glock);
}
}
while (sched->ws_nthreads != 0) {
spin_unlock(&cfs_wi_data.wi_glock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 20);
+ schedule_timeout_uninterruptible(cfs_time_seconds(1)
+ / 20);
spin_lock(&cfs_wi_data.wi_glock);
}
spin_unlock(&cfs_wi_data.wi_glock);
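
Both workitem loops follow the same shutdown idiom: sleeping with a spinlock held would deadlock (or at least trip the scheduler's atomic-context checks), so the lock is dropped for the nap and retaken before the condition is re-tested. Consolidated, using the names from the hunks above:

spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_nthreads != 0) {
	/* cannot sleep in atomic context: drop the lock first */
	spin_unlock(&cfs_wi_data.wi_glock);
	schedule_timeout_uninterruptible(cfs_time_seconds(1) / 20);
	/* retake it before re-reading ws_nthreads */
	spin_lock(&cfs_wi_data.wi_glock);
}
spin_unlock(&cfs_wi_data.wi_glock);
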
atomic_read(&kgnilnd_data.kgn_npending_detach) ||
atomic_read(&kgnilnd_data.kgn_npending_unlink)) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, "Waiting on %d peers %d closes %d detaches\n",
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for conns to be cleaned up %d\n",atomic_read(&kgnilnd_data.kgn_nconns));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
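
The (i & (-i)) == i test recurring in these kgnilnd waits is the two's-complement power-of-two check: i & -i isolates the lowest set bit, which equals i exactly when a single bit is set, so the message escalates to D_WARNING only on iterations 1, 2, 4, 8, ... and grows exponentially rarer as the wait drags on. A standalone illustration:

#include <stdio.h>

/* true when exactly one bit of i is set; note it also accepts 0,
 * which these call sites avoid by incrementing i before testing */
static int log_loudly(unsigned int i)
{
	return (i & (-i)) == i;
}

int main(void)
{
	unsigned int i;

	for (i = 1; i <= 20; i++)
		if (log_loudly(i))
			printf("iteration %u -> D_WARNING\n", i);
	return 0;	/* prints 1, 2, 4, 8, 16 */
}

In-tree code would normally spell this is_power_of_2(i) from <linux/log2.h>.
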
/* Peer state all cleaned up BEFORE setting shutdown, so threads don't
* have to worry about shutdown races. NB connections may be created
i++;
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for ruhroh thread to terminate\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* Flag threads to terminate */
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"Waiting for %d threads to terminate\n",
atomic_read(&kgnilnd_data.kgn_nthreads));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
LASSERTF(atomic_read(&kgnilnd_data.kgn_npeers) == 0,
"Waiting for %d references to clear on net %d\n",
atomic_read(&net->gnn_refcount),
net->gnn_netnum);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* release ref from kgnilnd_startup */
atomic_read(&kgnilnd_data.kgn_nthreads) -
atomic_read(&kgnilnd_data.kgn_nquiesce));
CFS_RACE(CFS_FAIL_GNI_QUIESCE_RACE);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1 * i));
+ schedule_timeout_uninterruptible(cfs_time_seconds(i));
LASSERTF(quiesce_deadline > jiffies,
"couldn't quiesce threads in %lu seconds, falling over now\n",
"%s: Waiting for %d threads to wake up\n",
reason,
atomic_read(&kgnilnd_data.kgn_nquiesce));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1 * i));
+ schedule_timeout_uninterruptible(cfs_time_seconds(i));
}
CDEBUG(D_INFO, "%s: All threads awake!\n", reason);
i++;
CDEBUG(D_INFO, "Waiting for hardware quiesce "
"flag to clear\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1 * i));
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(i));
/* If we got a quiesce event with bump info, DO THE BUMP!. */
if (kgnilnd_data.kgn_bump_info_rdy) {
i++;
LCONSOLE((((i) & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for stack reset request to clear\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1 * i));
+ schedule_timeout_uninterruptible(cfs_time_seconds(i));
}
RETURN(rc);
if (i++ % 50 == 0)
CDEBUG(D_NET, "%s: Wait for failover\n",
dev->ibd_ifname);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 100);
+ schedule_timeout_interruptible(cfs_time_seconds(1) / 100);
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
}
"trips = %d\n",
ps->ps_name, interval, trips);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(interval);
+ schedule_timeout_interruptible(interval);
if (interval < cfs_time_seconds(1))
interval *= 2;
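
This pool-grower hunk sleeps interruptibly and doubles the interval after each miss, stopping the doubling once it reaches a second, so a briefly busy pool is re-checked quickly while a stuck one is polled gently. The skeleton, with pool_ready() as a hypothetical stand-in for the real test:

/* capped exponential backoff; pool_ready() is a placeholder */
long interval = cfs_time_seconds(1) / 100;	/* start around 10ms */

while (!pool_ready(ps)) {
	schedule_timeout_interruptible(interval);
	if (interval < cfs_time_seconds(1))
		interval *= 2;		/* back off until ~1s */
}
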
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET,
"Waiting for %d threads to terminate\n",
atomic_read(&kiblnd_data.kib_nthreads));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
/* fall through */
"%s: waiting for %d peers to disconnect\n",
libcfs_nid2str(ni->ni_nid),
atomic_read(&net->ibn_npeers));
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
kiblnd_net_fini_pools(net);
schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
remove_wait_queue(&kiblnd_data.kib_connd_waitq, &wait);
spin_lock_irqsave(lock, flags);
}
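
Two hunks in this patch (here and in the ksocknal connd below) delete a set_current_state(TASK_RUNNING) after schedule_timeout(). That line was always a no-op: the timer that schedule_timeout() arms wakes the sleeper from its handler, and an early wake-up leaves the task runnable too, so schedule_timeout() is documented to return with the task in TASK_RUNNING. Abridged from memory of kernel/time/timer.c:

/* the handler behind schedule_timeout(): it wakes the task, so the
 * caller is back in TASK_RUNNING before schedule_timeout() returns */
static void process_timeout(struct timer_list *t)
{
	struct process_timer *timeout = from_timer(timeout, t, timer);

	wake_up_process(timeout->task);
}
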
"waiting for %d threads to terminate\n",
ksocknal_data.ksnd_nthreads);
read_unlock(&ksocknal_data.ksnd_global_lock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
read_lock(&ksocknal_data.ksnd_global_lock);
}
read_unlock(&ksocknal_data.ksnd_global_lock);
CDEBUG(((i & (-i)) == i) ? D_WARNING : D_NET, /* power of 2? */
"waiting for %d peers to disconnect\n",
atomic_read(&net->ksnn_npeers) - SOCKNAL_SHUTDOWN_BIAS);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
ksocknal_debug_peerhash(ni);
}
int rc;
int bufnob;
- if (ksocknal_data.ksnd_stall_tx != 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
- }
+ if (ksocknal_data.ksnd_stall_tx != 0)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(ksocknal_data.ksnd_stall_tx));
LASSERT(tx->tx_resid != 0);
int rc;
ENTRY;
- if (ksocknal_data.ksnd_stall_rx != 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
- }
+ if (ksocknal_data.ksnd_stall_rx != 0)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(ksocknal_data.ksnd_stall_rx));
rc = ksocknal_connsock_addref(conn);
if (rc != 0) {
nloops = 0;
schedule_timeout(timeout);
- set_current_state(TASK_RUNNING);
remove_wait_queue(&ksocknal_data.ksnd_connd_waitq, &wait);
spin_lock_bh(connd_lock);
}
if (rc != 0) {
if (rc != -EAGAIN) {
CWARN("Accept error %d: pausing...\n", rc);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(1));
}
continue;
}
/* NB the MD could be busy; this just starts the unlink */
while (atomic_read(&pbuf->pb_refcnt) > 1) {
CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
cfs_restore_sigs(blocked);
/* Wait for the unlink to complete. */
while (atomic_read(&the_lnet.ln_push_target->pb_refcnt) > 1) {
CDEBUG(D_NET, "Still waiting for ping data MD to unlink\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
lnet_ping_buffer_decref(the_lnet.ln_push_target);
"Waiting for zombie LNI %s\n",
libcfs_nid2str(ni->ni_nid));
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
lnet_net_lock(LNET_LOCK_EX);
continue;
}
"Waiting for %d zombies on peer table\n",
ptable->pt_zombies);
}
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) >> 1);
+ schedule_timeout_uninterruptible(cfs_time_seconds(1) >> 1);
spin_lock(&ptable->pt_zombie_lock);
}
spin_unlock(&ptable->pt_zombie_lock);
/* Queue cleanup 2: wait for the expired queue to clear. */
while (!list_empty(&the_lnet.ln_dc_expired))
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
/* Queue cleanup 3: clear the request queue. */
lnet_net_lock(LNET_LOCK_EX);
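
The ln_dc_expired loop just converted is one place where the old code called schedule_timeout() without setting a task state at all. That is the classic pitfall: with the task still in TASK_RUNNING, schedule_timeout() arms its timer and calls schedule(), but the task never leaves the runqueue, so the "sleep" degenerates into a yield-and-spin. Side by side:

/* pre-patch: state is TASK_RUNNING, returns almost immediately */
schedule_timeout(cfs_time_seconds(1));

/* post-patch: state is set first, so this really sleeps ~1s */
schedule_timeout_uninterruptible(cfs_time_seconds(1));
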
if (all_known)
return;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
}
}
CWARN("Session is shutting down, "
"waiting for termination of transactions\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
mutex_lock(&console_session.ses_mutex);
}
spin_lock_init(&srpc_data.rpc_glock);
/* 1 second pause to avoid timestamp reuse */
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
srpc_data.rpc_matchbits = ((__u64) ktime_get_real_seconds()) << 48;
srpc_data.rpc_state = SRPC_STATE_NONE;
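
The one-second pause above exists because srpc match bits are seeded from a seconds-granularity clock. My reading of the seeding line: the shift keeps only the low 16 bits of the timestamp, placed at the top of the 64-bit field, leaving 48 bits of space for per-RPC increments:

/*
 *  63              48 47                              0
 * +------------------+--------------------------------+
 * | seconds mod 2^16 | 48 bits for per-RPC increments |
 * +------------------+--------------------------------+
 *
 * sleeping a full second before sampling the clock guarantees a
 * restarted instance seeds with a different 16-bit timestamp, so its
 * match bits cannot collide with the previous instance's (until the
 * 16-bit field wraps, roughly every 18 hours)
 */
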
fmt, ## __VA_ARGS__); \
spin_unlock(&(lock)); \
\
- set_current_state(TASK_UNINTERRUPTIBLE); \
- schedule_timeout(cfs_time_seconds(1) / 10); \
+ schedule_timeout_uninterruptible( \
+ cfs_time_seconds(1) / 10); \
\
spin_lock(&(lock)); \
} \
CDEBUG(((i & -i) == i) ? D_WARNING : D_NET,
"Waiting for %s service to shutdown...\n",
sv->sv_name);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 10);
+ schedule_timeout_uninterruptible(cfs_time_seconds(1) / 10);
}
}
cfs_fail_val = 0;
wake_up(&cfs_race_waitq);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_interruptible(
+ cfs_time_seconds(1));
}
}
ldlm_callback_reply(req, 0);
while (to > 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(to);
+ schedule_timeout_interruptible(to);
if (ldlm_is_granted(lock) ||
ldlm_is_destroyed(lock))
break;
*/
unlock_res(res);
LDLM_DEBUG(lock, "setting FL_LOCAL_ONLY");
- if (lock->l_flags & LDLM_FL_FAIL_LOC) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(4));
- set_current_state(TASK_RUNNING);
- }
+ if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(4));
+
if (lock->l_completion_ast)
lock->l_completion_ast(lock,
LDLM_FL_FAILED, NULL);
if (unlikely(rc == -EINPROGRESS)) {
retry = true;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_interruptible(cfs_time_seconds(1));
set_current_state(TASK_RUNNING);
if (!signal_pending(current) &&
thread_is_running(&lfsck->li_thread))
sb->s_dev = sbi->ll_sdev_orig;
/* wait running statahead threads to quit */
- while (atomic_read(&sbi->ll_sa_running) > 0) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) >> 3);
- }
+ while (atomic_read(&sbi->ll_sa_running) > 0)
+ schedule_timeout_uninterruptible(
+ cfs_time_seconds(1) >> 3);
}
EXIT;
*/
if (io->ci_ndelay && io->ci_ndelay_tried > 0 &&
(io->ci_ndelay_tried % comp->lo_mirror_count == 0)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 100); /* 10ms */
+ schedule_timeout_interruptible(cfs_time_seconds(1) / 100); /* 10ms */
if (signal_pending(current))
RETURN(-EINTR);
llog_cat_close(NULL, llh);
llog_ctxt_put(ctx);
class_decref(obd, "changelog", crs);
- schedule_timeout_interruptible(HZ);
+ schedule_timeout_interruptible(cfs_time_seconds(1));
goto again;
}
* and to have set mc_gc_task to itself
*/
spin_unlock(&mdd->mdd_cl.mc_lock);
- schedule_timeout(usecs_to_jiffies(10));
+ /* tiny sleep: one jiffy, the shortest delay schedule_timeout() honours */
+ schedule_timeout_uninterruptible(1);
/* go back to fully check if GC-thread has started or
* even already exited or if a new one is starting...
*/
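
Replacing usecs_to_jiffies(10) with a literal 1 is not a behaviour change: usecs_to_jiffies() rounds any non-zero delay up to at least one jiffy, and one jiffy is the finest delay schedule_timeout() can express anyway (10us is far below a tick at any supported HZ). What does change is that the sleep now happens at all, since the old call never set the task state first:

/* old: schedule_timeout(usecs_to_jiffies(10)) in TASK_RUNNING
 *      == arm a 1-jiffy timer, then return without sleeping
 * new: schedule_timeout_uninterruptible(1)
 *      == really sleep for one jiffy */
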
{
if (OBD_FAIL_CHECK(OBD_FAIL_TGT_DELAY_CONDITIONAL) &&
cfs_fail_val ==
- tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(3));
- }
+ tsi2mdt_info(tsi)->mti_mdt->mdt_seq_site.ss_node_id)
+ schedule_timeout_uninterruptible(cfs_time_seconds(3));
return tgt_connect(tsi);
}
spin_lock(&obd->obd_dev_lock);
while (!list_empty(&obd->obd_unlinked_exports)) {
spin_unlock(&obd->obd_dev_lock);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(waited));
+ schedule_timeout_uninterruptible(cfs_time_seconds(waited));
if (waited > 5 && is_power_of_2(waited)) {
LCONSOLE_WARN("%s is waiting for obd_unlinked_exports "
"more than %d seconds. "
CDEBUG(D_MOUNT, "lri reference count %u, repeat: %d\n",
atomic_read(&lri->lri_ref), repeat);
repeat++;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_interruptible(cfs_time_seconds(1));
}
lustre_put_lwp_item(lri);
}
if ((rc == -ESHUTDOWN || rc == -EIO) && ++tried < 5) {
/* The connection with MGS is not established.
* Try again after 2 seconds. Interruptable. */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(2));
- set_current_state(TASK_RUNNING);
+ schedule_timeout_interruptible(
+ cfs_time_seconds(2));
if (!signal_pending(current))
goto again;
}
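
This MGS-retry hunk is a template for interruptible pauses: after schedule_timeout_interruptible() returns, the loop only retries when no signal is pending, so a user interrupt actually breaks out instead of respinning. The shape to copy:

/* an interruptible sleep must be paired with a signal check */
schedule_timeout_interruptible(cfs_time_seconds(2));
if (!signal_pending(current))
	goto again;	/* woken by timeout: safe to retry */
/* else: fall through and give up, leaving rc for the caller */
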
spin_unlock(&ec->ec_lock);
CERROR(
"echo_client still has objects at cleanup time, wait for 1 second\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_uninterruptible(cfs_time_seconds(1));
lu_site_purge(env, ed->ed_site, -1);
spin_lock(&ec->ec_lock);
}
*/
if (unlikely(lck->rpcl_fakes)) {
mutex_unlock(&lck->rpcl_mutex);
- schedule_timeout(cfs_time_seconds(1) / 4);
+ schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
goto again;
}
rc = llog_cleanup(&env, ctxt);
if (rc)
GOTO(out, rc);
- schedule_timeout_interruptible(HZ * 5);
+ schedule_timeout_interruptible(cfs_time_seconds(5));
goto again;
}
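
This hunk and the changelog hunk earlier are the two spots that used bare HZ arithmetic (HZ * 5 and HZ); both now use cfs_time_seconds() like every other call site in the patch. That wrapper is libcfs's seconds-to-jiffies conversion, so these are spelling changes, not timing changes:

/* libcfs wrapper, older trees (from memory; later versions compute
 * it via the kernel's time-conversion helpers instead):
 *   #define cfs_time_seconds(sec) ((sec) * HZ)
 */
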
for (i = 0; i < 6; i++) {
if (channel_users(&rsi_cache) > 0)
break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1) / 4);
+ schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
}
if (channel_users(&rsi_cache) == 0)
"ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
newctx, newctx->cc_flags);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_interruptible(cfs_time_seconds(1));
} else if (unlikely(test_bit(PTLRPC_CTX_UPTODATE_BIT, &newctx->cc_flags)
== 0)) {
/*
"freed:%lu, repeat:%u\n", hash,
d.lid_inuse, d.lid_freed, repeat);
repeat++;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_interruptible(cfs_time_seconds(1));
goto retry;
}
EXIT;
/* if we have gotten some quota and stil wait more quota,
* it's better to give QMT some time to reclaim from clients */
- if (count > 0) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
- }
+ if (count > 0)
+ schedule_timeout_interruptible(cfs_time_seconds(1));
/* need to acquire more quota space from master */
rc = qsd_acquire_remote(env, lqe);
CDEBUG(D_QUOTA, "qqi reference count %u, repeat: %d\n",
atomic_read(&qqi->qqi_ref), repeat);
repeat++;
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(cfs_time_seconds(1));
+ schedule_timeout_interruptible(cfs_time_seconds(1));
}
/* by now, all qqi users should have gone away */