From: Mr NeilBrown
Date: Thu, 21 Jan 2021 03:37:54 +0000 (+1100)
Subject: LU-14352 various: only use wake_up_all() on exclusive waitqs
X-Git-Tag: 2.14.52~128
X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=commitdiff_plain;h=1f4e9f0f4f483dc93cbecdc841740bc11ec59a73

LU-14352 various: only use wake_up_all() on exclusive waitqs

wake_up_all() is not necessary on wait_queues which have no exclusive
waiters.  When all waiters are non-exclusive, wake_up() will wake them
all up.

Use of wake_up_all() suggests to the reader that the queue is subject
to exclusive waits.  When that is not the case, the usage can cause
confusion.  So change all wake_up_all() calls on non-exclusive
waitqueues to wake_up().

The only wait_queues on which exclusive waits are requested are:
  ws_waitq
  ibs_waitq
  kss_waitq
  ksnd_connd_waitq
  blp_waitq
  imp_replay_waitq
  cl_mod_rpcs_waitq
  cl_cache_waiters
  cl_destroy_waitq
  scp_waitq

All others now only use wake_up().

Signed-off-by: Mr NeilBrown
Change-Id: I8e4a82747a93a50344cf545f1ae51fffc432b788
Reviewed-on: https://review.whamcloud.com/41289
Tested-by: jenkins
Tested-by: Maloo
Reviewed-by: Andreas Dilger
Reviewed-by: Yang Sheng
Reviewed-by: James Simmons
Reviewed-by: Oleg Drokin
---

diff --git a/lnet/klnds/gnilnd/gnilnd.c b/lnet/klnds/gnilnd/gnilnd.c
index 84fc55c..c5da2ee 100644
--- a/lnet/klnds/gnilnd/gnilnd.c
+++ b/lnet/klnds/gnilnd/gnilnd.c
@@ -2488,13 +2488,13 @@ kgnilnd_base_shutdown(void)
 			kgnilnd_unmap_fma_blocks(dev);
 			kgnilnd_schedule_device(dev);
-			wake_up_all(&dev->gnd_dgram_waitq);
-			wake_up_all(&dev->gnd_dgping_waitq);
+			wake_up(&dev->gnd_dgram_waitq);
+			wake_up(&dev->gnd_dgping_waitq);
 			LASSERT(list_empty(&dev->gnd_connd_peers));
 		}
 		spin_lock(&kgnilnd_data.kgn_reaper_lock);
-		wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
+		wake_up(&kgnilnd_data.kgn_reaper_waitq);
 		spin_unlock(&kgnilnd_data.kgn_reaper_lock);
 		if (atomic_read(&kgnilnd_data.kgn_nthreads))
diff --git a/lnet/klnds/gnilnd/gnilnd_cb.c b/lnet/klnds/gnilnd/gnilnd_cb.c
index 5ebf765..2e3232a 100644
--- a/lnet/klnds/gnilnd/gnilnd_cb.c
+++ b/lnet/klnds/gnilnd/gnilnd_cb.c
@@ -82,9 +82,8 @@ kgnilnd_schedule_device(kgn_device_t *dev)
 	 * has come around and set ready to zero */
 	already_live = cmpxchg(&dev->gnd_ready, GNILND_DEV_IDLE, GNILND_DEV_IRQ);
-	if (!already_live) {
-		wake_up_all(&dev->gnd_waitq);
-	}
+	if (!already_live)
+		wake_up(&dev->gnd_waitq);
 }
 void kgnilnd_schedule_device_timer(cfs_timer_cb_arg_t data)
diff --git a/lnet/klnds/gnilnd/gnilnd_conn.c b/lnet/klnds/gnilnd/gnilnd_conn.c
index a4d1a5e..c75f0ac 100644
--- a/lnet/klnds/gnilnd/gnilnd_conn.c
+++ b/lnet/klnds/gnilnd/gnilnd_conn.c
@@ -2537,7 +2537,7 @@ kgnilnd_dgram_mover(void *arg)
 		    !kgnilnd_data.kgn_quiesce_trigger) {
 			CDEBUG(D_INFO, "schedule timeout %ld (%lu sec)\n",
 			       timeout, cfs_duration_sec(timeout));
-			wake_up_all(&dev->gnd_dgping_waitq);
+			wake_up(&dev->gnd_dgping_waitq);
 			schedule();
 			CDEBUG(D_INFO, "awake after schedule\n");
 			deadline = jiffies + cfs_time_seconds(*kgnilnd_tunables.kgn_dgram_timeout);
diff --git a/lnet/klnds/gnilnd/gnilnd_stack.c b/lnet/klnds/gnilnd/gnilnd_stack.c
index bab50cb..c6c6093 100644
--- a/lnet/klnds/gnilnd/gnilnd_stack.c
+++ b/lnet/klnds/gnilnd/gnilnd_stack.c
@@ -107,15 +107,15 @@ kgnilnd_quiesce_wait(char *reason)
 		/* morning sunshine */
 		spin_lock(&kgnilnd_data.kgn_reaper_lock);
-		wake_up_all(&kgnilnd_data.kgn_reaper_waitq);
+		wake_up(&kgnilnd_data.kgn_reaper_waitq);
 		spin_unlock(&kgnilnd_data.kgn_reaper_lock);
 		for (i = 0; i < kgnilnd_data.kgn_ndevs; i++) {
 			kgn_device_t *dev = &kgnilnd_data.kgn_devices[i];
-			wake_up_all(&dev->gnd_waitq);
-			wake_up_all(&dev->gnd_dgram_waitq);
-			wake_up_all(&dev->gnd_dgping_waitq);
+			wake_up(&dev->gnd_waitq);
+			wake_up(&dev->gnd_dgram_waitq);
+			wake_up(&dev->gnd_dgping_waitq);
 		}
 		kgnilnd_wakeup_rca_thread();
diff --git a/lnet/klnds/o2iblnd/o2iblnd.c b/lnet/klnds/o2iblnd/o2iblnd.c
index 337b74e..a2e0160 100644
--- a/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/lnet/klnds/o2iblnd/o2iblnd.c
@@ -2956,8 +2956,8 @@ kiblnd_base_shutdown(void)
 		cfs_percpt_for_each(sched, i, kiblnd_data.kib_scheds)
 			wake_up_all(&sched->ibs_waitq);
-		wake_up_all(&kiblnd_data.kib_connd_waitq);
-		wake_up_all(&kiblnd_data.kib_failover_waitq);
+		wake_up(&kiblnd_data.kib_connd_waitq);
+		wake_up(&kiblnd_data.kib_failover_waitq);
 		wait_var_event_warning(&kiblnd_data.kib_nthreads,
 				       !atomic_read(&kiblnd_data.kib_nthreads),
diff --git a/lnet/klnds/socklnd/socklnd.c b/lnet/klnds/socklnd/socklnd.c
index 7e86d92..1b77ddf 100644
--- a/lnet/klnds/socklnd/socklnd.c
+++ b/lnet/klnds/socklnd/socklnd.c
@@ -1292,7 +1292,7 @@ ksocknal_terminate_conn(struct ksock_conn *conn)
 		/* extra ref for scheduler */
 		ksocknal_conn_addref(conn);
-		wake_up (&sched->kss_waitq);
+		wake_up(&sched->kss_waitq);
 	}
 	spin_unlock_bh(&sched->kss_lock);
@@ -1793,7 +1793,7 @@ ksocknal_base_shutdown(void)
 	/* flag threads to terminate; wake and wait for them to die */
 	ksocknal_data.ksnd_shuttingdown = 1;
 	wake_up_all(&ksocknal_data.ksnd_connd_waitq);
-	wake_up_all(&ksocknal_data.ksnd_reaper_waitq);
+	wake_up(&ksocknal_data.ksnd_reaper_waitq);
 	if (ksocknal_data.ksnd_schedulers != NULL) {
 		cfs_percpt_for_each(sched, i,
diff --git a/lnet/lnet/peer.c b/lnet/lnet/peer.c
index 678b1f4..8acde11 100644
--- a/lnet/lnet/peer.c
+++ b/lnet/lnet/peer.c
@@ -2140,7 +2140,7 @@ static void lnet_peer_discovery_complete(struct lnet_peer *lp)
 	spin_lock(&lp->lp_lock);
 	list_splice_init(&lp->lp_dc_pendq, &pending_msgs);
 	spin_unlock(&lp->lp_lock);
-	wake_up_all(&lp->lp_dc_waitq);
+	wake_up(&lp->lp_dc_waitq);
 	if (lp->lp_rtr_refcount > 0)
 		lnet_router_discovery_complete(lp);
diff --git a/lustre/fid/fid_request.c b/lustre/fid/fid_request.c
index a951964..985cf902 100644
--- a/lustre/fid/fid_request.c
+++ b/lustre/fid/fid_request.c
@@ -301,7 +301,7 @@ static void seq_fid_alloc_fini(struct lu_client_seq *seq, __u64 seqnr,
 	}
 	--seq->lcs_update;
-	wake_up_all(&seq->lcs_waitq);
+	wake_up(&seq->lcs_waitq);
 }
 /**
diff --git a/lustre/ldlm/ldlm_lock.c b/lustre/ldlm/ldlm_lock.c
index 48906c5..6e830f9 100644
--- a/lustre/ldlm/ldlm_lock.c
+++ b/lustre/ldlm/ldlm_lock.c
@@ -1337,7 +1337,7 @@ void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
 {
 	if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
 		lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
-		wake_up_all(&lock->l_waitq);
+		wake_up(&lock->l_waitq);
 	}
 }
 EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
@@ -1359,7 +1359,7 @@ void ldlm_lock_fail_match(struct ldlm_lock *lock)
 void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
 {
 	ldlm_set_lvb_ready(lock);
-	wake_up_all(&lock->l_waitq);
+	wake_up(&lock->l_waitq);
 }
 EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
@@ -2448,7 +2448,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
 		/* only canceller can set bl_done bit */
 		ldlm_set_bl_done(lock);
-		wake_up_all(&lock->l_waitq);
+		wake_up(&lock->l_waitq);
 	} else if (!ldlm_is_bl_done(lock)) {
 		/* The lock is guaranteed to have been canceled once
 		 * returning from this function.
 		 */
diff --git a/lustre/lfsck/lfsck_engine.c b/lustre/lfsck/lfsck_engine.c
index f9c65b8..2c2d90b 100644
--- a/lustre/lfsck/lfsck_engine.c
+++ b/lustre/lfsck/lfsck_engine.c
@@ -1070,7 +1070,7 @@ int lfsck_master_engine(void *args)
 	thread_set_flags(thread, SVC_RUNNING);
 	spin_unlock(&lfsck->li_lock);
-	wake_up_all(&thread->t_ctl_waitq);
+	wake_up(&thread->t_ctl_waitq);
 	wait_event_idle(thread->t_ctl_waitq,
 			lfsck->li_start_unplug ||
@@ -1117,7 +1117,7 @@ fini_args:
 	thread_set_flags(thread, SVC_STOPPED);
 	lfsck->li_task = NULL;
 	spin_unlock(&lfsck->li_lock);
-	wake_up_all(&thread->t_ctl_waitq);
+	wake_up(&thread->t_ctl_waitq);
 	lfsck_thread_args_fini(lta);
 	return rc;
 }
@@ -1598,7 +1598,7 @@ int lfsck_assistant_engine(void *args)
 	lad->lad_task = current;
 	thread_set_flags(athread, SVC_RUNNING);
 	spin_unlock(&lad->lad_lock);
-	wake_up_all(&mthread->t_ctl_waitq);
+	wake_up(&mthread->t_ctl_waitq);
 	while (1) {
 		while (!list_empty(&lad->lad_req_list)) {
@@ -1627,7 +1627,7 @@ int lfsck_assistant_engine(void *args)
 				wakeup = true;
 			spin_unlock(&lad->lad_lock);
 			if (wakeup)
-				wake_up_all(&mthread->t_ctl_waitq);
+				wake_up(&mthread->t_ctl_waitq);
 			lao->la_req_fini(env, lar);
 			if (rc < 0 && bk->lb_param & LPF_FAILOUT)
@@ -1657,7 +1657,7 @@ int lfsck_assistant_engine(void *args)
 			LASSERT(lad->lad_post_result > 0);
 			/* Wakeup the master engine to go ahead. */
-			wake_up_all(&mthread->t_ctl_waitq);
+			wake_up(&mthread->t_ctl_waitq);
 			memset(lr, 0, sizeof(*lr));
 			lr->lr_event = LE_PHASE1_DONE;
@@ -1674,7 +1674,7 @@ int lfsck_assistant_engine(void *args)
 			clear_bit(LAD_TO_DOUBLE_SCAN, &lad->lad_flags);
 			atomic_inc(&lfsck->li_double_scan_count);
 			set_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags);
-			wake_up_all(&mthread->t_ctl_waitq);
+			wake_up(&mthread->t_ctl_waitq);
 			com->lc_new_checked = 0;
 			com->lc_new_scanned = 0;
@@ -1844,7 +1844,7 @@ fini:
 	       lad->lad_assistant_status);
 	lfsck_thread_args_fini(lta);
-	wake_up_all(&mthread->t_ctl_waitq);
+	wake_up(&mthread->t_ctl_waitq);
 	return rc;
 }
diff --git a/lustre/lfsck/lfsck_layout.c b/lustre/lfsck/lfsck_layout.c
index 95b764d..f62b365c 100644
--- a/lustre/lfsck/lfsck_layout.c
+++ b/lustre/lfsck/lfsck_layout.c
@@ -5577,7 +5577,7 @@ static int lfsck_layout_scan_stripes(const struct lu_env *env,
 			lad->lad_prefetched++;
 		spin_unlock(&lad->lad_lock);
 		if (wakeup)
-			wake_up_all(&athread->t_ctl_waitq);
+			wake_up(&athread->t_ctl_waitq);
 next:
 		down_write(&com->lc_sem);
@@ -6305,7 +6305,7 @@ done:
 			(rc > 0 && lo->ll_flags & LF_INCOMPLETE) ? 0 : rc);
 	lfsck_layout_slave_quit(env, com);
 	if (atomic_dec_and_test(&lfsck->li_double_scan_count))
-		wake_up_all(&lfsck->li_thread.t_ctl_waitq);
+		wake_up(&lfsck->li_thread.t_ctl_waitq);
 	CDEBUG(D_LFSCK, "%s: layout LFSCK slave phase2 scan finished, "
 	       "status %d: rc = %d\n",
@@ -6561,7 +6561,7 @@ static int lfsck_layout_master_in_notify(const struct lu_env *env,
 		stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
 		lfsck_stop(env, lfsck->li_bottom, stop);
 	} else if (lfsck_phase2_next_ready(lad)) {
-		wake_up_all(&lad->lad_thread.t_ctl_waitq);
+		wake_up(&lad->lad_thread.t_ctl_waitq);
 	}
 	RETURN(0);
@@ -6645,7 +6645,7 @@ static int lfsck_layout_slave_in_notify(const struct lu_env *env,
 							     true);
 		if (llst != NULL) {
 			lfsck_layout_llst_put(llst);
-			wake_up_all(&lfsck->li_thread.t_ctl_waitq);
+			wake_up(&lfsck->li_thread.t_ctl_waitq);
 		}
 	}
@@ -6667,7 +6667,7 @@ static int lfsck_layout_slave_in_notify(const struct lu_env *env,
 		lfsck_layout_llst_put(llst);
 		if (list_empty(&llsd->llsd_master_list))
-			wake_up_all(&lfsck->li_thread.t_ctl_waitq);
+			wake_up(&lfsck->li_thread.t_ctl_waitq);
 		if (lr->lr_event == LE_PEER_EXIT &&
 		    (lfsck->li_bookmark_ram.lb_param & LPF_FAILOUT ||
diff --git a/lustre/lfsck/lfsck_lib.c b/lustre/lfsck/lfsck_lib.c
index e3b39d6..21cd6f8 100644
--- a/lustre/lfsck/lfsck_lib.c
+++ b/lustre/lfsck/lfsck_lib.c
@@ -2592,7 +2592,7 @@ void lfsck_post_generic(const struct lu_env *env,
 	CDEBUG(D_LFSCK, "%s: waiting for assistant to do %s post, rc = %d\n",
 	       lfsck_lfsck2name(com->lc_lfsck), lad->lad_name, *result);
-	wake_up_all(&athread->t_ctl_waitq);
+	wake_up(&athread->t_ctl_waitq);
 	wait_event_idle(mthread->t_ctl_waitq,
 			(*result > 0 && list_empty(&lad->lad_req_list)) ||
 			thread_is_stopped(athread));
@@ -2620,7 +2620,7 @@ int lfsck_double_scan_generic(const struct lu_env *env,
 	       "status %d\n",
 	       lfsck_lfsck2name(com->lc_lfsck), lad->lad_name, status);
-	wake_up_all(&athread->t_ctl_waitq);
+	wake_up(&athread->t_ctl_waitq);
 	wait_event_idle(mthread->t_ctl_waitq,
 			test_bit(LAD_IN_DOUBLE_SCAN, &lad->lad_flags) ||
 			thread_is_stopped(athread));
@@ -2643,7 +2643,7 @@ void lfsck_quit_generic(const struct lu_env *env,
 	struct ptlrpc_thread *athread = &lad->lad_thread;
 	set_bit(LAD_EXIT, &lad->lad_flags);
-	wake_up_all(&athread->t_ctl_waitq);
+	wake_up(&athread->t_ctl_waitq);
 	wait_event_idle(mthread->t_ctl_waitq,
 			thread_is_init(athread) ||
 			thread_is_stopped(athread));
@@ -3323,7 +3323,7 @@ trigger:
 			   thread_is_stopped(thread));
 	if (start == NULL || !(start->ls_flags & LPF_BROADCAST)) {
 		lfsck->li_start_unplug = 1;
-		wake_up_all(&thread->t_ctl_waitq);
+		wake_up(&thread->t_ctl_waitq);
 		GOTO(out, rc = 0);
 	}
@@ -3342,13 +3342,13 @@ trigger:
 			spin_unlock(&lfsck->li_lock);
 			lfsck->li_start_unplug = 1;
-			wake_up_all(&thread->t_ctl_waitq);
+			wake_up(&thread->t_ctl_waitq);
 			wait_event_idle(thread->t_ctl_waitq,
 					thread_is_stopped(thread));
 		}
 	} else {
 		lfsck->li_start_unplug = 1;
-		wake_up_all(&thread->t_ctl_waitq);
+		wake_up(&thread->t_ctl_waitq);
 	}
 	GOTO(put, rc);
@@ -3430,7 +3430,7 @@ int lfsck_stop(const struct lu_env *env, struct dt_device *key,
 		}
 	}
-	wake_up_all(&thread->t_ctl_waitq);
+	wake_up(&thread->t_ctl_waitq);
 	spin_unlock(&lfsck->li_lock);
 	if (stop && stop->ls_flags & LPF_BROADCAST)
 		rc1 = lfsck_stop_all(env, lfsck, stop);
diff --git a/lustre/lfsck/lfsck_namespace.c b/lustre/lfsck/lfsck_namespace.c
index 9569d0c..a5725d7 100644
--- a/lustre/lfsck/lfsck_namespace.c
+++ b/lustre/lfsck/lfsck_namespace.c
@@ -4366,7 +4366,7 @@ static void lfsck_namespace_close_dir(const struct lu_env *env,
 		lad->lad_prefetched++;
 	spin_unlock(&lad->lad_lock);
 	if (wakeup)
-		wake_up_all(&lad->lad_thread.t_ctl_waitq);
+		wake_up(&lad->lad_thread.t_ctl_waitq);
 	EXIT;
 }
@@ -4711,7 +4711,7 @@ static int lfsck_namespace_exec_dir(const struct lu_env *env,
 		lad->lad_prefetched++;
 	spin_unlock(&lad->lad_lock);
 	if (wakeup)
-		wake_up_all(&lad->lad_thread.t_ctl_waitq);
+		wake_up(&lad->lad_thread.t_ctl_waitq);
 	down_write(&com->lc_sem);
 	com->lc_new_checked++;
@@ -5153,7 +5153,7 @@ static int lfsck_namespace_in_notify(const struct lu_env *env,
 		stop->ls_flags = lr->lr_param & ~LPF_BROADCAST;
 		lfsck_stop(env, lfsck->li_bottom, stop);
 	} else if (lfsck_phase2_next_ready(lad)) {
-		wake_up_all(&lad->lad_thread.t_ctl_waitq);
+		wake_up(&lad->lad_thread.t_ctl_waitq);
 	}
 	RETURN(0);
diff --git a/lustre/llite/pcc.c b/lustre/llite/pcc.c
index 93fad9a..a8dc228 100644
--- a/lustre/llite/pcc.c
+++ b/lustre/llite/pcc.c
@@ -1548,7 +1548,7 @@ static void pcc_io_fini(struct inode *inode)
 	LASSERT(pcci && atomic_read(&pcci->pcci_active_ios) > 0);
 	if (atomic_dec_and_test(&pcci->pcci_active_ios))
-		wake_up_all(&pcci->pcci_waitq);
+		wake_up(&pcci->pcci_waitq);
 }
diff --git a/lustre/lov/lov_io.c b/lustre/lov/lov_io.c
index 79021f0..c615c2f 100644
--- a/lustre/lov/lov_io.c
+++ b/lustre/lov/lov_io.c
@@ -661,7 +661,7 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
 	LASSERT(atomic_read(&lov->lo_active_ios) > 0);
 	if (atomic_dec_and_test(&lov->lo_active_ios))
-		wake_up_all(&lov->lo_waitq);
+		wake_up(&lov->lo_waitq);
 	EXIT;
 }
@@ -1716,7 +1716,7 @@ static void lov_empty_io_fini(const struct lu_env *env,
 	ENTRY;
 	if (atomic_dec_and_test(&lov->lo_active_ios))
-		wake_up_all(&lov->lo_waitq);
+		wake_up(&lov->lo_waitq);
 	EXIT;
 }
diff --git a/lustre/mdc/mdc_changelog.c b/lustre/mdc/mdc_changelog.c
index ee11f6b..968a043 100644
--- a/lustre/mdc/mdc_changelog.c
+++ b/lustre/mdc/mdc_changelog.c
@@ -260,7 +260,7 @@ static int chlg_read_cat_process_cb(const struct lu_env *env,
 	crs->crs_rec_count++;
 	mutex_unlock(&crs->crs_lock);
-	wake_up_all(&crs->crs_waitq_cons);
+	wake_up(&crs->crs_waitq_cons);
 	RETURN(0);
 }
@@ -349,7 +349,7 @@ err_out:
 	if (rc < 0)
 		crs->crs_err = rc;
-	wake_up_all(&crs->crs_waitq_cons);
+	wake_up(&crs->crs_waitq_cons);
 	if (llh != NULL)
 		llog_cat_close(NULL, llh);
@@ -421,7 +421,7 @@ static ssize_t chlg_read(struct file *file, char __user *buff, size_t count,
 	if (written_total > 0) {
 		rc = written_total;
-		wake_up_all(&crs->crs_waitq_prod);
+		wake_up(&crs->crs_waitq_prod);
 	} else if (rc == 0) {
 		rc = crs->crs_err;
 	}
@@ -464,7 +464,7 @@ static int chlg_set_start_offset(struct chlg_reader_state *crs, __u64 offset)
 	}
 	mutex_unlock(&crs->crs_lock);
-	wake_up_all(&crs->crs_waitq_prod);
+	wake_up(&crs->crs_waitq_prod);
 	return 0;
 }
diff --git a/lustre/mdt/mdt_coordinator.c b/lustre/mdt/mdt_coordinator.c
index ada7c9f..327cd59 100644
--- a/lustre/mdt/mdt_coordinator.c
+++ b/lustre/mdt/mdt_coordinator.c
@@ -565,7 +565,7 @@ static int mdt_coordinator(void *data)
 	set_cdt_state(cdt, CDT_RUNNING);
 	/* Inform mdt_hsm_cdt_start(). */
-	wake_up_all(&cdt->cdt_waitq);
+	wake_up(&cdt->cdt_waitq);
 	while (1) {
 		int i;
diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c
index 84f2109..d9e0516 100644
--- a/lustre/obdclass/cl_io.c
+++ b/lustre/obdclass/cl_io.c
@@ -1281,7 +1281,7 @@ void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
 		 * to immediately reclaim anchor when cl_sync_io_wait()
 		 * completes.
 		 */
-		wake_up_all_locked(&anchor->csi_waitq);
+		wake_up_locked(&anchor->csi_waitq);
 		if (end_io)
 			end_io(env, anchor);
 		if (anchor->csi_aio)
diff --git a/lustre/obdclass/lu_object.c b/lustre/obdclass/lu_object.c
index 8e79c57..b05871f 100644
--- a/lustre/obdclass/lu_object.c
+++ b/lustre/obdclass/lu_object.c
@@ -414,7 +414,7 @@ static void lu_object_free(const struct lu_env *env, struct lu_object *o)
 	}
 	if (waitqueue_active(wq))
-		wake_up_all(wq);
+		wake_up(wq);
 }
 /**
diff --git a/lustre/obdclass/upcall_cache.c b/lustre/obdclass/upcall_cache.c
index 9624c0a..b172cfa 100644
--- a/lustre/obdclass/upcall_cache.c
+++ b/lustre/obdclass/upcall_cache.c
@@ -125,7 +125,7 @@ static int check_unlink_entry(struct upcall_cache *cache,
 			return 0;
 		UC_CACHE_SET_EXPIRED(entry);
-		wake_up_all(&entry->ue_waitq);
+		wake_up(&entry->ue_waitq);
 	} else if (!UC_CACHE_IS_INVALID(entry)) {
 		UC_CACHE_SET_EXPIRED(entry);
 	}
@@ -202,7 +202,7 @@ find_again:
 		if (rc < 0) {
 			UC_CACHE_CLEAR_ACQUIRING(entry);
 			UC_CACHE_SET_INVALID(entry);
-			wake_up_all(&entry->ue_waitq);
+			wake_up(&entry->ue_waitq);
 			if (unlikely(rc == -EREMCHG)) {
 				put_entry(cache, entry);
 				GOTO(out, entry = ERR_PTR(rc));
 			}
@@ -350,7 +350,7 @@ out:
 	}
 	UC_CACHE_CLEAR_ACQUIRING(entry);
 	spin_unlock(&cache->uc_lock);
-	wake_up_all(&entry->ue_waitq);
+	wake_up(&entry->ue_waitq);
 	put_entry(cache, entry);
 	RETURN(rc);
diff --git a/lustre/ofd/ofd_access_log.c b/lustre/ofd/ofd_access_log.c
index 791d027..f27a49b 100644
--- a/lustre/ofd/ofd_access_log.c
+++ b/lustre/ofd/ofd_access_log.c
@@ -571,9 +571,8 @@ void ofd_access_log_delete(struct ofd_access_log *oal)
 	oal->oal_is_closed = 1;
 	down_read(&oal->oal_buf_list_sem);
-	list_for_each_entry(ocb, &oal->oal_circ_buf_list, ocb_list) {
-		wake_up_all(&ocb->ocb_read_wait_queue);
-	}
+	list_for_each_entry(ocb, &oal->oal_circ_buf_list, ocb_list)
+		wake_up(&ocb->ocb_read_wait_queue);
 	up_read(&oal->oal_buf_list_sem);
 	cdev_device_del(&oal->oal_cdev, &oal->oal_device);
 }
diff --git a/lustre/osc/osc_io.c b/lustre/osc/osc_io.c
index ae8cc68..7e507ec 100644
--- a/lustre/osc/osc_io.c
+++ b/lustre/osc/osc_io.c
@@ -440,7 +440,7 @@ void osc_io_iter_fini(const struct lu_env *env,
 		oio->oi_is_active = 0;
 		LASSERT(atomic_read(&osc->oo_nr_ios) > 0);
 		if (atomic_dec_and_test(&osc->oo_nr_ios))
-			wake_up_all(&osc->oo_io_waitq);
+			wake_up(&osc->oo_io_waitq);
 	}
 }
 EXPORT_SYMBOL(osc_io_iter_fini);
diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c
index 065bebe..05c1282 100644
--- a/lustre/osc/osc_page.c
+++ b/lustre/osc/osc_page.c
@@ -679,7 +679,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
 	atomic_dec(&cli->cl_lru_shrinkers);
 	if (count > 0) {
 		atomic_long_add(count, cli->cl_lru_left);
-		wake_up_all(&osc_lru_waitq);
+		wake_up(&osc_lru_waitq);
 	}
 	RETURN(count > 0 ? count : rc);
 }
@@ -890,7 +890,7 @@ again:
 void osc_lru_unreserve(struct client_obd *cli, unsigned long npages)
 {
 	atomic_long_add(npages, cli->cl_lru_left);
-	wake_up_all(&osc_lru_waitq);
+	wake_up(&osc_lru_waitq);
 }
 /**
@@ -993,7 +993,7 @@ void osc_dec_unstable_pages(struct ptlrpc_request *req)
 			   &cli->cl_cache->ccc_unstable_nr);
 	LASSERT(unstable_count >= 0);
 	if (unstable_count == 0)
-		wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+		wake_up(&cli->cl_cache->ccc_unstable_waitq);
 	if (waitqueue_active(&osc_lru_waitq))
 		(void)ptlrpcd_queue_work(cli->cl_lru_work);
diff --git a/lustre/osp/osp_trans.c b/lustre/osp/osp_trans.c
index f0cad02..dc29665 100644
--- a/lustre/osp/osp_trans.c
+++ b/lustre/osp/osp_trans.c
@@ -688,7 +688,7 @@ static int osp_update_interpret(const struct lu_env *env,
 	}
 	if (oaua->oaua_count != NULL && atomic_dec_and_test(oaua->oaua_count))
-		wake_up_all(oaua->oaua_waitq);
+		wake_up(oaua->oaua_waitq);
 	if (oth != NULL) {
 		/* oth and osp_update_requests will be destoryed in
diff --git a/lustre/ptlrpc/gss/gss_svc_upcall.c b/lustre/ptlrpc/gss/gss_svc_upcall.c
index 845f0be..0c5a36a 100644
--- a/lustre/ptlrpc/gss/gss_svc_upcall.c
+++ b/lustre/ptlrpc/gss/gss_svc_upcall.c
@@ -385,7 +385,7 @@ static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
 out:
 	rsi_free(&rsii);
 	if (rsip) {
-		wake_up_all(&rsip->waitq);
+		wake_up(&rsip->waitq);
 		cache_put(&rsip->h, &rsi_cache);
 	} else {
 		status = -ENOMEM;
diff --git a/lustre/ptlrpc/sec_bulk.c b/lustre/ptlrpc/sec_bulk.c
index 3bf607f..8e84243 100644
--- a/lustre/ptlrpc/sec_bulk.c
+++ b/lustre/ptlrpc/sec_bulk.c
@@ -475,7 +475,7 @@ static inline void enc_pools_wakeup(void)
 	if (unlikely(page_pools.epp_waitqlen)) {
 		LASSERT(waitqueue_active(&page_pools.epp_waitq));
-		wake_up_all(&page_pools.epp_waitq);
+		wake_up(&page_pools.epp_waitq);
 	}
 }
diff --git a/lustre/ptlrpc/service.c b/lustre/ptlrpc/service.c
index 738a313..bfd0665 100644
--- a/lustre/ptlrpc/service.c
+++ b/lustre/ptlrpc/service.c
@@ -3013,7 +3013,7 @@ static void ptlrpc_stop_hr_threads(void)
 		if (hrp->hrp_thrs == NULL)
 			continue; /* uninitialized */
 		for (j = 0; j < hrp->hrp_nthrs; j++)
-			wake_up_all(&hrp->hrp_thrs[j].hrt_waitq);
+			wake_up(&hrp->hrp_thrs[j].hrt_waitq);
 	}
 	cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
diff --git a/lustre/quota/qsd_handler.c b/lustre/quota/qsd_handler.c
index c29b303..d40251d 100644
--- a/lustre/quota/qsd_handler.c
+++ b/lustre/quota/qsd_handler.c
@@ -65,7 +65,7 @@ static inline void qsd_request_exit(struct lquota_entry *lqe)
 	}
 	lqe->lqe_pending_req--;
 	lqe->lqe_pending_rel = 0;
-	wake_up_all(&lqe->lqe_waiters);
+	wake_up(&lqe->lqe_waiters);
 }
 /**
diff --git a/lustre/quota/qsd_lock.c b/lustre/quota/qsd_lock.c
index e058b38..7921f0e 100644
--- a/lustre/quota/qsd_lock.c
+++ b/lustre/quota/qsd_lock.c
@@ -514,7 +514,7 @@ static int qsd_id_glimpse_ast(struct ldlm_lock *lock, void *data)
 	lqe_write_unlock(lqe);
 	if (wakeup)
-		wake_up_all(&lqe->lqe_waiters);
+		wake_up(&lqe->lqe_waiters);
 	lqe_putref(lqe);
 out:
 	req->rq_status = rc;
diff --git a/lustre/target/barrier.c b/lustre/target/barrier.c
index 6ed91f7..4f8c5b3 100644
--- a/lustre/target/barrier.c
+++ b/lustre/target/barrier.c
@@ -272,7 +272,7 @@ void barrier_exit(struct dt_device *key)
 		smp_mb();
 		if (unlikely(barrier->bi_status == BS_FREEZING_P1))
-			wake_up_all(&barrier->bi_waitq);
+			wake_up(&barrier->bi_waitq);
 		barrier_instance_put(barrier);
 	}
 }
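
Background on the semantics the commit message relies on: wake_up() wakes every
non-exclusive waiter on a queue but at most one exclusive waiter, while
wake_up_all() also wakes every exclusive waiter.  The sketch below is
illustrative only and is not part of the patch above; demo_waitq, demo_cond and
the two waiter functions are hypothetical names used to show the two waiter
styles on a Linux wait_queue_head_t.

/* Illustrative sketch only -- not part of the patch above. */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static int demo_cond;

/* Non-exclusive waiter: woken by wake_up() and wake_up_all() alike. */
static int demo_plain_waiter(void *arg)
{
	wait_event(demo_waitq, demo_cond);
	return 0;
}

/* Exclusive waiter: wake_up() wakes at most one of these;
 * only wake_up_all() is guaranteed to wake every one.
 */
static int demo_exclusive_waiter(void *arg)
{
	return wait_event_interruptible_exclusive(demo_waitq, demo_cond);
}

static void demo_wake(void)
{
	demo_cond = 1;
	/* Wakes all non-exclusive waiters plus at most one exclusive one. */
	wake_up(&demo_waitq);
	/* Wakes everything, exclusive waiters included.  Only useful when
	 * exclusive waiters exist, e.g. ibs_waitq or kss_waitq above.
	 */
	wake_up_all(&demo_waitq);
}

On a queue where no task ever waits with the *_exclusive variants -- the
situation this patch addresses -- the two calls behave identically, so
wake_up() is the clearer choice.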