Some modules print less-than-useful messages on every load.
Turn these into internal debug messages to reduce noise.
The message in gss_init_svc_upcall() should also be quieted,
but it exposes that this function is waiting 1.5s on each module
load for lsvcgssd to start. This should be fixed separately.
Test-Parameters: trivial
Signed-off-by: Andreas Dilger <adilger@whamcloud.com>
Change-Id: Ib51ce0e9a88a94d8d2d5eb0906abef0f544cab07
Reviewed-on: https://review.whamcloud.com/33281
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Nathaniel Clark <nclark@whamcloud.com>
Reviewed-by: Sebastien Buisson <sbuisson@ddn.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
int i = 2;
while (sched->ws_nthreads > 0) {
int i = 2;
while (sched->ws_nthreads > 0) {
- CDEBUG(is_power_of_2(++i) ? D_WARNING : D_NET,
- "waiting for %d threads of WI sched[%s] to "
- "terminate\n", sched->ws_nthreads,
- sched->ws_name);
+ CDEBUG(is_power_of_2(++i / 20) ? D_WARNING : D_NET,
+ "waiting %us for %d %s worker threads to exit\n",
+ i / 20, sched->ws_nthreads, sched->ws_name);
spin_unlock(&cfs_wi_data.wi_glock);
set_current_state(TASK_UNINTERRUPTIBLE);
spin_unlock(&cfs_wi_data.wi_glock);
set_current_state(TASK_UNINTERRUPTIBLE);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
struct lmv_tgt_desc *tgt = lmv->tgts[i];
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
struct lmv_tgt_desc *tgt = lmv->tgts[i];
- if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) {
+ if (tgt == NULL || tgt->ltd_exp == NULL) {
CWARN("%s: NULL export for %d\n", obd->obd_name, i);
continue;
}
CWARN("%s: NULL export for %d\n", obd->obd_name, i);
continue;
}
+ if (!tgt->ltd_active)
+ continue;
rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
if (rc) {
rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
if (rc) {
spin_lock(&registered_mechs_lock);
list_add(&gm->gm_list, &registered_mechs);
spin_unlock(&registered_mechs_lock);
spin_lock(&registered_mechs_lock);
list_add(&gm->gm_list, &registered_mechs);
spin_unlock(&registered_mechs_lock);
- CWARN("Register %s mechanism\n", gm->gm_name);
+ CDEBUG(D_SEC, "register %s mechanism\n", gm->gm_name);
spin_lock(&registered_mechs_lock);
list_del(&gm->gm_list);
spin_unlock(&registered_mechs_lock);
spin_lock(&registered_mechs_lock);
list_del(&gm->gm_list);
spin_unlock(&registered_mechs_lock);
- CWARN("Unregister %s mechanism\n", gm->gm_name);
+ CDEBUG(D_SEC, "Unregister %s mechanism\n", gm->gm_name);
/* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
* the init upcall channel, otherwise there's big chance that the first
* upcall issued before the channel be opened thus nfsv4 cache code will
/* FIXME this looks stupid. we intend to give lsvcgssd a chance to open
* the init upcall channel, otherwise there's big chance that the first
* upcall issued before the channel be opened thus nfsv4 cache code will
- * drop the request direclty, thus lead to unnecessary recovery time.
- * here we wait at miximum 1.5 seconds. */
+ * drop the request directly, thus lead to unnecessary recovery time.
+ * Here we wait at minimum 1.5 seconds.
+ */
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
for (i = 0; i < 6; i++) {
if (atomic_read(&rsi_cache.readers) > 0)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
- LASSERT(msecs_to_jiffies(MSEC_PER_SEC) >= 4);
+ LASSERT(msecs_to_jiffies(MSEC_PER_SEC / 4) > 0);
schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
}
schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC / 4));
}