osd_statfs() shouldn't cache statfs data anymore: the statfs data
is already cached in the ofd layer, so keeping another cache in the
osd layer is redundant. More importantly, the grant mechanism relies
on dt_statfs() returning fresh statfs data, so caching statfs data
in the osd layer would break grant accounting.
Signed-off-by: Niu Yawei <yawei.niu@intel.com>
Change-Id: I89b6384cc59d77b1edb0412f24b5c8e823532170
Reviewed-on: http://review.whamcloud.com/8911
Tested-by: Jenkins
Tested-by: Maloo <hpdd-maloo@intel.com>
Reviewed-by: Johann Lombardi <johann.lombardi@intel.com>
Reviewed-by: Alex Zhuravlev <alexey.zhuravlev@intel.com>
rc = ofd_statfs_internal(env, ofd, osfs, max_age, from_cache);
if (unlikely(rc)) {
rc = ofd_statfs_internal(env, ofd, osfs, max_age, from_cache);
if (unlikely(rc)) {
+ if (from_cache)
+ *from_cache = 0;
D_ERROR : D_CACHE;
CDEBUG_LIMIT(mask, "%s: cli %s/%p left "LPU64" < tot_grant "
D_ERROR : D_CACHE;
CDEBUG_LIMIT(mask, "%s: cli %s/%p left "LPU64" < tot_grant "
- LPU64" unstable "LPU64" pending "LPU64"\n",
+ LPU64" unstable "LPU64" pending "LPU64" "
+ "dirty "LPU64"\n",
obd->obd_name, exp->exp_client_uuid.uuid, exp,
left, tot_granted, unstable,
obd->obd_name, exp->exp_client_uuid.uuid, exp,
left, tot_granted, unstable,
+ ofd->ofd_tot_pending, ofd->ofd_tot_dirty);
d ? d->id_ops->id_name : "plain");
}
d ? d->id_ops->id_name : "plain");
}
+#define GRANT_FOR_LOCAL_OIDS 32 /* 128kB for last_rcvd, quota files, ... */
+
/*
* Concurrency: shouldn't matter.
*/
/*
* Concurrency: shouldn't matter.
*/
}
spin_lock(&osd->od_osfs_lock);
}
spin_lock(&osd->od_osfs_lock);
- /* cache 1 second */
- if (cfs_time_before_64(osd->od_osfs_age, cfs_time_shift_64(-1))) {
- result = sb->s_op->statfs(sb->s_root, ksfs);
- if (likely(result == 0)) { /* N.B. statfs can't really fail */
- osd->od_osfs_age = cfs_time_current_64();
- statfs_pack(&osd->od_statfs, ksfs);
- if (sb->s_flags & MS_RDONLY)
- sfs->os_state = OS_STATE_READONLY;
- }
+ result = sb->s_op->statfs(sb->s_root, ksfs);
+ if (likely(result == 0)) { /* N.B. statfs can't really fail */
+ statfs_pack(sfs, ksfs);
+ if (sb->s_flags & MS_RDONLY)
+ sfs->os_state = OS_STATE_READONLY;
- if (likely(result == 0))
- *sfs = osd->od_statfs;
spin_unlock(&osd->od_osfs_lock);
spin_unlock(&osd->od_osfs_lock);
- if (unlikely(env == NULL))
+ if (unlikely(env == NULL))
+ /* Reserve a small amount of space for local objects like last_rcvd,
+ * llog, quota files, ... */
+ if (sfs->os_bavail <= GRANT_FOR_LOCAL_OIDS) {
+ sfs->os_bavail = 0;
+ } else {
+ sfs->os_bavail -= GRANT_FOR_LOCAL_OIDS;
+ /** Take out metadata overhead for indirect blocks */
+ sfs->os_bavail -= sfs->os_bavail >> (sb->s_blocksize_bits - 3);
+ }
+
spin_lock_init(&o->od_osfs_lock);
mutex_init(&o->od_otable_mutex);
spin_lock_init(&o->od_osfs_lock);
mutex_init(&o->od_otable_mutex);
- o->od_osfs_age = cfs_time_shift_64(-1000);
o->od_capa_hash = init_capa_hash();
if (o->od_capa_hash == NULL)
o->od_capa_hash = init_capa_hash();
if (o->od_capa_hash == NULL)
cfs_proc_dir_entry_t *od_proc_entry;
struct lprocfs_stats *od_stats;
cfs_proc_dir_entry_t *od_proc_entry;
struct lprocfs_stats *od_stats;
- /*
- * statfs optimization: we cache a bit.
- */
- cfs_time_t od_osfs_age;
- struct obd_statfs od_statfs;
spinlock_t od_osfs_lock;
int od_connects;
spinlock_t od_osfs_lock;
int od_connects;
#run_test 62 "verify obd_match failure doesn't LBUG (should -EIO)"
# bug 2319 - oig_wait() interrupted causes crash because of invalid waitq.
#run_test 62 "verify obd_match failure doesn't LBUG (should -EIO)"
# bug 2319 - oig_wait() interrupted causes crash because of invalid waitq.
-test_63a() { # was test_63
+# Though this test is irrelevant anymore, it helped to reveal some
+# other grant bugs (LU-4482), let's keep it.
+test_63a() { # was test_63
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
MAX_DIRTY_MB=`lctl get_param -n osc.*.max_dirty_mb | head -n 1`
[ $PARALLEL == "yes" ] && skip "skip parallel run" && return
MAX_DIRTY_MB=`lctl get_param -n osc.*.max_dirty_mb | head -n 1`
- lctl set_param -n osc.*.max_dirty_mb 0
for i in `seq 10` ; do
dd if=/dev/zero of=$DIR/f63 bs=8k &
sleep 5
for i in `seq 10` ; do
dd if=/dev/zero of=$DIR/f63 bs=8k &
sleep 5
- lctl set_param -n osc.*.max_dirty_mb $MAX_DIRTY_MB
rm -f $DIR/f63 || true
}
run_test 63a "Verify oig_wait interruption does not crash ======="
rm -f $DIR/f63 || true
}
run_test 63a "Verify oig_wait interruption does not crash ======="