+ /* NOTE(review): diff hunk from the body of an OFD statfs wrapper (the
+  * enclosing function signature is outside this hunk; 'ofd', 'osfs',
+  * 'max_age', 'from_cache' and 'rc' are presumably its parameters/locals
+  * -- confirm against the full file).  Purpose as visible here: serve
+  * statfs data from ofd->ofd_osfs when fresh enough, otherwise refresh
+  * it via dt_statfs() while tracking concurrent writes so the cached
+  * numbers stay consistent. */
+ cfs_spin_lock(&ofd->ofd_osfs_lock);
+ if (cfs_time_before_64(ofd->ofd_osfs_age, max_age) || max_age == 0) {
+ obd_size unstable;
+
+ /* statfs data are too old, get up-to-date one.
+ * we must be cautious here since multiple threads might be
+ * willing to update statfs data concurrently and we must
+ * guarantee that cached statfs data are always consistent */
+
+ if (ofd->ofd_statfs_inflight == 0)
+ /* clear inflight counter if no users, although it would
+ * take a while to overflow this 64-bit counter ... */
+ ofd->ofd_osfs_inflight = 0;
+ /* notify ofd_grant_commit() that we want to track writes
+ * completed as of now.  Note: ofd_statfs_inflight counts
+ * threads running statfs, while ofd_osfs_inflight (distinct
+ * field, similar name) accumulates bytes committed -- easy
+ * to confuse, keep the distinction in mind */
+ ofd->ofd_statfs_inflight++;
+ /* record value of inflight counter before running statfs to
+ * compute the diff once statfs is completed */
+ unstable = ofd->ofd_osfs_inflight;
+ cfs_spin_unlock(&ofd->ofd_osfs_lock);
+
+ /* statfs can sleep ... hopefully not for too long since we can
+ * call it fairly often as space fills up */
+ rc = dt_statfs(env, ofd->ofd_osd, osfs);
+ /* NOTE(review): this error path returns with
+ * ofd_statfs_inflight still incremented -- looks like a
+ * counter leak unless a caller compensates; verify against
+ * the full function */
+ if (unlikely(rc))
+ return rc;
+
+ /* lock ordering here is grant_lock before osfs_lock --
+ * presumably the file-wide convention, confirm other call
+ * sites take them in the same order */
+ cfs_spin_lock(&ofd->ofd_grant_lock);
+ cfs_spin_lock(&ofd->ofd_osfs_lock);
+ /* calculate how much space was written while we released the
+ * ofd_osfs_lock */
+ unstable = ofd->ofd_osfs_inflight - unstable;
+ ofd->ofd_osfs_unstable = 0;
+ if (unstable) {
+ /* some writes completed while we were running statfs
+ * w/o the ofd_osfs_lock. Those ones got added to
+ * the cached statfs data that we are about to crunch.
+ * Take them into account in the new statfs data.
+ * unstable is in bytes, os_bavail in blocks, hence the
+ * shift by ofd_blockbits; clamp so os_bavail cannot
+ * underflow */
+ osfs->os_bavail -= min_t(obd_size, osfs->os_bavail,
+ unstable >> ofd->ofd_blockbits);
+ /* However, we don't really know if those writes got
+ * accounted in the statfs call, so tell
+ * ofd_grant_space_left() there is some uncertainty
+ * on the accounting of those writes.
+ * The purpose is to prevent spurious error messages in
+ * ofd_grant_space_left() since those writes might be
+ * accounted twice. */
+ ofd->ofd_osfs_unstable += unstable;
+ }
+ /* similarly, there is some uncertainty on write requests
+ * between prepare & commit */
+ ofd->ofd_osfs_unstable += ofd->ofd_tot_pending;
+ cfs_spin_unlock(&ofd->ofd_grant_lock);
+
+ /* finally update cached statfs data */
+ ofd->ofd_osfs = *osfs;
+ ofd->ofd_osfs_age = cfs_time_current_64();
+
+ ofd->ofd_statfs_inflight--; /* stop tracking */
+ if (ofd->ofd_statfs_inflight == 0)
+ /* last statfs user gone: reset the committed-bytes
+ * accumulator so it cannot grow unboundedly */
+ ofd->ofd_osfs_inflight = 0;
+ cfs_spin_unlock(&ofd->ofd_osfs_lock);
+
+ if (from_cache)
+ *from_cache = 0;
+ } else {
+ /* use cached statfs data */
+ *osfs = ofd->ofd_osfs;
+ cfs_spin_unlock(&ofd->ofd_osfs_lock);
+ if (from_cache)
+ *from_cache = 1;
+ }