])
#
+# 2.6.38 vfsmount.mnt_count doesn't use atomic_t
+#
+# Compile probe: the test body assigns an atomic_t compound literal to
+# vfsmount.mnt_count, which only compiles while that field is still an
+# atomic_t (pre-2.6.38 kernels).  On success HAVE_ATOMIC_MNT_COUNT is
+# defined; otherwise callers fall back to the open-coded mnt_get_count()
+# helper added elsewhere in this patch.
+#
+AC_DEFUN([LC_ATOMIC_MNT_COUNT],
+[AC_MSG_CHECKING([if vfsmount.mnt_count is atomic_t])
+LB_LINUX_TRY_COMPILE([
+ #include <asm/atomic.h>
+ #include <linux/fs.h>
+ #include <linux/mount.h>
+],[
+ ((struct vfsmount *)0)->mnt_count = ((atomic_t) { 0 });
+],[
+ AC_DEFINE(HAVE_ATOMIC_MNT_COUNT, 1,
+ [vfsmount.mnt_count is atomic_t])
+ AC_MSG_RESULT([yes])
+],[
+ AC_MSG_RESULT([no])
+])
+])
+
+#
# 2.6.39 remove unplug_fn from request_queue.
#
AC_DEFUN([LC_REQUEST_QUEUE_UNPLUG_FN],
LC_SBOPS_EVICT_INODE
# 2.6.38
+ LC_ATOMIC_MNT_COUNT
LC_BLKDEV_GET_BY_DEV
LC_GENERIC_PERMISSION
#define ll_kern_mount(fstype, flags, name, data) do_kern_mount((fstype), (flags), (name), (data))
#endif
+#ifndef HAVE_ATOMIC_MNT_COUNT
+/*
+ * Kernel >= 2.6.38: vfsmount.mnt_count is no longer an atomic_t.  On SMP
+ * it is kept as per-cpu counters (mnt->mnt_pcp->mnt_count), so the total
+ * reference count is the sum over all possible CPUs.  NOTE(review):
+ * without serialization against mounting/unmounting this sum can be
+ * stale by the time it is returned -- the callers below only use it for
+ * debug prints and a best-effort busy-wait, which should tolerate that;
+ * confirm no caller relies on an exact count.
+ */
+static inline unsigned int mnt_get_count(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+ unsigned int count = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count;
+ }
+
+ return count;
+#else
+ return mnt->mnt_count;
+#endif
+}
+#else
+/* Older kernels: mnt_count is still an atomic_t; read it directly.  The
+ * macro argument is parenthesized so that any pointer expression (e.g.
+ * "*pp" or "cond ? a : b") expands safely. */
+# define mnt_get_count(mnt) cfs_atomic_read(&(mnt)->mnt_count)
+#endif
+
#ifdef HAVE_STATFS_DENTRY_PARAM
#define ll_do_statfs(sb, sfs) (sb)->s_op->statfs((sb)->s_root, (sfs))
#else
cfs_up(&lustre_mount_info_lock);
CDEBUG(D_MOUNT, "reg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, mnt_get_count(lmi->lmi_mnt));
RETURN(0);
}
}
CDEBUG(D_MOUNT, "dereg_mnt %p from %s, vfscount=%d\n",
- lmi->lmi_mnt, name, cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
+ lmi->lmi_mnt, name, mnt_get_count(lmi->lmi_mnt));
OBD_FREE(lmi->lmi_name, strlen(lmi->lmi_name) + 1);
cfs_list_del(&lmi->lmi_list_chain);
CDEBUG(D_MOUNT, "get_mnt %p from %s, refs=%d, vfscount=%d\n",
lmi->lmi_mnt, name, cfs_atomic_read(&lsi->lsi_mounts),
- cfs_atomic_read(&lmi->lmi_mnt->mnt_count));
+ mnt_get_count(lmi->lmi_mnt));
RETURN(lmi);
}
{
struct lustre_mount_info *lmi;
struct lustre_sb_info *lsi;
- int count = atomic_read(&mnt->mnt_count) - 1;
+ int count = mnt_get_count(mnt) - 1;
ENTRY;
/* This might be the last one, can't deref after this */
cfs_waitq_init(&waitq);
- while (atomic_read(&mnt->mnt_count) > 1) {
+ while (mnt_get_count(mnt) > 1) {
if (waited && (waited % 30 == 0))
LCONSOLE_WARN("Mount still busy with %d refs after "
"%d secs.\n",
- atomic_read(&mnt->mnt_count),
+ mnt_get_count(mnt),
waited);
/* Cannot use l_event_wait() for an interruptible sleep. */
waited += 3;
blocked = cfs_block_sigsinv(sigmask(SIGKILL));
cfs_waitq_wait_event_interruptible_timeout(
waitq,
- (atomic_read(&mnt->mnt_count) == 1),
+ (mnt_get_count(mnt) == 1),
cfs_time_seconds(3),
rc);
cfs_block_sigs(blocked);
if (rc < 0) {
LCONSOLE_EMERG("Danger: interrupted umount %s with "
"%d refs!\n", mnt->mnt_devname,
- atomic_read(&mnt->mnt_count));
+ mnt_get_count(mnt));
break;
}