diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index 26bac6c..025ad7b 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -19,7 +19,9 @@
  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#ifndef AUTOCONF_INCLUDED
 #include <linux/config.h>
+#endif
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include <linux/iobuf.h>
-#endif
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#include <linux/lustre_mds.h>
-#include <linux/lustre_lite.h>
+//#include <lustre_mdc.h>
+#include <lustre_lite.h>
 #include "llite_internal.h"
 #include <linux/lustre_compat25.h>
 
+#define VMA_DEBUG(vma, fmt, arg...)                                     \
+        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
+               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
+               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
+               vma->vm_file->f_dentry->d_inode->i_ino,                       \
+               vma->vm_file->f_dentry->d_iname, ## arg)
+
 
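The VMA_DEBUG macro added above uses the GNU variadic-macro pattern
(named argument list plus "## arg" to swallow the comma when no extra
arguments are given). A minimal user-space sketch of the same pattern,
with printf standing in for CDEBUG and a hypothetical struct my_vma in
place of the kernel's vm_area_struct:

    #include <stdio.h>

    struct my_vma { unsigned long vm_start, vm_end, vm_pgoff; };

    #define MY_VMA_DEBUG(vma, fmt, arg...)                              \
            printf("vma(%p) start(%lu) end(%lu) pgoff(%lu): " fmt,      \
                   (void *)(vma), (vma)->vm_start, (vma)->vm_end,       \
                   (vma)->vm_pgoff, ## arg)

    int main(void)
    {
            struct my_vma v = { 0x1000, 0x3000, 4 };
            MY_VMA_DEBUG(&v, "mapped %d pages\n", 2);
            return 0;
    }
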
 struct ll_lock_tree_node {
         rb_node_t               lt_node;
@@ -57,18 +63,14 @@ struct ll_lock_tree_node {
         ldlm_policy_data_t      lt_policy;
         struct lustre_handle    lt_lockh;
         ldlm_mode_t             lt_mode;
+        struct inode           *lt_inode;
 };
 
-__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
-int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
+int lt_get_mmap_locks(struct ll_lock_tree *tree,
                       unsigned long addr, size_t count);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+
 struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                        int *type);
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused);
-#endif
 
 struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
                                               __u64 end, ldlm_mode_t mode)
@@ -79,6 +81,7 @@ struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
         if (node == NULL)
                 RETURN(ERR_PTR(-ENOMEM));
 
+        node->lt_inode = inode;
         node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
         node->lt_policy.l_extent.start = start;
         node->lt_policy.l_extent.end = end;
@@ -91,18 +94,20 @@ struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
 
 int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
 {
-        /* XXX remove this assert when we really want to use this function
-         * to compare different file's region */
-        LASSERT(one->lt_oid == two->lt_oid);
+        /* Order by device first to avoid deadlock across multiple filesystems */
+        if (one->lt_inode->i_sb->s_dev < two->lt_inode->i_sb->s_dev)
+                return -1;
+        if (one->lt_inode->i_sb->s_dev > two->lt_inode->i_sb->s_dev)
+                return 1;
 
-        if ( one->lt_oid < two->lt_oid)
+        if (one->lt_oid < two->lt_oid)
                 return -1;
-        if ( one->lt_oid > two->lt_oid)
+        if (one->lt_oid > two->lt_oid)
                 return 1;
 
-        if ( one->lt_policy.l_extent.end < two->lt_policy.l_extent.start )
+        if (one->lt_policy.l_extent.end < two->lt_policy.l_extent.start)
                 return -1;
-        if ( one->lt_policy.l_extent.start > two->lt_policy.l_extent.end )
+        if (one->lt_policy.l_extent.start > two->lt_policy.l_extent.end)
                 return 1;
 
         return 0; /* they are the same object and overlap */
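
lt_compare() now orders lock nodes by (s_dev, object id, extent), so every
caller acquires locks in one global order — the standard defence against
ABBA deadlocks between files. A minimal sketch of the same idea, with a
hypothetical resource type and pthread mutexes standing in for ldlm
extent locks:

    #include <pthread.h>

    struct resource { unsigned long id; pthread_mutex_t lock; };

    static void lock_pair_in_order(struct resource *a, struct resource *b)
    {
            if (a->id > b->id) {         /* sort, as lt_least_node() does */
                    struct resource *t = a;
                    a = b;
                    b = t;
            }
            pthread_mutex_lock(&a->lock);
            if (a != b)
                    pthread_mutex_lock(&b->lock);
    }
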
@@ -173,10 +178,11 @@ static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
         RETURN(node);
 }
 
-int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
+int ll_tree_unlock(struct ll_lock_tree *tree)
 {
         struct ll_lock_tree_node *node;
         struct list_head *pos, *n;
+        struct inode *inode;
         int rc = 0;
         ENTRY;
 
@@ -184,6 +190,7 @@ int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
                 node = list_entry(pos, struct ll_lock_tree_node,
                                   lt_locked_item);
 
+                inode = node->lt_inode;
                 rc = ll_extent_unlock(tree->lt_fd, inode,
                                       ll_i2info(inode)->lli_smd, node->lt_mode,
                                       &node->lt_lockh);
@@ -204,7 +211,7 @@ int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
 }
 
 int ll_tree_lock(struct ll_lock_tree *tree,
-                 struct ll_lock_tree_node *first_node, struct inode *inode,
+                 struct ll_lock_tree_node *first_node,
                  const char *buf, size_t count, int ast_flags)
 {
         struct ll_lock_tree_node *node;
@@ -216,24 +223,19 @@ int ll_tree_lock(struct ll_lock_tree *tree,
         if (first_node != NULL)
                 lt_insert(tree, first_node);
 
-        /* order locking. what we have to concern about is ONLY double lock:
-         * the buffer is mapped to exactly this file. */
-        if (mapping_mapped(inode->i_mapping)) {
-                rc = lt_get_mmap_locks(tree, inode, (unsigned long)buf, count);
-                if (rc)
-                        GOTO(out, rc);
-        }
+        /* Avoid a subtle deadlock: client1 tries to read file1 into mmapped
+         * file2 while, at the same time, client2 tries to read file2 into
+         * mmapped file1. */
+        rc = lt_get_mmap_locks(tree, (unsigned long)buf, count);
+        if (rc)
+                GOTO(out, rc);
 
         while ((node = lt_least_node(tree))) {
-                struct obd_service_time *stime;
-                stime = (node->lt_mode & LCK_PW) ?
-                        &ll_i2sbi(inode)->ll_write_stime :
-                        &ll_i2sbi(inode)->ll_read_stime;
-
+                struct inode *inode = node->lt_inode;
                 rc = ll_extent_lock(tree->lt_fd, inode,
                                     ll_i2info(inode)->lli_smd, node->lt_mode,
                                     &node->lt_policy, &node->lt_lockh,
-                                    ast_flags, stime);
+                                    ast_flags);
                 if (rc != 0)
                         GOTO(out, rc);
 
@@ -242,8 +244,8 @@ int ll_tree_lock(struct ll_lock_tree *tree,
         }
         RETURN(rc);
 out:
-        ll_tree_unlock(tree, inode);
-        return rc;
+        ll_tree_unlock(tree);
+        RETURN(rc);
 }
 
 static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
@@ -260,24 +262,27 @@ static void policy_from_vma(ldlm_policy_data_t *policy,
                             struct vm_area_struct *vma, unsigned long addr,
                             size_t count)
 {
-        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
-                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
+        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
         policy->l_extent.end = (policy->l_extent.start + count - 1) |
-                               (PAGE_CACHE_SIZE - 1);
+                               ~CFS_PAGE_MASK;
 }
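
To see what policy_from_vma() computes, here is a worked example assuming
4 KiB pages (CFS_PAGE_SHIFT == 12, CFS_PAGE_MASK == ~0xfff); the concrete
addresses are hypothetical. The extent is widened to whole page
boundaries, which is what the lock needs in order to cover the faulting
page:

    #include <assert.h>

    int main(void)
    {
            unsigned long mask  = ~0xfffUL;               /* CFS_PAGE_MASK */
            unsigned long vm_start = 0x10000UL, vm_pgoff = 8;
            unsigned long addr = 0x10804UL, count = 0x100UL;

            unsigned long start = ((addr - vm_start) & mask)
                                  + (vm_pgoff << 12);
            unsigned long end   = (start + count - 1) | ~mask;

            assert(start == 0x8000);   /* rounded down to the page start */
            assert(end   == 0x8fff);   /* rounded out to the page end */
            return 0;
    }
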
 
-static struct vm_area_struct *our_vma(unsigned long addr, size_t count,
-                                       struct inode *inode)
+static struct vm_area_struct *our_vma(unsigned long addr, size_t count)
 {
         struct mm_struct *mm = current->mm;
         struct vm_area_struct *vma, *ret = NULL;
         ENTRY;
 
+        /* No mm (e.g. an NFS kernel thread)? Then there are no vmas either. */
+        if (!mm)
+                RETURN(NULL);
+
         spin_lock(&mm->page_table_lock);
         for(vma = find_vma(mm, addr);
             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
                 if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
-                    vma->vm_file && vma->vm_file->f_dentry->d_inode == inode) {
+                    vma->vm_flags & VM_SHARED) {
                         ret = vma;
                         break;
                 }
@@ -286,23 +291,26 @@ static struct vm_area_struct *our_vma(unsigned long addr, size_t count,
         RETURN(ret);
 }
 
-int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
+int lt_get_mmap_locks(struct ll_lock_tree *tree,
                       unsigned long addr, size_t count)
 {
         struct vm_area_struct *vma;
         struct ll_lock_tree_node *node;
         ldlm_policy_data_t policy;
+        struct inode *inode;
         ENTRY;
 
         if (count == 0)
                 RETURN(0);
 
         /* we need to look up vmas on page aligned addresses */
-        count += addr & (PAGE_SIZE - 1);
-        addr -= addr & (PAGE_SIZE - 1);
+        count += addr & (~CFS_PAGE_MASK);
+        addr &= CFS_PAGE_MASK;
 
-        while ((vma = our_vma(addr, count, inode)) != NULL) {
+        while ((vma = our_vma(addr, count)) != NULL) {
+                LASSERT(vma->vm_file);
 
+                inode = vma->vm_file->f_dentry->d_inode;
                 policy_from_vma(&policy, vma, addr, count);
                 node = ll_node_from_inode(inode, policy.l_extent.start,
                                           policy.l_extent.end,
@@ -320,7 +328,8 @@ int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
         }
         RETURN(0);
 }
-/* FIXME: there is a pagefault race goes as follow:
+
+/* FIXME: there is a pagefault race that goes as follows (2.4 only):
  * 1. A user process on node A accesses a portion of a mapped file,
  *    resulting in a page fault.  The pagefault handler invokes the
  *    ll_nopage function, which reads the page into memory.
@@ -332,59 +341,89 @@ int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
  *
  * Making the whole do_no_page path a hook that installs both the page cache
  * entry and the page mapping under the dlm lock would eliminate this race.
+ *
+ * In 2.6, the truncate_count in address_space covers this race.
  */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
 struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
                        int *type)
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused)
-#endif
 {
         struct file *filp = vma->vm_file;
-        struct ll_file_data *fd = filp->private_data;
+        struct ll_file_data *fd = LUSTRE_FPRIVATE(filp);
         struct inode *inode = filp->f_dentry->d_inode;
         struct lustre_handle lockh = { 0 };
         ldlm_policy_data_t policy;
         ldlm_mode_t mode;
-        struct page *page;
-        struct obd_service_time *stime;
-        __u64 kms;
+        struct page *page = NULL;
+        struct ll_inode_info *lli = ll_i2info(inode);
+        struct lov_stripe_md *lsm;
+        struct ost_lvb lvb;
+        __u64 kms, old_mtime;
         unsigned long pgoff, size, rand_read, seq_read;
         int rc = 0;
         ENTRY;
 
-        if (ll_i2info(inode)->lli_smd == NULL) {
+        if (lli->lli_smd == NULL) {
                 CERROR("No lsm on fault?\n");
                 RETURN(NULL);
         }
 
         /* start and end the lock on the first and last bytes in the page */
-        policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
+        policy_from_vma(&policy, vma, address, CFS_PAGE_SIZE);
 
         CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
-               vma, inode->i_ino, policy.l_extent.start,
-               policy.l_extent.end);
+               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);
 
         mode = mode_from_vma(vma);
-        stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
-                                  &ll_i2sbi(inode)->ll_read_stime;
-        
-        rc = ll_extent_lock(fd, inode, ll_i2info(inode)->lli_smd, mode, &policy,
-                            &lockh, LDLM_FL_CBPENDING, stime);
+        old_mtime = LTIME_S(inode->i_mtime);
+
+        lsm = lli->lli_smd;
+        rc = ll_extent_lock(fd, inode, lsm, mode, &policy,
+                            &lockh, LDLM_FL_CBPENDING | LDLM_FL_NO_LRU);
         if (rc != 0)
                 RETURN(NULL);
 
-        /* XXX change inode size without i_sem hold! there is a race condition
-         *     with truncate path. (see ll_extent_lock) */
-        kms = lov_merge_size(ll_i2info(inode)->lli_smd, 1);
-        pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
-        size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+        if (vma->vm_flags & VM_EXEC && LTIME_S(inode->i_mtime) != old_mtime)
+                CWARN("binary changed. inode %lu\n", inode->i_ino);
+
+        lov_stripe_lock(lsm);
+        inode_init_lvb(inode, &lvb);
+        obd_merge_lvb(ll_i2dtexp(inode), lsm, &lvb, 1);
+        kms = lvb.lvb_size;
+
+        pgoff = ((address - vma->vm_start) >> CFS_PAGE_SHIFT) + vma->vm_pgoff;
+        size = (kms + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
+
+        if (pgoff >= size) {
+                lov_stripe_unlock(lsm);
+                ll_glimpse_size(inode, LDLM_FL_BLOCK_GRANTED);
+        } else {
+                /* XXX change inode size without ll_inode_size_lock() held!
+                 *     there is a race condition with truncate path. (see
+                 *     ll_extent_lock) */
+                /* region is within kms and, hence, within real file size (A).
+                 * We need to increase i_size to cover the read region so that
+                 * generic_file_read() will do its job, but that doesn't mean
+                 * the kms size is _correct_, it is only the _minimum_ size.
+                 * If someone does a stat they will get the correct size which
+                 * will always be >= the kms value here.  b=11081 */
+                if (i_size_read(inode) < kms) {
+                        i_size_write(inode, kms);
+                        CDEBUG(D_INODE, "ino=%lu, updating i_size %llu\n",
+                               inode->i_ino, i_size_read(inode));
+                }
+                lov_stripe_unlock(lsm);
+        }
 
-        if (pgoff >= size)
-                ll_glimpse_size(inode);
-        else
-                inode->i_size = kms;
+        /* If the mapping is writeable, adjust kms to cover this page, but
+         * do not extend kms beyond the actual file size.  policy_from_vma()
+         * sets policy.l_extent.end to the end of the page (bug 10919). */
+        lov_stripe_lock(lsm);
+        if (mode == LCK_PW)
+                obd_adjust_kms(ll_i2dtexp(inode), lsm,
+                               min_t(loff_t, policy.l_extent.end + 1,
+                               i_size_read(inode)), 0);
+        lov_stripe_unlock(lsm);
 
         /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
          * the kernel will not read other pages not covered by ldlm in
@@ -395,11 +434,9 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         vma->vm_flags &= ~ VM_SEQ_READ;
         vma->vm_flags |= VM_RAND_READ;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         page = filemap_nopage(vma, address, type);
-#else
-        page = filemap_nopage(vma, address, unused);
-#endif
+        LL_CDEBUG_PAGE(D_PAGE, page, "got addr %lu type %lx\n", address,
+                       (long)type);
         vma->vm_flags &= ~VM_RAND_READ;
         vma->vm_flags |= (rand_read | seq_read);
 
@@ -407,95 +444,114 @@ struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
         RETURN(page);
 }
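
ll_nopage() treats kms ("known minimum size") as only a lower bound on the
file size: the cached i_size is raised to kms but never trusted as exact,
so a concurrent stat may still report something larger. A one-function
sketch of that invariant, with a hypothetical helper and plain integers in
place of i_size_read()/i_size_write():

    /* Grow the cached size to at least kms; never shrink it. */
    static inline void grow_cached_size(unsigned long long *i_size,
                                        unsigned long long kms)
    {
            if (*i_size < kms)
                    *i_size = kms;   /* kms is a minimum, not the truth */
    }
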
 
-/* return the user space pointer that maps to a file offset via a vma */
-static inline unsigned long file_to_user(struct vm_area_struct *vma,
-                                         __u64 byte)
+/* To avoid cancelling the locks that cover a mmapped region under lock cache
+ * pressure, we track the mapped vma count in lli_mmap_cnt.
+ * ll_vm_open():  when the first vma is linked, split locks from the lru.
+ * ll_vm_close(): when the last vma is unlinked, join all this file's locks
+ *                back to the lru.
+ *
+ * XXX for performance we don't check whether the vma and lock regions overlap.
+ */
+static void ll_vm_open(struct vm_area_struct *vma)
 {
-        return vma->vm_start +
-               (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
-}
+        struct inode *inode = vma->vm_file->f_dentry->d_inode;
+        struct ll_inode_info *lli = ll_i2info(inode);
+        ENTRY;
 
-#define VMA_DEBUG(vma, fmt, arg...)                                          \
-        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
-               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
-               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
-               vma->vm_file->f_dentry->d_inode->i_ino,                       \
-               vma->vm_file->f_dentry->d_iname, ## arg);                     \
+        LASSERT(vma->vm_file);
+
+        spin_lock(&lli->lli_lock);
+        LASSERT(atomic_read(&lli->lli_mmap_cnt) >= 0);
+
+        atomic_inc(&lli->lli_mmap_cnt);
+        if (atomic_read(&lli->lli_mmap_cnt) == 1) {
+                struct lov_stripe_md *lsm = lli->lli_smd;
+                struct ll_sb_info *sbi = ll_i2sbi(inode);
+                int count;
+
+                spin_unlock(&lli->lli_lock);
+
+                if (!lsm)
+                        return;
+                count = obd_join_lru(sbi->ll_dt_exp, lsm, 0);
+                VMA_DEBUG(vma, "split %d unused locks from lru\n", count);
+        } else {
+                spin_unlock(&lli->lli_lock);
+        }
+}
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-/* [first, last] are the byte offsets affected.
- * vm_{start, end} are user addresses of the first byte of the mapping and
- *      the next byte beyond it
- * vm_pgoff is the page index of the first byte in the mapping */
-static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
-                          __u64 last)
+static void ll_vm_close(struct vm_area_struct *vma)
 {
-        unsigned long address, len;
-        for (; vma ; vma = vma->vm_next_share) {
-                if (last >> PAGE_SHIFT < vma->vm_pgoff)
-                        continue;
-                if (first >> PAGE_CACHE_SHIFT > (vma->vm_pgoff +
-                    ((vma->vm_end - vma->vm_start) >> PAGE_CACHE_SHIFT)))
-                        continue;
-
-                /* XXX in case of unmap the cow pages of a running file,
-                 * don't unmap these private writeable mapping here!
-                 * though that will break private mappping a little.
-                 *
-                 * the clean way is to check the mapping of every page
-                 * and just unmap the non-cow pages, just like
-                 * unmap_mapping_range() with even_cow=0 in kernel 2.6.
-                 */
-                if (!(vma->vm_flags & VM_SHARED) &&
-                    (vma->vm_flags & VM_WRITE))
-                        continue;
-
-                address = max((unsigned long)vma->vm_start, 
-                              file_to_user(vma, first));
-                len = min((unsigned long)vma->vm_end,
-                          file_to_user(vma, last) + 1) - address;
-
-                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
-                          "address=%ld len=%ld]\n", first, last, address, len);
-                LASSERT(len > 0);
-                ll_zap_page_range(vma, address, len);
+        struct inode *inode = vma->vm_file->f_dentry->d_inode;
+        struct ll_inode_info *lli = ll_i2info(inode);
+        ENTRY;
+
+        LASSERT(vma->vm_file);
+
+        spin_lock(&lli->lli_lock);
+        LASSERT(atomic_read(&lli->lli_mmap_cnt) > 0);
+
+        atomic_dec(&lli->lli_mmap_cnt);
+        if (atomic_read(&lli->lli_mmap_cnt) == 0) {
+                struct lov_stripe_md *lsm = lli->lli_smd;
+                struct ll_sb_info *sbi = ll_i2sbi(inode);
+                int count;
+
+                spin_unlock(&lli->lli_lock);
+
+                if (!lsm)
+                        return;
+                count = obd_join_lru(sbi->ll_dt_exp, lsm, 1);
+                VMA_DEBUG(vma, "join %d unused locks to lru\n", count);
+        } else {
+                spin_unlock(&lli->lli_lock);
         }
 }
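
ll_vm_open() and ll_vm_close() implement a first-reference/last-reference
pattern (serialized here by lli_lock rather than by atomic read-modify-write
ordering). A compact generic sketch of the same pattern, with hypothetical
names and C11 atomics standing in for the kernel's atomic_t:

    #include <stdatomic.h>

    struct tracked { atomic_int refs; };

    static void obj_open(struct tracked *t)
    {
            if (atomic_fetch_add(&t->refs, 1) == 0) {
                    /* first reference: e.g. split locks from the lru */
            }
    }

    static void obj_close(struct tracked *t)
    {
            if (atomic_fetch_sub(&t->refs, 1) == 1) {
                    /* last reference: e.g. join locks back to the lru */
            }
    }
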
+
+#ifndef HAVE_FILEMAP_POPULATE
+static int (*filemap_populate)(struct vm_area_struct *area,
+                               unsigned long address, unsigned long len,
+                               pgprot_t prot, unsigned long pgoff,
+                               int nonblock);
 #endif
+static int ll_populate(struct vm_area_struct *area, unsigned long address,
+                       unsigned long len, pgprot_t prot, unsigned long pgoff,
+                       int nonblock)
+{
+        int rc = 0;
+        ENTRY;
+
+        /* always set nonblock to true to avoid page read-ahead */
+        rc = filemap_populate(area, address, len, prot, pgoff, 1);
+        RETURN(rc);
+}
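
When the kernel does not export filemap_populate (the !HAVE_FILEMAP_POPULATE
branch above), the default ->populate callback is captured on first mmap in
ll_file_mmap() below and then wrapped. A generic capture-and-wrap sketch of
that trick, with a hypothetical callback type and names:

    typedef int (*populate_fn)(void *area, int nonblock);

    static populate_fn default_populate;   /* saved once, at first mmap */

    static int my_populate(void *area, int nonblock)
    {
            (void)nonblock;                   /* caller's value ignored */
            return default_populate(area, 1); /* force nonblocking */
    }
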
+
+/* return the user space pointer that maps to a file offset via a vma */
+static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
+{
+        return vma->vm_start +
+               (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+}
 
 /* XXX put nice comment here.  talk about __free_pte -> dirty pages and
  * nopage's reference passing to the pte */
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
-                       __u64 last)
+int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 {
         int rc = -ENOENT;
         ENTRY;
 
-        LASSERT(last > first);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+        LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
         if (mapping_mapped(mapping)) {
                 rc = 0;
-                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
+                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                     last - first + 1, 0);
         }
-#else
-        spin_lock(&mapping->i_shared_lock);
-        if (mapping->i_mmap != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap, first, last);
-        }
-        if (mapping->i_mmap_shared != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap_shared, first, last);
-        }
-        spin_unlock(&mapping->i_shared_lock);
-#endif
 
         RETURN(rc);
 }
 
 static struct vm_operations_struct ll_file_vm_ops = {
         .nopage         = ll_nopage,
+        .open           = ll_vm_open,
+        .close          = ll_vm_close,
+        .populate       = ll_populate,
 };
 
 int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
@@ -503,10 +559,18 @@ int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
         int rc;
         ENTRY;
 
+        ll_stats_ops_tally(ll_i2sbi(file->f_dentry->d_inode), LPROC_LL_MAP, 1);
         rc = generic_file_mmap(file, vma);
-        if (rc == 0)
+        if (rc == 0) {
+#if !defined(HAVE_FILEMAP_POPULATE)
+                if (!filemap_populate)
+                        filemap_populate = vma->vm_ops->populate;
+#endif
                 vma->vm_ops = &ll_file_vm_ops;
+                vma->vm_ops->open(vma);
+                /* update the inode's size and mtime */
+                rc = ll_glimpse_size(file->f_dentry->d_inode, 0);
+        }
 
         RETURN(rc);
 }
-
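
ll_file_mmap() above follows the common "delegate, then override vm_ops"
pattern: generic_file_mmap() does the setup, after which the filesystem
substitutes its own operations table and invokes the ->open callback the
generic path did not know to call. A minimal sketch with hypothetical
types:

    struct vm_ops { void (*open)(void *vma); };

    static void my_open(void *vma) { (void)vma; /* e.g. count the mapping */ }

    static const struct vm_ops my_vm_ops = { .open = my_open };

    static int my_mmap(void *vma, int generic_rc, const struct vm_ops **ops)
    {
            if (generic_rc == 0) {
                    *ops = &my_vm_ops;     /* vma->vm_ops = &ll_file_vm_ops */
                    my_vm_ops.open(vma);   /* mimic vma->vm_ops->open(vma) */
            }
            return generic_rc;
    }
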