b=21122 fix a race between page fault and lock cancel.
diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index ec56b96..c955661 100644
@@ -1,25 +1,42 @@
 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
  * vim:expandtab:shiftwidth=8:tabstop=8:
  *
- *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ * GPL HEADER START
  *
- *   This file is part of Lustre, http://www.lustre.org.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   Lustre is free software; you can redistribute it and/or
- *   modify it under the terms of version 2 of the GNU General Public
- *   License as published by the Free Software Foundation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   Lustre is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
  */
 
+#ifndef AUTOCONF_INCLUDED
 #include <linux/config.h>
+#endif
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/fs.h>
 #include <linux/stat.h>
 #include <asm/uaccess.h>
-#include <asm/segment.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/smp_lock.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include <linux/iobuf.h>
-#endif
-
-#include <linux/pagevec.h>
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#include <linux/lustre_mds.h>
-#include <linux/lustre_lite.h>
+//#include <lustre_mdc.h>
+#include <lustre_lite.h>
 #include "llite_internal.h"
 #include <linux/lustre_compat25.h>
 
+#define VMA_DEBUG(vma, fmt, arg...)                                     \
+        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
+               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
+               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
+               vma->vm_file->f_dentry->d_inode->i_ino,                       \
+               vma->vm_file->f_dentry->d_iname, ## arg)
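A usage sketch for the macro above (hypothetical extent values; VMA_DEBUG
expands to a CDEBUG() call under the D_MMAP debug mask):

        VMA_DEBUG(vma, "locking extent ["LPU64", "LPU64"]\n",
                  policy.l_extent.start, policy.l_extent.end);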
 
-struct ll_lock_tree_node {
-        rb_node_t               lt_node;
-        struct list_head        lt_locked_item;
-        __u64                   lt_oid;
-        ldlm_policy_data_t      lt_policy;
-        struct lustre_handle    lt_lockh;
-        ldlm_mode_t             lt_mode;
-};
-
-__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
-int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
-                      unsigned long addr, size_t count);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int *type);
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused);
-#endif
-
-struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
-                                              __u64 end, ldlm_mode_t mode)
-{
-        struct ll_lock_tree_node *node;
-
-        OBD_ALLOC(node, sizeof(*node));
-        if (node == NULL)
-                RETURN(ERR_PTR(-ENOMEM));
-
-        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
-        node->lt_policy.l_extent.start = start;
-        node->lt_policy.l_extent.end = end;
-        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
-        INIT_LIST_HEAD(&node->lt_locked_item);
-        node->lt_mode = mode;
-
-        return node;
-}
-
-int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
-{
-        /* XXX remove this assert when we really want to use this function
-         * to compare different file's region */
-        LASSERT(one->lt_oid == two->lt_oid);
-
-        if ( one->lt_oid < two->lt_oid)
-                return -1;
-        if ( one->lt_oid > two->lt_oid)
-                return 1;
-
-        if ( one->lt_policy.l_extent.end < two->lt_policy.l_extent.start )
-                return -1;
-        if ( one->lt_policy.l_extent.start > two->lt_policy.l_extent.end )
-                return 1;
-
-        return 0; /* they are the same object and overlap */
-}
+static struct vm_operations_struct ll_file_vm_ops;
 
-static void lt_merge(struct ll_lock_tree_node *dst,
-                     struct ll_lock_tree_node *src)
+void policy_from_vma(ldlm_policy_data_t *policy,
+                     struct vm_area_struct *vma, unsigned long addr,
+                     size_t count)
 {
-        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
-                                            src->lt_policy.l_extent.start);
-        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
-                                          src->lt_policy.l_extent.end);
-
-        /* XXX could be a real call to the dlm to find superset modes */
-        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
-                dst->lt_mode = LCK_PW;
+        policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+                                 (vma->vm_pgoff << CFS_PAGE_SHIFT);
+        policy->l_extent.end = (policy->l_extent.start + count - 1) |
+                               ~CFS_PAGE_MASK;
 }
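For illustration only (not part of the patch): the extent arithmetic above,
redone as plain user-space C under an assumed 4 KiB page size. A fault one
byte into the third page of a mapping whose vm_pgoff is 16 yields a lock
extent covering exactly file page 18:

        #include <stdio.h>

        #define PGSIZE 4096UL
        #define PGMASK (~(PGSIZE - 1))      /* stands in for CFS_PAGE_MASK */

        int main(void)
        {
                unsigned long vm_start = 0x600000000000UL; /* hypothetical base */
                unsigned long vm_pgoff = 16;   /* mapping starts at file page 16 */
                unsigned long addr  = vm_start + 2 * PGSIZE + 1; /* fault addr */
                unsigned long count = 1;       /* lock the faulting byte only */

                unsigned long start = ((addr - vm_start) & PGMASK) +
                                      vm_pgoff * PGSIZE;
                unsigned long end   = (start + count - 1) | ~PGMASK;

                /* prints "extent [73728, 77823] = file page 18" */
                printf("extent [%lu, %lu] = file page %lu\n",
                       start, end, start / PGSIZE);
                return 0;
        }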
 
-static void lt_insert(struct ll_lock_tree *tree,
-                      struct ll_lock_tree_node *node)
+struct vm_area_struct *our_vma(unsigned long addr, size_t count)
 {
-        struct ll_lock_tree_node *walk;
-        rb_node_t **p, *parent;
+        struct mm_struct *mm = current->mm;
+        struct vm_area_struct *vma, *ret = NULL;
         ENTRY;
 
-restart:
-        p = &tree->lt_root.rb_node;
-        parent = NULL;
-        while (*p) {
-                parent = *p;
-                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
-                switch (lt_compare(node, walk)) {
-                case -1:
-                        p = &(*p)->rb_left;
-                        break;
-                case 1:
-                        p = &(*p)->rb_right;
-                        break;
-                case 0:
-                        lt_merge(node, walk);
-                        rb_erase(&walk->lt_node, &tree->lt_root);
-                        OBD_FREE(walk, sizeof(*walk));
-                        goto restart;
-                        break;
-                default:
-                        LBUG();
-                        break;
-                }
-        }
-        rb_link_node(&node->lt_node, parent, p);
-        rb_insert_color(&node->lt_node, &tree->lt_root);
-        EXIT;
-}
+        /* No MM (e.g. in an NFS kernel thread)? Then no VMAs either. */
+        if (!mm)
+                RETURN(NULL);
 
-static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
-{
-        rb_node_t *rbnode;
-        struct ll_lock_tree_node *node = NULL;
-
-        for ( rbnode = tree->lt_root.rb_node; rbnode != NULL;
-              rbnode = rbnode->rb_left) {
-                if (rbnode->rb_left == NULL) {
-                        node = rb_entry(rbnode, struct ll_lock_tree_node,
-                                        lt_node);
+        spin_lock(&mm->page_table_lock);
+        for (vma = find_vma(mm, addr);
+             vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
+                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
+                    vma->vm_flags & VM_SHARED) {
+                        ret = vma;
                         break;
                 }
         }
-        RETURN(node);
+        spin_unlock(&mm->page_table_lock);
+        RETURN(ret);
 }
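A hypothetical caller (illustration only, not in this patch) showing how
our_vma() and policy_from_vma() compose: given a user buffer, find the shared
Lustre mapping backing it and derive the DLM extent that must be locked:

        /* hypothetical helper: fills *policy if the buffer is backed by a
         * shared Lustre mapping, returns -ENOENT otherwise */
        static int mmap_lock_extent(unsigned long addr, size_t count,
                                    ldlm_policy_data_t *policy)
        {
                struct vm_area_struct *vma = our_vma(addr, count);

                if (vma == NULL)
                        return -ENOENT;

                policy_from_vma(policy, vma, addr, count);
                return 0;
        }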
 
-int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
+/**
+ * API-independent part of page fault initialization.
+ * \param vma - virtual memory area addressed by the page fault
+ * \param env_ret - corresponding lu_env for processing
+ * \param nest - nesting level
+ * \param index - page index corresponding to the fault
+ * \param ra_flags - vma readahead flags
+ *
+ * \return allocated and initialized env for the fault operation
+ * \retval -EINVAL if the env cannot be allocated
+ * \return other error codes from cl_io_init
+ */
+int ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
+                     struct cl_env_nest *nest, pgoff_t index,
+                     unsigned long *ra_flags)
 {
-        struct ll_lock_tree_node *node;
-        struct list_head *pos, *n;
-        int rc = 0;
+        struct file       *file  = vma->vm_file;
+        struct inode      *inode = file->f_dentry->d_inode;
+        const unsigned long writable = VM_SHARED|VM_WRITE;
+        struct cl_io      *io;
+        struct cl_fault_io *fio;
+        struct lu_env     *env;
         ENTRY;
 
-        list_for_each_safe(pos, n, &tree->lt_locked_list) {
-                node = list_entry(pos, struct ll_lock_tree_node,
-                                  lt_locked_item);
+        if (ll_file_nolock(file))
+                RETURN(-EOPNOTSUPP);
 
-                rc = ll_extent_unlock(tree->lt_fd, inode,
-                                      ll_i2info(inode)->lli_smd, node->lt_mode,
-                                      &node->lt_lockh);
-                if (rc != 0) {
-                        /* XXX better message */
-                        CERROR("couldn't unlock %d\n", rc);
-                }
-                list_del(&node->lt_locked_item);
-                OBD_FREE(node, sizeof(*node));
+        /*
+         * A page fault can occur while Lustre IO is already active for the
+         * current thread, e.g., while doing read/write against a user-level
+         * buffer mapped from a Lustre file.  To avoid stomping on the
+         * existing context, force allocation of a new, nested one.
+         */
+        env = cl_env_nested_get(nest);
+        if (IS_ERR(env)) {
+                *env_ret = NULL;
+                RETURN(-EINVAL);
         }
 
-        while ((node = lt_least_node(tree))) {
-                rb_erase(&node->lt_node, &tree->lt_root);
-                OBD_FREE(node, sizeof(*node));
-        }
+        *env_ret = env;
 
-        RETURN(rc);
-}
+        io = &ccc_env_info(env)->cti_io;
+        io->ci_obj = ll_i2info(inode)->lli_clob;
+        LASSERT(io->ci_obj != NULL);
 
-int ll_tree_lock(struct ll_lock_tree *tree,
-                 struct ll_lock_tree_node *first_node, struct inode *inode,
-                 const char *buf, size_t count, int ast_flags)
-{
-        struct ll_lock_tree_node *node;
-        int rc = 0;
-        ENTRY;
+        fio = &io->u.ci_fault;
+        fio->ft_index      = index;
+        fio->ft_writable   = (vma->vm_flags & writable) == writable;
+        fio->ft_executable = vma->vm_flags & VM_EXEC;
 
-        tree->lt_root.rb_node = NULL;
-        INIT_LIST_HEAD(&tree->lt_locked_list);
-        if (first_node != NULL)
-                lt_insert(tree, first_node);
-
-        /* order locking. what we have to concern about is ONLY double lock:
-         * the buffer is mapped to exactly this file. */
-        if (mapping_mapped(inode->i_mapping)) {
-                rc = lt_get_mmap_locks(tree, inode, (unsigned long)buf, count);
-                if (rc)
-                        GOTO(out, rc);
-        }
+        /*
+         * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that the
+         * kernel will not read pages not covered by ldlm locks in
+         * filemap_nopage(); we do our own readahead in ll_readpage().
+         */
+        *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
+        vma->vm_flags &= ~VM_SEQ_READ;
+        vma->vm_flags |= VM_RAND_READ;
 
-        while ((node = lt_least_node(tree))) {
-                struct obd_service_time *stime;
-                stime = (node->lt_mode & LCK_PW) ?
-                        &ll_i2sbi(inode)->ll_write_stime :
-                        &ll_i2sbi(inode)->ll_read_stime;
-
-                rc = ll_extent_lock(tree->lt_fd, inode,
-                                    ll_i2info(inode)->lli_smd, node->lt_mode,
-                                    &node->lt_policy, &node->lt_lockh,
-                                    ast_flags, stime);
-                if (rc != 0)
-                        GOTO(out, rc);
-
-                rb_erase(&node->lt_node, &tree->lt_root);
-                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
-        }
-        RETURN(rc);
-out:
-        ll_tree_unlock(tree, inode);
-        return rc;
-}
+        CDEBUG(D_INFO, "vm_flags: %lx (%lu %d %d)\n", vma->vm_flags,
+               fio->ft_index, fio->ft_writable, fio->ft_executable);
 
-static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
-{
-        /* we only want to hold PW locks if the mmap() can generate
-         * writes back to the file and that only happens in shared
-         * writable vmas */
-        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
-                return LCK_PW;
-        return LCK_PR;
-}
+        if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
+                struct ccc_io *cio = ccc_env_io(env);
+                struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
 
-static void policy_from_vma(ldlm_policy_data_t *policy,
-                            struct vm_area_struct *vma, unsigned long addr,
-                            size_t count)
-{
-        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
-                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
-        policy->l_extent.end = (policy->l_extent.start + count - 1) |
-                               (PAGE_CACHE_SIZE - 1);
-}
+                LASSERT(cio->cui_cl.cis_io == io);
 
-static struct vm_area_struct *our_vma(unsigned long addr, size_t count,
-                                       struct inode *inode)
-{
-        struct mm_struct *mm = current->mm;
-        struct vm_area_struct *vma, *ret = NULL;
-        ENTRY;
+                /* mmap lock must be MANDATORY
+                 * because it has to cache pages. */
+                io->ci_lockreq = CILR_MANDATORY;
 
-        spin_lock(&mm->page_table_lock);
-        for(vma = find_vma(mm, addr);
-            vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
-                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage &&
-                    vma->vm_file && vma->vm_file->f_dentry->d_inode == inode) {
-                        ret = vma;
-                        break;
-                }
+                cio->cui_fd  = fd;
         }
-        spin_unlock(&mm->page_table_lock);
-        RETURN(ret);
+
+        return io->ci_result;
 }
 
-int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
-                      unsigned long addr, size_t count)
+#ifndef HAVE_VM_OP_FAULT
+/**
+ * Lustre implementation of the vm_operations_struct::nopage() method, called
+ * by the VM to serve a page fault (in both kernel and user space).
+ *
+ * This function sets up a CIT_FAULT cl_io that does the job.
+ *
+ * \param vma - virtual area struct related to the page fault
+ * \param address - faulting address
+ * \param type - type of the fault
+ *
+ * \return allocated and filled _unlocked_ page for the address
+ * \retval NOPAGE_SIGBUS if no page exists at this address
+ * \retval NOPAGE_OOM if there is no memory to allocate a new page
+ */
+struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
+                       int *type)
 {
-        struct vm_area_struct *vma;
-        struct ll_lock_tree_node *node;
-        ldlm_policy_data_t policy;
+        struct lu_env           *env;
+        struct cl_env_nest      nest;
+        struct cl_io            *io;
+        struct page             *page;
+        struct vvp_io           *vio;
+        unsigned long           ra_flags;
+        pgoff_t                 pg_offset;
+        int                     result;
         ENTRY;
 
-        if (count == 0)
-                RETURN(0);
+        pg_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
+        result = ll_fault_io_init(vma, &env, &nest, pg_offset, &ra_flags);
+        if (env == NULL)
+                return NOPAGE_SIGBUS;
 
-        /* we need to look up vmas on page aligned addresses */
-        count += addr & (PAGE_SIZE - 1);
-        addr -= addr & (PAGE_SIZE - 1);
+        io = &ccc_env_info(env)->cti_io;
+        if (result < 0)
+                goto out_err;
 
-        while ((vma = our_vma(addr, count, inode)) != NULL) {
+        vio = vvp_env_io(env);
+        vio->u.fault.ft_vma            = vma;
+        vio->u.fault.ft_vmpage         = NULL;
+        vio->u.fault.nopage.ft_address = address;
+        vio->u.fault.nopage.ft_type    = type;
 
-                policy_from_vma(&policy, vma, addr, count);
-                node = ll_node_from_inode(inode, policy.l_extent.start,
-                                          policy.l_extent.end,
-                                          mode_from_vma(vma));
-                if (IS_ERR(node)) {
-                        CERROR("not enough mem for lock_tree_node!\n");
-                        RETURN(-ENOMEM);
-                }
-                lt_insert(tree, node);
+        result = cl_io_loop(env, io);
 
-                if (vma->vm_end - addr >= count)
-                        break;
-                count -= vma->vm_end - addr;
-                addr = vma->vm_end;
+        page = vio->u.fault.ft_vmpage;
+        if (page != NULL) {
+                LASSERT(PageLocked(page));
+                unlock_page(page);
+
+                if (result != 0)
+                        page_cache_release(page);
         }
-        RETURN(0);
+
+        LASSERT(ergo(result == 0, io->u.ci_fault.ft_page != NULL));
+out_err:
+        if (result != 0)
+                page = result == -ENOMEM ? NOPAGE_OOM : NOPAGE_SIGBUS;
+
+        vma->vm_flags &= ~VM_RAND_READ;
+        vma->vm_flags |= ra_flags;
+
+        cl_io_fini(env, io);
+        cl_env_nested_put(&nest, env);
+
+        RETURN(page);
 }
-/* FIXME: there is a pagefault race goes as follow:
- * 1. A user process on node A accesses a portion of a mapped file,
- *    resulting in a page fault.  The pagefault handler invokes the
- *    ll_nopage function, which reads the page into memory.
- * 2. A user process on node B writes to the same portion of the file
- *    (either via mmap or write()), that cause node A to cancel the
- *    lock and truncate the page.
- * 3. Node A then executes the rest of do_no_page(), entering the
- *    now-invalid page into the PTEs.
+#else
+/**
+ * Lustre implementation of the vm_operations_struct::fault() method, called
+ * by the VM to serve a page fault (in both kernel and user space).
+ *
+ * \param vma - virtual area struct related to the page fault
+ * \param vmf - structure describing the fault type and address
  *
- * Make the whole do_no_page as a hook to cover both the page cache
- * and page mapping installing with dlm lock would eliminate this race.
+ * \return fault flags; the allocated and filled _locked_ page is returned
+ *         in vmf->page
+ * \retval VM_FAULT_ERROR on general error
+ * \retval VM_FAULT_OOM if there is no memory to allocate a new page
  */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int *type)
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused)
-#endif
+int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        struct file *filp = vma->vm_file;
-        struct ll_file_data *fd = filp->private_data;
-        struct inode *inode = filp->f_dentry->d_inode;
-        struct lustre_handle lockh = { 0 };
-        ldlm_policy_data_t policy;
-        ldlm_mode_t mode;
-        struct page *page = NULL;
-        struct ll_inode_info *lli = ll_i2info(inode);
-        struct obd_service_time *stime;
-        __u64 kms;
-        unsigned long pgoff, size, rand_read, seq_read;
-        int rc = 0;
+        struct lu_env           *env;
+        struct cl_io            *io;
+        struct vvp_io           *vio;
+        unsigned long            ra_flags;
+        struct cl_env_nest       nest;
+        int                      result;
+        int                      fault_ret = 0;
         ENTRY;
 
-        if (lli->lli_smd == NULL) {
-                CERROR("No lsm on fault?\n");
-                RETURN(NULL);
-        }
+        result = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
+        if (env == NULL)
+                RETURN(VM_FAULT_ERROR);
 
-        /* start and end the lock on the first and last bytes in the page */
-        policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
+        io = &ccc_env_info(env)->cti_io;
+        if (result < 0)
+                goto out_err;
 
-        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
-               vma, inode->i_ino, policy.l_extent.start, policy.l_extent.end);
+        vio = vvp_env_io(env);
+        vio->u.fault.ft_vma       = vma;
+        vio->u.fault.ft_vmpage    = NULL;
+        vio->u.fault.fault.ft_vmf = vmf;
 
-        mode = mode_from_vma(vma);
-        stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
-                                  &ll_i2sbi(inode)->ll_read_stime;
-        
-        rc = ll_extent_lock(fd, inode, lli->lli_smd, mode, &policy,
-                            &lockh, LDLM_FL_CBPENDING, stime);
-        if (rc != 0)
-                RETURN(NULL);
+        result = cl_io_loop(env, io);
+        if (unlikely(result != 0 && vio->u.fault.ft_vmpage != NULL)) {
+                struct page *vmpage = vio->u.fault.ft_vmpage;
 
-        /* XXX change inode size without i_sem hold! there is a race condition
-         *     with truncate path. (see ll_extent_lock) */
-        down(&lli->lli_size_sem);
-        kms = lov_merge_size(lli->lli_smd, 1);
-        pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
-        size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-        if (pgoff >= size) {
-                up(&lli->lli_size_sem);
-                ll_glimpse_size(inode);
-        } else {
-                inode->i_size = kms;
-                up(&lli->lli_size_sem);
+                LASSERT((vio->u.fault.fault.ft_flags & VM_FAULT_LOCKED) &&
+                        PageLocked(vmpage));
+                unlock_page(vmpage);
+                page_cache_release(vmpage);
+                vmf->page = NULL;
         }
 
-        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
-         * the kernel will not read other pages not covered by ldlm in
-         * filemap_nopage. we do our readahead in ll_readpage.
-         */
-        rand_read = vma->vm_flags & VM_RAND_READ;
-        seq_read = vma->vm_flags & VM_SEQ_READ;
-        vma->vm_flags &= ~ VM_SEQ_READ;
-        vma->vm_flags |= VM_RAND_READ;
+        fault_ret = vio->u.fault.fault.ft_flags;
+out_err:
+        if (result != 0)
+                fault_ret |= VM_FAULT_ERROR;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-        page = filemap_nopage(vma, address, type);
-#else
-        page = filemap_nopage(vma, address, unused);
-#endif
-        vma->vm_flags &= ~VM_RAND_READ;
-        vma->vm_flags |= (rand_read | seq_read);
+        vma->vm_flags |= ra_flags;
 
-        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
-        RETURN(page);
+        cl_io_fini(env, io);
+        cl_env_nested_put(&nest, env);
+
+        RETURN(fault_ret);
 }
 
-/* return the user space pointer that maps to a file offset via a vma */
-static inline unsigned long file_to_user(struct vm_area_struct *vma,
-                                         __u64 byte)
+#endif
+
+/**
+ * To avoid cancelling the locks that cover a mmapped region under lock
+ * cache pressure, we track the mapped vma count in ccc_object::cob_mmap_cnt.
+ */
+static void ll_vm_open(struct vm_area_struct *vma)
 {
-        return vma->vm_start +
-               (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
+        struct inode *inode    = vma->vm_file->f_dentry->d_inode;
+        struct ccc_object *vob = cl_inode2ccc(inode);
+
+        ENTRY;
+        LASSERT(vma->vm_file);
+        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
+        cfs_atomic_inc(&vob->cob_mmap_cnt);
+        EXIT;
 }
 
-#define VMA_DEBUG(vma, fmt, arg...)                                          \
-        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p) "   \
-               "ino(%lu) iname(%s): " fmt, vma, vma->vm_start, vma->vm_end,  \
-               vma->vm_pgoff, vma->vm_file->f_dentry->d_inode,               \
-               vma->vm_file->f_dentry->d_inode->i_ino,                       \
-               vma->vm_file->f_dentry->d_iname, ## arg);                     \
+/**
+ * Dual to ll_vm_open().
+ */
+static void ll_vm_close(struct vm_area_struct *vma)
+{
+        struct inode      *inode = vma->vm_file->f_dentry->d_inode;
+        struct ccc_object *vob   = cl_inode2ccc(inode);
+
+        ENTRY;
+        LASSERT(vma->vm_file);
+        cfs_atomic_dec(&vob->cob_mmap_cnt);
+        LASSERT(cfs_atomic_read(&vob->cob_mmap_cnt) >= 0);
+        EXIT;
+}
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-/* [first, last] are the byte offsets affected.
- * vm_{start, end} are user addresses of the first byte of the mapping and
- *      the next byte beyond it
- * vm_pgoff is the page index of the first byte in the mapping */
-static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
-                          __u64 last)
+#ifndef HAVE_VM_OP_FAULT
+#ifndef HAVE_FILEMAP_POPULATE
+static int (*filemap_populate)(struct vm_area_struct *area,
+                               unsigned long address, unsigned long len,
+                               pgprot_t prot, unsigned long pgoff,
+                               int nonblock);
+#endif
+static int ll_populate(struct vm_area_struct *area, unsigned long address,
+                       unsigned long len, pgprot_t prot, unsigned long pgoff,
+                       int nonblock)
 {
-        unsigned long address, len;
-        for (; vma ; vma = vma->vm_next_share) {
-                if (last >> PAGE_SHIFT < vma->vm_pgoff)
-                        continue;
-                if (first >> PAGE_CACHE_SHIFT > (vma->vm_pgoff +
-                    ((vma->vm_end - vma->vm_start) >> PAGE_CACHE_SHIFT)))
-                        continue;
-
-                /* XXX in case of unmap the cow pages of a running file,
-                 * don't unmap these private writeable mapping here!
-                 * though that will break private mappping a little.
-                 *
-                 * the clean way is to check the mapping of every page
-                 * and just unmap the non-cow pages, just like
-                 * unmap_mapping_range() with even_cow=0 in kernel 2.6.
-                 */
-                if (!(vma->vm_flags & VM_SHARED) &&
-                    (vma->vm_flags & VM_WRITE))
-                        continue;
-
-                address = max((unsigned long)vma->vm_start, 
-                              file_to_user(vma, first));
-                len = min((unsigned long)vma->vm_end,
-                          file_to_user(vma, last) + 1) - address;
-
-                VMA_DEBUG(vma, "zapping vma [first="LPU64" last="LPU64" "
-                          "address=%ld len=%ld]\n", first, last, address, len);
-                LASSERT(len > 0);
-                ll_zap_page_range(vma, address, len);
-        }
+        int rc = 0;
+        ENTRY;
+
+        /* always set nonblock to true to avoid page readahead */
+        rc = filemap_populate(area, address, len, prot, pgoff, 1);
+        RETURN(rc);
 }
 #endif
 
+/* return the user space pointer that maps to a file offset via a vma */
+static inline unsigned long file_to_user(struct vm_area_struct *vma,
+                                         __u64 byte)
+{
+        return vma->vm_start +
+               (byte - ((__u64)vma->vm_pgoff << CFS_PAGE_SHIFT));
+}
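For illustration (not part of the patch): file_to_user() inverts the offset
arithmetic of policy_from_vma(). With the 4 KiB-page example used earlier
(vm_pgoff = 16), file byte 73728, i.e. file page 18, maps back to the address
that faulted:

        /* sketch: round-trip between a file offset and a user address */
        __u64 byte = 73728;                    /* file page 18 */
        unsigned long uaddr = file_to_user(vma, byte);
        /* uaddr == vma->vm_start + 2 * 4096 */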
+
 /* XXX put nice comment here.  talk about __free_pte -> dirty pages and
  * nopage's reference passing to the pte */
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
-                       __u64 last)
+int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 {
         int rc = -ENOENT;
         ENTRY;
 
         LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
         if (mapping_mapped(mapping)) {
                 rc = 0;
-                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
+                unmap_mapping_range(mapping, first + CFS_PAGE_SIZE - 1,
                                     last - first + 1, 0);
         }
-#else
-        spin_lock(&mapping->i_shared_lock);
-        if (mapping->i_mmap != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap, first, last);
-        }
-        if (mapping->i_mmap_shared != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap_shared, first, last);
-        }
-        spin_unlock(&mapping->i_shared_lock);
-#endif
 
         RETURN(rc);
 }
 
-
-static void ll_close_vma(struct vm_area_struct *vma)
-{
-        struct inode *inode = vma->vm_file->f_dentry->d_inode;
-        struct address_space *mapping = inode->i_mapping;
-        unsigned long next, size, end;
-        struct ll_async_page *llap;
-        struct obd_export *exp;
-        struct pagevec pvec;
-        int i;
-        
-        if (!(vma->vm_flags & VM_SHARED))
-                return;
-
-        /* all pte's are synced to mem_map by the moment
-         * we scan backing store and put all dirty pages
-         * onto pending list to track flushing */
-        
-        LASSERT(LLI_DIRTY_HANDLE(inode));
-        exp = ll_i2dtexp(inode);
-        if (exp == NULL) {
-                CERROR("can't get export for the inode\n");
-                return;
-        }
-        
-       pagevec_init(&pvec, 0);
-        next = vma->vm_pgoff;
-        size = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
-        end = next + size - 1;
-
-        CDEBUG(D_INODE, "close vma 0x%p[%lu/%lu/%lu from %lu/%u]\n", vma,
-               next, size, end, inode->i_ino, inode->i_generation);
-
-        while (next <= end && pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-                for (i = 0; i < pagevec_count(&pvec); i++) {
-                        struct page *page = pvec.pages[i];
-
-                        if (page->index > next)
-                                next = page->index;
-                        if (next > end)
-                                continue;
-                        next++;
-
-                        lock_page(page);
-                        if (page->mapping != mapping || !PageDirty(page)) {
-                                unlock_page(page);
-                                continue;
-                        }
-
-                        llap = llap_from_page(page, LLAP_ORIGIN_COMMIT_WRITE);
-                        if (IS_ERR(llap)) {
-                                CERROR("can't get llap\n");
-                                unlock_page(page);
-                                continue;
-                        }
-
-                        llap_write_pending(inode, llap);
-                        unlock_page(page);
-                }
-                pagevec_release(&pvec);
-        }
-}
-
 static struct vm_operations_struct ll_file_vm_ops = {
+#ifndef HAVE_VM_OP_FAULT
         .nopage         = ll_nopage,
-        .close          = ll_close_vma,
+        .populate       = ll_populate,
+#else
+        .fault          = ll_fault,
+#endif
+        .open           = ll_vm_open,
+        .close          = ll_vm_close,
 };
 
-int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
+int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+        struct inode *inode = file->f_dentry->d_inode;
         int rc;
         ENTRY;
 
+        if (ll_file_nolock(file))
+                RETURN(-EOPNOTSUPP);
+
+        ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
         rc = generic_file_mmap(file, vma);
         if (rc == 0) {
-                struct ll_inode_info *lli = ll_i2info(file->f_dentry->d_inode);
+#if !defined(HAVE_FILEMAP_POPULATE) && !defined(HAVE_VM_OP_FAULT)
+                if (!filemap_populate)
+                        filemap_populate = vma->vm_ops->populate;
+#endif
                 vma->vm_ops = &ll_file_vm_ops;
-                /* mark i/o epoch dirty */
-                if (vma->vm_flags & VM_SHARED)
-                        set_bit(LLI_F_DIRTY_HANDLE, &lli->lli_flags);
+                vma->vm_ops->open(vma);
+                /* update the inode's size and mtime */
+                rc = cl_glimpse_size(inode);
         }
 
         RETURN(rc);
 }
-
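For illustration (user-space, not part of the patch): the sequence that
exercises this path. Any shared mapping of a Lustre file goes through
ll_file_mmap(), and the first touch of each page reaches the
ll_fault()/ll_nopage() handlers above; the path and length are hypothetical:

        #include <fcntl.h>
        #include <sys/mman.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/mnt/lustre/f", O_RDWR); /* hypothetical file */
                char *p;

                if (fd < 0)
                        return 1;
                p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
                if (p != MAP_FAILED) {
                        p[0] = 'x';      /* page fault -> CIT_FAULT cl_io */
                        munmap(p, 4096); /* ll_vm_close() drops cob_mmap_cnt */
                }
                close(fd);
                return 0;
        }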