LU-12631 llite: report latency for filesystem ops
[fs/lustre-release.git] / lustre / llite / llite_mmap.c
index 68af4ac..ead6b39 100644 (file)
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- *  Copyright (c) 2001-2003 Cluster File Systems, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   This file is part of Lustre, http://www.lustre.org.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   Lustre is free software; you can redistribute it and/or
- *   modify it under the terms of version 2 of the GNU General Public
- *   License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   Lustre is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2016, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
  */
 
-#include <linux/config.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/stat.h>
 #include <linux/errno.h>
-#include <linux/smp_lock.h>
-#include <linux/unistd.h>
-#include <linux/version.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#include <linux/fs.h>
-#include <linux/stat.h>
-#include <asm/uaccess.h>
-#include <asm/segment.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
 #include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-#include <linux/iobuf.h>
-#endif
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#include <linux/lustre_mds.h>
-#include <linux/lustre_lite.h>
 #include "llite_internal.h"
-#include <linux/lustre_compat25.h>
+#include <lustre_compat.h>
 
+static const struct vm_operations_struct ll_file_vm_ops;
 
-struct ll_lock_tree_node {
-        rb_node_t               lt_node;
-        struct list_head        lt_locked_item;
-        __u64                   lt_oid;
-        ldlm_policy_data_t      lt_policy;
-        struct lustre_handle    lt_lockh;
-        ldlm_mode_t             lt_mode;
-};
-
-__u64 lov_merge_size(struct lov_stripe_md *lsm, int kms);
-int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
-                      unsigned long addr, size_t count);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int *type);
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused);
-#endif
-
-struct ll_lock_tree_node * ll_node_from_inode(struct inode *inode, __u64 start,
-                                              __u64 end, ldlm_mode_t mode)
+void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
+                    unsigned long addr, size_t count)
 {
-        struct ll_lock_tree_node *node;
-
-        OBD_ALLOC(node, sizeof(*node));
-        if (node == NULL)
-                RETURN(ERR_PTR(-ENOMEM));
-
-        node->lt_oid = ll_i2info(inode)->lli_smd->lsm_object_id;
-        node->lt_policy.l_extent.start = start;
-        node->lt_policy.l_extent.end = end;
-        memset(&node->lt_lockh, 0, sizeof(node->lt_lockh));
-        INIT_LIST_HEAD(&node->lt_locked_item);
-        node->lt_mode = mode;
-
-        return node;
+       policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
+                                (vma->vm_pgoff << PAGE_SHIFT);
+       policy->l_extent.end = (policy->l_extent.start + count - 1) |
+                              ~PAGE_MASK;
 }
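
To illustrate the extent arithmetic in policy_from_vma() above: a standalone sketch, assuming 4 KiB pages; the vma values are hypothetical, not taken from the patch. The result is a page-aligned byte extent covering the faulting file pages.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT 12                            /* assume 4 KiB pages */
#define EX_PAGE_MASK  (~((1UL << EX_PAGE_SHIFT) - 1))

int main(void)
{
        unsigned long vm_start = 0x7f0000000000UL;  /* hypothetical vma start */
        unsigned long vm_pgoff = 8;                 /* vma maps file page 8 onward */
        unsigned long addr     = vm_start + 0x2345; /* faulting address in page 2 of the vma */
        size_t count           = 1;

        uint64_t start = ((addr - vm_start) & EX_PAGE_MASK) +
                         ((uint64_t)vm_pgoff << EX_PAGE_SHIFT);
        uint64_t end   = (start + count - 1) | ~EX_PAGE_MASK;

        /* prints "extent [40960, 45055]": exactly file page 10 */
        printf("extent [%llu, %llu]\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}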
 
-int lt_compare(struct ll_lock_tree_node *one, struct ll_lock_tree_node *two)
+struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
+                               size_t count)
 {
-        if ( one->lt_oid < two->lt_oid)
-                return -1;
-        if ( one->lt_oid > two->lt_oid)
-                return 1;
-
-        if ( one->lt_policy.l_extent.end < two->lt_policy.l_extent.start )
-                return -1;
-        if ( one->lt_policy.l_extent.start > two->lt_policy.l_extent.end )
-                return 1;
-
-        return 0; /* they are the same object and overlap */
-}
-
-static void lt_merge(struct ll_lock_tree_node *dst,
-                     struct ll_lock_tree_node *src)
-{
-        dst->lt_policy.l_extent.start = min(dst->lt_policy.l_extent.start,
-                                            src->lt_policy.l_extent.start);
-        dst->lt_policy.l_extent.end = max(dst->lt_policy.l_extent.end,
-                                          src->lt_policy.l_extent.end);
-
-        /* XXX could be a real call to the dlm to find superset modes */
-        if (src->lt_mode == LCK_PW && dst->lt_mode != LCK_PW)
-                dst->lt_mode = LCK_PW;
-}
-
-static void lt_insert(struct ll_lock_tree *tree,
-                      struct ll_lock_tree_node *node)
-{
-        struct ll_lock_tree_node *walk;
-        rb_node_t **p, *parent;
+        struct vm_area_struct *vma, *ret = NULL;
         ENTRY;
 
-restart:
-        p = &tree->lt_root.rb_node;
-        parent = NULL;
-        while (*p) {
-                parent = *p;
-                walk = rb_entry(parent, struct ll_lock_tree_node, lt_node);
-                switch (lt_compare(node, walk)) {
-                case -1:
-                        p = &(*p)->rb_left;
-                        break;
-                case 1:
-                        p = &(*p)->rb_right;
-                        break;
-                case 0:
-                        lt_merge(node, walk);
-                        rb_erase(&walk->lt_node, &tree->lt_root);
-                        OBD_FREE(walk, sizeof(*walk));
-                        goto restart;
-                        break;
-                default:
-                        LBUG();
-                        break;
-                }
-        }
-        rb_link_node(&node->lt_node, parent, p);
-        rb_insert_color(&node->lt_node, &tree->lt_root);
-        EXIT;
-}
+        /* mmap_sem must be held by the caller. */
+        LASSERT(!down_write_trylock(&mm->mmap_sem));
 
-static struct ll_lock_tree_node *lt_least_node(struct ll_lock_tree *tree)
-{
-        rb_node_t *rbnode;
-        struct ll_lock_tree_node *node = NULL;
-
-        for ( rbnode = tree->lt_root.rb_node; rbnode != NULL;
-              rbnode = rbnode->rb_left) {
-                if (rbnode->rb_left == NULL) {
-                        node = rb_entry(rbnode, struct ll_lock_tree_node,
-                                        lt_node);
+        for (vma = find_vma(mm, addr);
+            vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
+                if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
+                    vma->vm_flags & VM_SHARED) {
+                        ret = vma;
                         break;
                 }
         }
-        RETURN(node);
+        RETURN(ret);
 }
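
The LASSERT above is the usual trylock idiom for checking that a sleeping lock is held: down_write_trylock() can only succeed if mmap_sem is completely free, so its failure implies somebody (expected: the caller) holds it. A userspace sketch of the same idiom with a pthread rwlock; all names here are illustrative:

#include <assert.h>
#include <pthread.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Fails the assertion only if map_lock is entirely unlocked; like the
 * kernel idiom, it cannot tell *which* thread holds the lock. */
static void assert_map_lock_held(void)
{
        if (pthread_rwlock_trywrlock(&map_lock) == 0) {
                pthread_rwlock_unlock(&map_lock);
                assert(0 && "caller must hold map_lock");
        }
}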
 
-int ll_tree_unlock(struct ll_lock_tree *tree, struct inode *inode)
+/**
+ * API-independent part of page fault initialization.
+ * \param env - corresponding lu_env for processing
+ * \param vma - virtual memory area in which the page fault occurred
+ * \param index - page index corresponding to the fault.
+ * \param ra_flags - vma readahead flags.
+ *
+ * \return error codes from cl_io_init.
+ */
+static struct cl_io *
+ll_fault_io_init(struct lu_env *env, struct vm_area_struct *vma,
+                pgoff_t index, unsigned long *ra_flags)
 {
-        struct ll_lock_tree_node *node;
-        struct list_head *pos, *n;
-        int rc = 0;
-        ENTRY;
+       struct file            *file = vma->vm_file;
+       struct inode           *inode = file_inode(file);
+       struct cl_io           *io;
+       struct cl_fault_io     *fio;
+       int                     rc;
+       ENTRY;
 
-        list_for_each_safe(pos, n, &tree->lt_locked_list) {
-                node = list_entry(pos, struct ll_lock_tree_node,
-                                  lt_locked_item);
+        if (ll_file_nolock(file))
+                RETURN(ERR_PTR(-EOPNOTSUPP));
 
-                rc = ll_extent_unlock(tree->lt_fd, inode,
-                                      ll_i2info(inode)->lli_smd, node->lt_mode,
-                                      &node->lt_lockh);
-                if (rc != 0) {
-                        /* XXX better message */
-                        CERROR("couldn't unlock %d\n", rc);
-                }
-                list_del(&node->lt_locked_item);
-                OBD_FREE(node, sizeof(*node));
-        }
+restart:
+       io = vvp_env_thread_io(env);
+        io->ci_obj = ll_i2info(inode)->lli_clob;
+        LASSERT(io->ci_obj != NULL);
 
-        while ((node = lt_least_node(tree))) {
-                rb_erase(&node->lt_node, &tree->lt_root);
-                OBD_FREE(node, sizeof(*node));
-        }
+        fio = &io->u.ci_fault;
+        fio->ft_index      = index;
         fio->ft_executable = vma->vm_flags & VM_EXEC;
 
-        RETURN(rc);
-}
-int ll_tree_lock(struct ll_lock_tree *tree,
-                 struct ll_lock_tree_node *first_node, struct inode *inode,
-                 const char *buf, size_t count, int ast_flags)
-{
-        struct ll_lock_tree_node *node;
-        int rc = 0;
-        ENTRY;
+        /*
          * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that
          * the kernel will not read pages beyond those covered by the
          * ldlm extent lock in filemap_nopage; we do our own readahead
          * in ll_readpage.
+         */
+        if (ra_flags != NULL)
+                *ra_flags = vma->vm_flags & (VM_RAND_READ|VM_SEQ_READ);
+        vma->vm_flags &= ~VM_SEQ_READ;
+        vma->vm_flags |= VM_RAND_READ;
 
-        tree->lt_root.rb_node = NULL;
-        INIT_LIST_HEAD(&tree->lt_locked_list);
-        if (first_node != NULL)
-                lt_insert(tree, first_node);
+        CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
+               fio->ft_index, fio->ft_executable);
 
-        if (mapping_mapped(inode->i_mapping)) {
-                rc = lt_get_mmap_locks(tree, inode, (unsigned long)buf, count);
-                if (rc)
-                        GOTO(out, rc);
-        }
+       rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+       if (rc == 0) {
+               struct vvp_io *vio = vvp_env_io(env);
+               struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
 
-        while ((node = lt_least_node(tree))) {
-                struct obd_service_time *stime;
-                stime = (node->lt_mode & LCK_PW) ?
-                        &ll_i2sbi(inode)->ll_write_stime :
-                        &ll_i2sbi(inode)->ll_read_stime;
-
-                rc = ll_extent_lock(tree->lt_fd, inode,
-                                    ll_i2info(inode)->lli_smd, node->lt_mode,
-                                    &node->lt_policy, &node->lt_lockh,
-                                    ast_flags, stime);
-                if (rc != 0)
-                        GOTO(out, rc);
-
-                rb_erase(&node->lt_node, &tree->lt_root);
-                list_add_tail(&node->lt_locked_item, &tree->lt_locked_list);
-        }
-        RETURN(rc);
-out:
-        ll_tree_unlock(tree, inode);
-        RETURN(rc);
-}
+               LASSERT(vio->vui_cl.cis_io == io);
 
-static ldlm_mode_t mode_from_vma(struct vm_area_struct *vma)
-{
-        /* we only want to hold PW locks if the mmap() can generate
-         * writes back to the file and that only happens in shared
-         * writable vmas */
-        if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
-                return LCK_PW;
-        return LCK_PR;
-}
+               /* The mmap lock must be MANDATORY so that it can
+                * cache pages. */
+               io->ci_lockreq = CILR_MANDATORY;
+               vio->vui_fd = fd;
+       } else {
+               LASSERT(rc < 0);
+               cl_io_fini(env, io);
+               if (io->ci_need_restart)
+                       goto restart;
 
-static void policy_from_vma(ldlm_policy_data_t *policy,
-                            struct vm_area_struct *vma, unsigned long addr,
-                            size_t count)
-{
-        policy->l_extent.start = ((addr - vma->vm_start) & PAGE_CACHE_MASK) +
-                                 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
-        policy->l_extent.end = (policy->l_extent.start + count - 1) |
-                               (PAGE_CACHE_SIZE - 1);
-}
-static struct vm_area_struct * our_vma(unsigned long addr, size_t count)
-{
-        struct mm_struct *mm = current->mm;
-        struct vm_area_struct *vma, *ret = NULL;
-        ENTRY;
+               io = ERR_PTR(rc);
+       }
 
-        spin_lock(&mm->page_table_lock);
-        for(vma = find_vma(mm, addr);
-            vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
-                if (vma->vm_ops && vma->vm_ops->nopage == ll_nopage) {
-                        ret = vma;
-                        break;
-                }
-        }
-        spin_unlock(&mm->page_table_lock);
-        RETURN(ret);
+       RETURN(io);
 }
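
The restart: label above re-runs the setup when cl_io_init() reports that the object needs to be rebuilt (io->ci_need_restart, e.g. after a layout change). The control flow reduces to the following shape; the ex_* names are hypothetical stand-ins, not Lustre API:

#include <stdbool.h>

struct ex_io { bool need_restart; };

extern int  ex_io_init(struct ex_io *io);   /* may set io->need_restart */
extern void ex_io_fini(struct ex_io *io);

static int ex_io_setup(struct ex_io *io)
{
        int rc;

restart:
        rc = ex_io_init(io);
        if (rc != 0) {
                /* tear down the partially built state first, then decide
                 * whether the failure asks for a fresh attempt */
                ex_io_fini(io);
                if (io->need_restart)
                        goto restart;
        }
        return rc;
}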
 
-int lt_get_mmap_locks(struct ll_lock_tree *tree, struct inode *inode,
-                      unsigned long addr, size_t count)
+/* Shared code of the page_mkwrite method for rhel5 and rhel6 */
+static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
+                            bool *retry)
 {
-        struct vm_area_struct *vma;
-        struct ll_lock_tree_node *node;
-        ldlm_policy_data_t policy;
-        ENTRY;
-
-        if (count == 0)
-                RETURN(0);
-
-        /* we need to look up vmas on page aligned addresses */
-        count += addr & (PAGE_SIZE - 1);
-        addr -= addr & (PAGE_SIZE - 1);
-
-        while ((vma = our_vma(addr, count)) != NULL) {
-
-                policy_from_vma(&policy, vma, addr, count);
-                node = ll_node_from_inode(inode, policy.l_extent.start,
-                                          policy.l_extent.end,
-                                          mode_from_vma(vma));
-                if (IS_ERR(node)) {
-                        CERROR("not enough mem for lock_tree_node!\n");
-                        RETURN(-ENOMEM);
+       struct lu_env           *env;
+       struct cl_io            *io;
+       struct vvp_io           *vio;
+       int                      result;
+       __u16                    refcheck;
+       sigset_t                 set;
+       struct inode             *inode = NULL;
+       struct ll_inode_info     *lli;
+       ENTRY;
+
+       LASSERT(vmpage != NULL);
+       env = cl_env_get(&refcheck);
+       if (IS_ERR(env))
+               RETURN(PTR_ERR(env));
+
+       io = ll_fault_io_init(env, vma, vmpage->index, NULL);
+       if (IS_ERR(io))
+               GOTO(out, result = PTR_ERR(io));
+
+       result = io->ci_result;
+       if (result < 0)
+               GOTO(out_io, result);
+
+       io->u.ci_fault.ft_mkwrite = 1;
+       io->u.ci_fault.ft_writable = 1;
+
+       vio = vvp_env_io(env);
+       vio->u.fault.ft_vma    = vma;
+       vio->u.fault.ft_vmpage = vmpage;
+
+       set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+
+       inode = vvp_object_inode(io->ci_obj);
+       lli = ll_i2info(inode);
+
+       result = cl_io_loop(env, io);
+
+       cfs_restore_sigs(set);
+
+        if (result == 0) {
+                lock_page(vmpage);
+                if (vmpage->mapping == NULL) {
+                        unlock_page(vmpage);
+
+                        /* page was truncated and lock was cancelled, return
+                         * ENODATA so that VM_FAULT_NOPAGE will be returned
+                         * to handle_mm_fault(). */
+                        if (result == 0)
+                                result = -ENODATA;
+                } else if (!PageDirty(vmpage)) {
                        /* Race: the page has been cleaned by ptlrpcd after
                         * it was unlocked. It has to be added into the dirty
                         * cache again, otherwise this soon-to-be-dirty page
                         * won't consume any grants; even worse, if it is being
                         * transferred, it will break the RPC checksum.
+                         */
+                        unlock_page(vmpage);
+
+                        CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has "
+                               "been written out, retry.\n",
+                               vmpage, vmpage->index);
+
+                        *retry = true;
+                        result = -EAGAIN;
                 }
-                lt_insert(tree, node);
 
-                if (vma->vm_end - addr >= count)
-                        break;
-                count -= vma->vm_end - addr;
-                addr = vma->vm_end;
+               if (result == 0)
+                       ll_file_set_flag(lli, LLIF_DATA_MODIFIED);
         }
-        RETURN(0);
+        EXIT;
+
+out_io:
+       cl_io_fini(env, io);
+out:
+       cl_env_put(env, &refcheck);
+       CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
+       LASSERT(ergo(result == 0, PageLocked(vmpage)));
+
+       /* If the page has been unmapped, presumably due to lock reclaim for
+        * concurrent usage, add some delay before retrying to prevent
+        * entering a live-lock situation with competitors.
+        */
+       if (result == -ENODATA && inode != NULL) {
+               CDEBUG(D_MMAP, "delaying new page-fault for inode %p to "
+                              "prevent live-lock\n", inode);
+               msleep(10);
+       }
+
+       return result;
+}
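
ll_page_mkwrite0() re-checks the page under its lock after the write preparation succeeds, because truncate (mapping == NULL) or writeback cleaning (!PageDirty) may have raced in between. The decision logic, reduced to a standalone sketch with hypothetical ex_* names:

#include <errno.h>
#include <stdbool.h>

enum ex_page_state { EX_PAGE_OK, EX_PAGE_TRUNCATED, EX_PAGE_CLEANED };

/* Returns 0 with the page still locked and dirty, -ENODATA to force a
 * fresh fault, or -EAGAIN to ask the caller to retry mkwrite. */
static int ex_revalidate_locked_page(enum ex_page_state st, bool *retry)
{
        switch (st) {
        case EX_PAGE_TRUNCATED:
                return -ENODATA;        /* maps to VM_FAULT_NOPAGE */
        case EX_PAGE_CLEANED:
                *retry = true;          /* page must be re-dirtied */
                return -EAGAIN;
        default:
                return 0;
        }
}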
+
+static inline int to_fault_error(int result)
+{
+       switch (result) {
+       case 0:
+               result = VM_FAULT_LOCKED;
+               break;
+       case -ENOMEM:
+               result = VM_FAULT_OOM;
+               break;
+       default:
+               result = VM_FAULT_SIGBUS;
+               break;
+       }
+       return result;
 }
-/* FIXME: there is a pagefault race goes as follow:
- * 1. A user process on node A accesses a portion of a mapped file,
- *    resulting in a page fault.  The pagefault handler invokes the
- *    ll_nopage function, which reads the page into memory.
- * 2. A user process on node B writes to the same portion of the file
- *    (either via mmap or write()), that cause node A to cancel the
- *    lock and truncate the page.
- * 3. Node A then executes the rest of do_no_page(), entering the
- *    now-invalid page into the PTEs.
+
+/**
+ * Lustre implementation of a vm_operations_struct::fault() method, called by
+ * the VM to serve a page fault (in both kernel and user space).
  *
- * Make the whole do_no_page as a hook to cover both the page cache
- * and page mapping installing with dlm lock would eliminate this race.
+ * \param vma - virtual memory area struct related to the page fault
+ * \param vmf - structure describing the type and address of the fault
+ *
+ * \return allocated and filled _locked_ page for the address
+ * \retval VM_FAULT_ERROR on general error
+ * \retval NOPAGE_OOM if there is no memory to allocate a new page
  */
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int *type)
-#else
-struct page *ll_nopage(struct vm_area_struct *vma, unsigned long address,
-                       int unused)
-#endif
+static vm_fault_t ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-        struct file *filp = vma->vm_file;
-        struct ll_file_data *fd = filp->private_data;
-        struct inode *inode = filp->f_dentry->d_inode;
-        struct lustre_handle lockh = { 0 };
-        ldlm_policy_data_t policy;
-        ldlm_mode_t mode;
-        struct page *page;
-        struct obd_service_time *stime;
-        __u64 kms;
-        unsigned long pgoff, size, rand_read, seq_read;
-        int rc = 0;
-        ENTRY;
-
-        if (ll_i2info(inode)->lli_smd == NULL) {
-                CERROR("No lsm on fault?\n");
-                RETURN(NULL);
+       struct lu_env           *env;
+       struct cl_io            *io;
+       struct vvp_io           *vio = NULL;
+       struct page             *vmpage;
+       unsigned long            ra_flags;
+       int                      result = 0;
+       int                      fault_ret = 0;
+       __u16                    refcheck;
+       ENTRY;
+
+       env = cl_env_get(&refcheck);
+       if (IS_ERR(env))
+               RETURN(PTR_ERR(env));
+
+       if (ll_sbi_has_fast_read(ll_i2sbi(file_inode(vma->vm_file)))) {
+               /* do fast fault */
+               ll_cl_add(vma->vm_file, env, NULL, LCC_MMAP);
+               fault_ret = ll_filemap_fault(vma, vmf);
+               ll_cl_remove(vma->vm_file, env);
+
+               /* - If there is no error, then the page was found in cache
+                *   and is uptodate;
+                * - If VM_FAULT_RETRY is set, the page existed but could not
+                *   be locked. It will return to the kernel and retry;
+                * - Otherwise, try a normal fault under the DLM lock. */
+               if ((fault_ret & VM_FAULT_RETRY) ||
+                   !(fault_ret & VM_FAULT_ERROR))
+                       GOTO(out, result = 0);
+
+               fault_ret = 0;
+       }
+
+       io = ll_fault_io_init(env, vma, vmf->pgoff, &ra_flags);
+       if (IS_ERR(io))
+               GOTO(out, result = PTR_ERR(io));
+
+       result = io->ci_result;
+       if (result == 0) {
+               vio = vvp_env_io(env);
+               vio->u.fault.ft_vma       = vma;
+               vio->u.fault.ft_vmpage    = NULL;
+               vio->u.fault.ft_vmf = vmf;
+               vio->u.fault.ft_flags = 0;
+               vio->u.fault.ft_flags_valid = 0;
+
+               /* May call ll_readpage() */
+               ll_cl_add(vma->vm_file, env, io, LCC_MMAP);
+
+               result = cl_io_loop(env, io);
+
+               ll_cl_remove(vma->vm_file, env);
+
+               /* ft_flags are only valid if we reached
+                * the call to filemap_fault */
+               if (vio->u.fault.ft_flags_valid)
+                       fault_ret = vio->u.fault.ft_flags;
+
+               vmpage = vio->u.fault.ft_vmpage;
+               if (result != 0 && vmpage != NULL) {
+                       put_page(vmpage);
+                       vmf->page = NULL;
+               }
         }
+       cl_io_fini(env, io);
 
-        /* start and end the lock on the first and last bytes in the page */
-        policy_from_vma(&policy, vma, address, PAGE_CACHE_SIZE);
+       vma->vm_flags |= ra_flags;
 
-        CDEBUG(D_MMAP, "nopage vma %p inode %lu, locking ["LPU64", "LPU64"]\n",
-               vma, inode->i_ino, policy.l_extent.start,
-               policy.l_extent.end);
+out:
+       cl_env_put(env, &refcheck);
+       if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
+               fault_ret |= to_fault_error(result);
 
-        mode = mode_from_vma(vma);
-        stime = (mode & LCK_PW) ? &ll_i2sbi(inode)->ll_write_stime :
-                                  &ll_i2sbi(inode)->ll_read_stime;
+       CDEBUG(D_MMAP, "%s fault %d/%d\n", current->comm, fault_ret, result);
+       RETURN(fault_ret);
+}
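
The fast-read branch above tries a plain filemap fault first and only falls back to the full DLM-locked path on a hard error; success and VM_FAULT_RETRY both end the fault immediately. Its shape as a standalone sketch (the EX_* flags mimic VM_FAULT_RETRY/VM_FAULT_ERROR; all names are hypothetical):

#include <stdbool.h>

#define EX_FAULT_RETRY 0x1
#define EX_FAULT_ERROR 0x2

extern int ex_fast_fault(void);  /* page-cache only, no DLM lock */
extern int ex_slow_fault(void);  /* full path under DLM lock */

static int ex_fault(bool fast_read)
{
        if (fast_read) {
                int ret = ex_fast_fault();

                /* found in cache, or the kernel asked to retry: done */
                if ((ret & EX_FAULT_RETRY) || !(ret & EX_FAULT_ERROR))
                        return ret;
        }
        return ex_slow_fault();
}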
 
-        rc = ll_extent_lock(fd, inode, ll_i2info(inode)->lli_smd, mode, &policy,
-                            &lockh, LDLM_FL_CBPENDING, stime);
-        if (rc != 0)
-                RETURN(NULL);
+#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
+static vm_fault_t ll_fault(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
+#else
+static vm_fault_t ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+#endif
+       int count = 0;
+       bool printed = false;
+       bool cached;
+       vm_fault_t result;
+       ktime_t kstart = ktime_get();
+       sigset_t set;
+
+       result = pcc_fault(vma, vmf, &cached);
+       if (cached)
+               goto out;
+
+       /* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite
+        * so that the process can be killed by an admin, but other
+        * signals do not cause a segfault. */
+       set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));
+
+       /* make sure the resulting byte offset stays within the maximum
+        * file size and so cannot go negative */
+       if (vmf->pgoff > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
+               return VM_FAULT_SIGBUS;
+restart:
+       result = ll_fault0(vma, vmf);
+       if (vmf->page &&
+           !(result & (VM_FAULT_RETRY | VM_FAULT_ERROR | VM_FAULT_LOCKED))) {
+                struct page *vmpage = vmf->page;
+
+                /* check if this page has been truncated */
+                lock_page(vmpage);
+                if (unlikely(vmpage->mapping == NULL)) { /* unlucky */
+                        unlock_page(vmpage);
+                       put_page(vmpage);
+                        vmf->page = NULL;
+
+                        if (!printed && ++count > 16) {
+                                CWARN("the page is under heavy contention,"
+                                      "maybe your app(%s) needs revising :-)\n",
+                                      current->comm);
+                                printed = true;
+                        }
 
-        /* XXX change inode size without i_sem hold! there is a race condition
-         *     with truncate path. (see ll_extent_lock) */
-        kms = lov_merge_size(ll_i2info(inode)->lli_smd, 1);
-        pgoff = ((address - vma->vm_start) >> PAGE_CACHE_SHIFT) + vma->vm_pgoff;
-        size = (kms + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+                        goto restart;
+                }
 
-        if (pgoff >= size)
-                ll_glimpse_size(inode);
-        else
-                inode->i_size = kms;
+                result |= VM_FAULT_LOCKED;
+        }
+       cfs_restore_sigs(set);
 
-        /* disable VM_SEQ_READ and use VM_RAND_READ to make sure that
-         * the kernel will not read other pages not covered by ldlm in
-         * filemap_nopage. we do our readahead in ll_readpage.
-         */
-        rand_read = vma->vm_flags & VM_RAND_READ;
-        seq_read = vma->vm_flags & VM_SEQ_READ;
-        vma->vm_flags &= ~ VM_SEQ_READ;
-        vma->vm_flags |= VM_RAND_READ;
+out:
+       if (vmf->page && result == VM_FAULT_LOCKED) {
+               ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
+                                 current->pid, LUSTRE_FPRIVATE(vma->vm_file),
+                                 cl_offset(NULL, vmf->page->index), PAGE_SIZE,
+                                 READ);
+               ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
+                                  LPROC_LL_FAULT,
+                                  ktime_us_delta(ktime_get(), kstart));
+       }
+
+       return result;
+}
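
The LU-12631 change itself is the kstart/ktime_us_delta pair: sample the clock on entry and tally the microsecond delta into per-op stats only when the operation succeeded. The same pattern in a userspace sketch using clock_gettime(); ex_tally_usec() is a hypothetical stand-in for ll_stats_ops_tally():

#include <stdint.h>
#include <time.h>

extern void ex_tally_usec(int op, int64_t usec);  /* stats sink (stand-in) */

static int64_t ex_now_usec(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static int ex_timed_op(int op, int (*fn)(void))
{
        int64_t start = ex_now_usec();
        int rc = fn();

        if (rc == 0)    /* like ll_fault: only successful ops are tallied */
                ex_tally_usec(op, ex_now_usec() - start);
        return rc;
}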
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
-        page = filemap_nopage(vma, address, type);
+#ifdef HAVE_VM_OPS_USE_VM_FAULT_ONLY
+static vm_fault_t ll_page_mkwrite(struct vm_fault *vmf)
+{
+       struct vm_area_struct *vma = vmf->vma;
 #else
-        page = filemap_nopage(vma, address, unused);
+static vm_fault_t ll_page_mkwrite(struct vm_area_struct *vma,
+                                 struct vm_fault *vmf)
+{
 #endif
-        vma->vm_flags &= ~VM_RAND_READ;
-        vma->vm_flags |= (rand_read | seq_read);
+       int count = 0;
+       bool printed = false;
+       bool retry;
+       bool cached;
+       ktime_t kstart = ktime_get();
+       vm_fault_t result;
+
+       result = pcc_page_mkwrite(vma, vmf, &cached);
+       if (cached)
+               goto out;
+
+       file_update_time(vma->vm_file);
+       do {
+               retry = false;
+               result = ll_page_mkwrite0(vma, vmf->page, &retry);
+
+               if (!printed && ++count > 16) {
+                       const struct dentry *de = file_dentry(vma->vm_file);
+
+                       CWARN("app(%s): the page %lu of file "DFID" is under"
+                             " heavy contention\n",
+                             current->comm, vmf->pgoff,
+                             PFID(ll_inode2fid(de->d_inode)));
+                        printed = true;
+                }
+        } while (retry);
+
         switch (result) {
+        case 0:
+                LASSERT(PageLocked(vmf->page));
+                result = VM_FAULT_LOCKED;
+                break;
+        case -ENODATA:
+        case -EFAULT:
+                result = VM_FAULT_NOPAGE;
+                break;
+        case -ENOMEM:
+                result = VM_FAULT_OOM;
+                break;
+        case -EAGAIN:
+                result = VM_FAULT_RETRY;
+                break;
+        default:
+                result = VM_FAULT_SIGBUS;
+                break;
+        }
 
-        ll_extent_unlock(fd, inode, ll_i2info(inode)->lli_smd, mode, &lockh);
-        RETURN(page);
+out:
+       if (result == VM_FAULT_LOCKED) {
+               ll_rw_stats_tally(ll_i2sbi(file_inode(vma->vm_file)),
+                                 current->pid, LUSTRE_FPRIVATE(vma->vm_file),
+                                 cl_offset(NULL, vmf->page->index), PAGE_SIZE,
+                                 WRITE);
+               ll_stats_ops_tally(ll_i2sbi(file_inode(vma->vm_file)),
+                                  LPROC_LL_MKWRITE,
+                                  ktime_us_delta(ktime_get(), kstart));
+       }
+
+       return result;
 }
 
-/* return the user space pointer that maps to a file offset via a vma */
-static inline unsigned long file_to_user(struct vm_area_struct *vma,
-                                         __u64 byte)
+/**
+ *  To avoid cancelling the locks covering the mmapped region under lock
+ *  cache pressure, we track the mapped vma count in vvp_object::vob_mmap_cnt.
+ */
+static void ll_vm_open(struct vm_area_struct *vma)
 {
-        return vma->vm_start +
-               (byte - ((__u64)vma->vm_pgoff << PAGE_CACHE_SHIFT));
-
+       struct inode *inode    = file_inode(vma->vm_file);
+       struct vvp_object *vob = cl_inode2vvp(inode);
+
+       ENTRY;
+       LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+       atomic_inc(&vob->vob_mmap_cnt);
+       pcc_vm_open(vma);
+       EXIT;
 }
 
-#define VMA_DEBUG(vma, fmt, arg...)                                          \
-        CDEBUG(D_MMAP, "vma(%p) start(%ld) end(%ld) pgoff(%ld) inode(%p): "  \
-               fmt, vma, vma->vm_start, vma->vm_end, vma->vm_pgoff,          \
-               vma->vm_file->f_dentry->d_inode, ## arg);
-
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
-/* [first, last] are the byte offsets affected.
- * vm_{start, end} are user addresses of the first byte of the mapping and
- *      the next byte beyond it
- * vm_pgoff is the page index of the first byte in the mapping */
-static void teardown_vmas(struct vm_area_struct *vma, __u64 first,
-                          __u64 last)
+/**
+ * Dual to ll_vm_open().
+ */
+static void ll_vm_close(struct vm_area_struct *vma)
 {
-        unsigned long address, len;
-        for (; vma ; vma = vma->vm_next_share) {
-                if (last >> PAGE_CACHE_SHIFT < vma->vm_pgoff)
-                        continue;
-                if (first >> PAGE_CACHE_SHIFT > (vma->vm_pgoff +
-                    ((vma->vm_end - vma->vm_start) >> PAGE_CACHE_SHIFT)))
-                        continue;
-
-                address = max((unsigned long)vma->vm_start,
-                              file_to_user(vma, first));
-                len = min((unsigned long)vma->vm_end,
-                          file_to_user(vma, last) + 1) - address;
-
-                VMA_DEBUG(vma, "zapping vma [address=%ld len=%ld]\n",
-                          address, len);
-                LASSERT(vma->vm_mm);
-                ll_zap_page_range(vma, address, len);
-        }
+       struct inode      *inode = file_inode(vma->vm_file);
+       struct vvp_object *vob   = cl_inode2vvp(inode);
+
+       ENTRY;
+       atomic_dec(&vob->vob_mmap_cnt);
+       LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+       pcc_vm_close(vma);
+       EXIT;
 }
-#endif
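
vob_mmap_cnt is a plain atomic count of live mappings, bumped in ->open() and dropped in ->close(), so that locks still backing a mapping can be spared from lock cache pressure as the comment above describes. A minimal analogue with C11 atomics (illustrative only):

#include <assert.h>
#include <stdatomic.h>

static atomic_int mmap_cnt;

static void ex_vm_open(void)
{
        assert(atomic_load(&mmap_cnt) >= 0);
        atomic_fetch_add(&mmap_cnt, 1);
}

static void ex_vm_close(void)
{
        atomic_fetch_sub(&mmap_cnt, 1);
        assert(atomic_load(&mmap_cnt) >= 0);
}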
 
 /* XXX put nice comment here.  talk about __free_pte -> dirty pages and
  * nopage's reference passing to the pte */
-int ll_teardown_mmaps(struct address_space *mapping, __u64 first,
-                       __u64 last)
+int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
 {
         int rc = -ENOENT;
         ENTRY;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
+       LASSERTF(last > first, "last %llu first %llu\n", last, first);
         if (mapping_mapped(mapping)) {
                 rc = 0;
-                unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
-                                    last - first + 1, 1);
-        }
-#else
-        spin_lock(&mapping->i_shared_lock);
-        if (mapping->i_mmap != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap, first, last);
+               unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
+                                   last - first + 1, 1);
         }
-        if (mapping->i_mmap_shared != NULL) {
-                rc = 0;
-                teardown_vmas(mapping->i_mmap_shared, first, last);
-        }
-        spin_unlock(&mapping->i_shared_lock);
-#endif
 
         RETURN(rc);
 }
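
A standalone sketch of the page arithmetic behind the unmap_mapping_range() call above, assuming 4 KiB pages and illustrative offsets. unmap_mapping_range() truncates holebegin down to a page boundary internally, so adding PAGE_SIZE - 1 protects a partially covered first page from being zapped; with a page-aligned first it is a no-op:

#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1ULL << EX_PAGE_SHIFT)

int main(void)
{
        unsigned long long first = 8192;      /* page-aligned start (page 2) */
        unsigned long long last  = 20479;     /* last byte of page 4 */
        unsigned long long holebegin = first + EX_PAGE_SIZE - 1;

        /* mirrors the kernel's start-page/length-in-pages computation */
        unsigned long long hba  = holebegin >> EX_PAGE_SHIFT;          /* 2 */
        unsigned long long hlen = (last - first + 1 + EX_PAGE_SIZE - 1)
                                  >> EX_PAGE_SHIFT;                    /* 3 */

        /* prints "unmap pages [2, 4]" */
        printf("unmap pages [%llu, %llu]\n", hba, hba + hlen - 1);
        return 0;
}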
 
-static struct vm_operations_struct ll_file_vm_ops = {
-        .nopage         = ll_nopage,
+static const struct vm_operations_struct ll_file_vm_ops = {
+       .fault                  = ll_fault,
+       .page_mkwrite           = ll_page_mkwrite,
+       .open                   = ll_vm_open,
+       .close                  = ll_vm_close,
 };
 
-int ll_file_mmap(struct file * file, struct vm_area_struct * vma)
+int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
-        int rc;
-        ENTRY;
-
-        rc = generic_file_mmap(file, vma);
-        if (rc == 0)
-                vma->vm_ops = &ll_file_vm_ops;
-
-        RETURN(rc);
+       struct inode *inode = file_inode(file);
+       ktime_t kstart = ktime_get();
+       bool cached;
+       int rc;
+
+       ENTRY;
+
+       if (ll_file_nolock(file))
+               RETURN(-EOPNOTSUPP);
+
+       rc = pcc_file_mmap(file, vma, &cached);
+       if (cached && rc != 0)
+               RETURN(rc);
+
+       rc = generic_file_mmap(file, vma);
+       if (rc == 0) {
+               vma->vm_ops = &ll_file_vm_ops;
+               vma->vm_ops->open(vma);
+               /* update the inode's size and mtime */
+               if (!cached)
+                       rc = ll_glimpse_size(inode);
+       }
+
+       if (!rc)
+               ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MMAP,
+                                  ktime_us_delta(ktime_get(), kstart));
+
+       RETURN(rc);
 }
-
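
One subtlety in ll_file_mmap() above: the VM invokes vm_ops->open() only when a vma is duplicated (fork, split), never for the initial mmap(), so the first reference on vob_mmap_cnt has to be taken by hand. The same idiom in a minimal kernel-style sketch; ex_vm_ops and ex_mmap are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct ex_vm_ops;

static int ex_mmap(struct file *file, struct vm_area_struct *vma)
{
        int rc = generic_file_mmap(file, vma);

        if (rc == 0) {
                vma->vm_ops = &ex_vm_ops;
                /* ->open() is not called by the VM for the initial
                 * mapping, so take the first reference manually */
                if (vma->vm_ops->open)
                        vma->vm_ops->open(vma);
        }
        return rc;
}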