LU-6179 llite: Implement ladvise lockahead
[fs/lustre-release.git] lustre/llite/vvp_io.c
index db53842..c0dad2d 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#ifndef __KERNEL__
-# error This file is kernel only.
-#endif
 
 #include <obd.h>
-#include <lustre_lite.h>
-
+#include "llite_internal.h"
 #include "vvp_internal.h"
 
 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
-                                const struct cl_io_slice *slice);
+                               const struct cl_io_slice *slice)
+{
+       struct vvp_io *vio;
+
+       vio = container_of(slice, struct vvp_io, vui_cl);
+       LASSERT(vio == vvp_env_io(env));
+
+       return vio;
+}
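For readers unfamiliar with the idiom: container_of() recovers the enclosing structure from a pointer to one of its members. A minimal sketch of what the call above amounts to (pointer arithmetic shown explicitly; illustration only, not part of the patch):

    /* illustration: recover the enclosing vvp_io from its vui_cl member */
    struct vvp_io *vio = (struct vvp_io *)((char *)slice -
                         offsetof(struct vvp_io, vui_cl));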
 
 /**
- * True, if \a io is a normal io, False for sendfile() / splice_{read|write}
+ * True if \a io is a normal io; False for splice_{read,write}
  */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
 {
-        struct vvp_io *vio = vvp_env_io(env);
+       struct vvp_io *vio = vvp_env_io(env);
 
-        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+       LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
 
-        return vio->cui_io_subtype == IO_NORMAL;
+       return vio->vui_io_subtype == IO_NORMAL;
 }
 
 /**
@@ -75,7 +75,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
                                struct inode *inode)
 {
        struct ll_inode_info    *lli = ll_i2info(inode);
-       struct ccc_io           *cio = ccc_env_io(env);
+       struct vvp_io           *vio = vvp_env_io(env);
        bool rc = true;
 
        switch (io->ci_type) {
@@ -83,9 +83,10 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
        case CIT_WRITE:
                /* don't need a lock here to check lli_layout_gen, as we hold
                 * the extent lock, and the GROUP lock must be held to swap layout */
-               if (lli->lli_layout_gen != cio->cui_layout_gen) {
+               if (ll_layout_version_get(lli) != vio->vui_layout_gen ||
+                   OBD_FAIL_CHECK_RESET(OBD_FAIL_LLITE_LOST_LAYOUT, 0)) {
                        io->ci_need_restart = 1;
-                       /* this will return application a short read/write */
+                       /* this will cause a short read/write */
                        io->ci_continue = 0;
                        rc = false;
                }
@@ -98,44 +99,294 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
        return rc;
 }
 
+static void vvp_object_size_lock(struct cl_object *obj)
+{
+       struct inode *inode = vvp_object_inode(obj);
+
+       ll_inode_size_lock(inode);
+       cl_object_attr_lock(obj);
+}
+
+static void vvp_object_size_unlock(struct cl_object *obj)
+{
+       struct inode *inode = vvp_object_inode(obj);
+
+       cl_object_attr_unlock(obj);
+       ll_inode_size_unlock(inode);
+}
+
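The two helpers above encode a strict lock ordering: take ll_inode_size_lock() before cl_object_attr_lock(), and release in the reverse order. A minimal usage sketch, assuming only the helpers above (the middle comment is a placeholder):

    vvp_object_size_lock(obj);      /* i_size lock, then attr lock */
    /* ... read or update i_size and the cl_object attributes ... */
    vvp_object_size_unlock(obj);    /* attr unlock, then i_size unlock */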
+/**
+ * Helper function that adjusts the file size (inode->i_size), if necessary,
+ * when the offset \a pos is accessed. The file size can be arbitrarily stale
+ * on a Lustre client, but the client at least knows the KMS (known minimum
+ * size). If the accessed area is inside [0, KMS], set the file size to KMS;
+ * otherwise glimpse the file size.
+ *
+ * Locking: i_size_lock is used to serialize changes to inode size and to
+ * protect consistency between inode size and cl_object
+ * attributes. cl_object_size_lock() protects consistency between cl_attr's of
+ * top-object and sub-objects.
+ */
+static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
+                        struct cl_io *io, loff_t start, size_t count,
+                        int *exceed)
+{
+       struct cl_attr *attr  = vvp_env_thread_attr(env);
+       struct inode   *inode = vvp_object_inode(obj);
+       loff_t          pos   = start + count - 1;
+       loff_t kms;
+       int result;
+
+       /*
+        * Consistency guarantees: following possibilities exist for the
+        * relation between region being accessed and real file size at this
+        * moment:
+        *
+        *  (A): the region is completely inside of the file;
+        *
+        *  (B-x): x bytes of region are inside of the file, the rest is
+        *  outside;
+        *
+        *  (C): the region is completely outside of the file.
+        *
+        * This classification is stable under the DLM lock already acquired
+        * by the caller, because to change the class another client would
+        * have to take a DLM lock conflicting with ours. Also, any updates
+        * to ->i_size by other threads on this client are serialized by
+        * ll_inode_size_lock(). This guarantees that short reads are handled
+        * correctly in the face of concurrent writes and truncates.
+        */
+       vvp_object_size_lock(obj);
+       result = cl_object_attr_get(env, obj, attr);
+       if (result == 0) {
+               kms = attr->cat_kms;
+               if (pos > kms) {
+                       /*
+                        * A glimpse is necessary to determine whether we
+                        * return a short read (B) or some zeroes at the end
+                        * of the buffer (C)
+                        */
+                       vvp_object_size_unlock(obj);
+                       result = cl_glimpse_lock(env, io, inode, obj, 0);
+                       if (result == 0 && exceed != NULL) {
+                               /* If the target page index exceeds the
+                                * end-of-file page index, return directly.
+                                * Do not expect the kernel to check this
+                                * case correctly; linux-2.6.18-128.1.1
+                                * fails to do so. --bug 17336 */
+                               loff_t size = i_size_read(inode);
+                               unsigned long cur_index = start >>
+                                       PAGE_SHIFT;
+
+                               if ((size == 0 && cur_index != 0) ||
+                                   (((size - 1) >> PAGE_SHIFT) <
+                                    cur_index))
+                                       *exceed = 1;
+                       }
+
+                       return result;
+               } else {
+                       /*
+                        * region is within kms and, hence, within real file
+                        * size (A). We need to increase i_size to cover the
+                        * read region so that generic_file_read() will do its
+                        * job, but that doesn't mean the kms size is
+                        * _correct_, it is only the _minimum_ size. If
+                        * someone does a stat they will get the correct size
+                        * which will always be >= the kms value here.
+                        * b=11081
+                        */
+                       if (i_size_read(inode) < kms) {
+                               i_size_write(inode, kms);
+                               CDEBUG(D_VFSTRACE,
+                                      DFID" updating i_size %llu\n",
+                                      PFID(lu_object_fid(&obj->co_lu)),
+                                      (__u64)i_size_read(inode));
+                       }
+               }
+       }
+
+       vvp_object_size_unlock(obj);
+
+       return result;
+}
+
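A worked example of the decision in vvp_prep_size(), with hypothetical numbers: if the client's KMS is 1 MiB, a read of count = 4096 at start = 8192 has pos = 12287, inside [0, KMS], so i_size can be raised to the KMS locally with no RPC (case A); a read at start = 2 MiB has pos > KMS, so cl_glimpse_lock() must fetch the real size from the servers to distinguish a region partially inside the file (B) from one entirely outside it (C).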
 /*****************************************************************************
  *
  * io operations.
  *
  */
 
+static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+                                __u32 enqflags, enum cl_lock_mode mode,
+                                pgoff_t start, pgoff_t end)
+{
+       struct vvp_io          *vio   = vvp_env_io(env);
+       struct cl_lock_descr   *descr = &vio->vui_link.cill_descr;
+       struct cl_object       *obj   = io->ci_obj;
+
+       CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+       ENTRY;
+
+       CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
+
+       memset(&vio->vui_link, 0, sizeof vio->vui_link);
+
+       if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+               descr->cld_mode = CLM_GROUP;
+               descr->cld_gid  = vio->vui_fd->fd_grouplock.lg_gid;
+               enqflags |= CEF_LOCK_MATCH;
+       } else {
+               descr->cld_mode  = mode;
+       }
+
+       descr->cld_obj   = obj;
+       descr->cld_start = start;
+       descr->cld_end   = end;
+       descr->cld_enq_flags = enqflags;
+
+       cl_io_lock_add(env, io, &vio->vui_link);
+
+       RETURN(0);
+}
+
+static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
+                          __u32 enqflags, enum cl_lock_mode mode,
+                          loff_t start, loff_t end)
+{
+       struct cl_object *obj = io->ci_obj;
+
+       return vvp_io_one_lock_index(env, io, enqflags, mode,
+                                    cl_index(obj, start), cl_index(obj, end));
+}
+
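vvp_io_one_lock() converts byte offsets to page indices with cl_index() before delegating. A worked example with hypothetical values, assuming 4 KiB pages:

    /* lock bytes [4096, 12287] of the object for read */
    vvp_io_one_lock(env, io, 0, CLM_READ, 4096, 12287);
    /* equivalent to page indices [1, 2]:
     * vvp_io_one_lock_index(env, io, 0, CLM_READ, 1, 2) */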
+static int vvp_io_write_iter_init(const struct lu_env *env,
+                                 const struct cl_io_slice *ios)
+{
+       struct vvp_io *vio = cl2vvp_io(env, ios);
+
+       cl_page_list_init(&vio->u.write.vui_queue);
+       vio->u.write.vui_written = 0;
+       vio->u.write.vui_from = 0;
+       vio->u.write.vui_to = PAGE_SIZE;
+
+       return 0;
+}
+
+static void vvp_io_write_iter_fini(const struct lu_env *env,
+                                  const struct cl_io_slice *ios)
+{
+       struct vvp_io *vio = cl2vvp_io(env, ios);
+
+       LASSERT(vio->u.write.vui_queue.pl_nr == 0);
+}
+
 static int vvp_io_fault_iter_init(const struct lu_env *env,
                                   const struct cl_io_slice *ios)
 {
-        struct vvp_io *vio   = cl2vvp_io(env, ios);
-        struct inode  *inode = ccc_object_inode(ios->cis_obj);
+       struct vvp_io *vio   = cl2vvp_io(env, ios);
+       struct inode  *inode = vvp_object_inode(ios->cis_obj);
+
+       LASSERT(inode == file_inode(vio->vui_fd->fd_file));
+       vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
 
-        LASSERT(inode ==
-                cl2ccc_io(env, ios)->cui_fd->fd_file->f_dentry->d_inode);
-        vio->u.fault.ft_mtime = LTIME_S(inode->i_mtime);
-        return 0;
+       return 0;
 }
 
 static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
 {
        struct cl_io     *io  = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
-       struct ccc_io    *cio = cl2ccc_io(env, ios);
+       struct vvp_io    *vio = cl2vvp_io(env, ios);
+       struct inode     *inode = vvp_object_inode(obj);
+       int rc;
+
+       CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+       CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
+              "need write layout %d, restore needed %d\n",
+              PFID(lu_object_fid(&obj->co_lu)),
+              io->ci_ignore_layout, io->ci_verify_layout,
+              vio->vui_layout_gen, io->ci_need_write_intent,
+              io->ci_restore_needed);
+
+       if (io->ci_restore_needed) {
+               /* the file was detected as released; we need to restore it
+                * before finishing the io
+                */
+               rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
+               /* if restore registration failed there is no restart;
+                * we will return -ENODATA */
+               /* The layout will change after the restore, so we need to
+                * block on the layout lock held by the MDT. Since the MDT
+                * will not send the new layout in the lvb (see LU-3124),
+                * we have to fetch it explicitly; all of this is done by
+                * ll_layout_refresh().
+                */
+               if (rc == 0) {
+                       io->ci_restore_needed = 0;
+                       io->ci_need_restart = 1;
+                       io->ci_verify_layout = 1;
+               } else {
+                       io->ci_restore_needed = 1;
+                       io->ci_need_restart = 0;
+                       io->ci_verify_layout = 0;
+                       io->ci_result = rc;
+               }
+       }
 
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+       /*
+        * A dynamic layout change is needed; send a layout intent RPC.
+        */
+       if (io->ci_need_write_intent) {
+               loff_t start = 0;
+               loff_t end = OBD_OBJECT_EOF;
+
+               io->ci_need_write_intent = 0;
+
+               LASSERT(io->ci_type == CIT_WRITE ||
+                       cl_io_is_trunc(io) || cl_io_is_mkwrite(io));
+
+               if (io->ci_type == CIT_WRITE) {
+                       if (!cl_io_is_append(io)) {
+                               start = io->u.ci_rw.rw_range.cir_pos;
+                               end = start + io->u.ci_rw.rw_range.cir_count;
+                       }
+               } else if (cl_io_is_trunc(io)) {
+                       end = io->u.ci_setattr.sa_attr.lvb_size;
+               } else { /* mkwrite */
+                       pgoff_t index = io->u.ci_fault.ft_index;
+
+                       start = cl_offset(io->ci_obj, index);
+                       end = cl_offset(io->ci_obj, index + 1);
+               }
 
-       CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
-               io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);
+               CDEBUG(D_VFSTRACE, DFID" write layout, type %u [%llu, %llu)\n",
+                      PFID(lu_object_fid(&obj->co_lu)), io->ci_type,
+                      start, end);
+               rc = ll_layout_write_intent(inode, start, end);
+               io->ci_result = rc;
+               if (!rc)
+                       io->ci_need_restart = 1;
+       }
 
        if (!io->ci_ignore_layout && io->ci_verify_layout) {
                __u32 gen = 0;
 
                /* check layout version */
-               ll_layout_refresh(ccc_object_inode(obj), &gen);
-               io->ci_need_restart = cio->cui_layout_gen != gen;
-               if (io->ci_need_restart)
-                       CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
-                               cio->cui_layout_gen, gen);
+               ll_layout_refresh(inode, &gen);
+               io->ci_need_restart = vio->vui_layout_gen != gen;
+               if (io->ci_need_restart) {
+                       CDEBUG(D_VFSTRACE,
+                              DFID" layout changed from %d to %d.\n",
+                              PFID(lu_object_fid(&obj->co_lu)),
+                              vio->vui_layout_gen, gen);
+                       /* today a successful restore is the only possible
+                        * case; the restore is done, so clear the
+                        * restoring state */
+                       ll_file_clear_flag(ll_i2info(vvp_object_inode(obj)),
+                                          LLIF_FILE_RESTORING);
+               }
        }
 }
 
@@ -145,7 +396,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
         struct cl_io   *io   = ios->cis_io;
         struct cl_page *page = io->u.ci_fault.ft_page;
 
-        CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
+       CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
 
         if (page != NULL) {
                 lu_ref_del(&page->cp_reference, "fault", io);
@@ -155,7 +406,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
         vvp_io_fini(env, ios);
 }
 
-enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
+static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
 {
         /*
          * we only want to hold PW locks if the mmap() can generate
@@ -167,55 +418,50 @@ enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
         return CLM_READ;
 }
 
-static int vvp_mmap_locks(const struct lu_env *env,
-                          struct ccc_io *vio, struct cl_io *io)
+static int vvp_mmap_locks(const struct lu_env *env, struct cl_io *io)
 {
-        struct ccc_thread_info *cti = ccc_env_info(env);
-        struct mm_struct       *mm = current->mm;
-        struct vm_area_struct  *vma;
-        struct cl_lock_descr   *descr = &cti->cti_descr;
-        ldlm_policy_data_t      policy;
-        unsigned long           addr;
-        unsigned long           seg;
-        ssize_t                 count;
-        int                     result;
-        ENTRY;
-
-        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
-        if (!cl_is_normalio(env, io))
-                RETURN(0);
-
-        if (vio->cui_iov == NULL) /* nfs or loop back device write */
-                RETURN(0);
-
-        /* No MM (e.g. NFS)? No vmas too. */
-        if (mm == NULL)
-                RETURN(0);
-
-        for (seg = 0; seg < vio->cui_nrsegs; seg++) {
-                const struct iovec *iv = &vio->cui_iov[seg];
-
-                addr = (unsigned long)iv->iov_base;
-                count = iv->iov_len;
+       struct vvp_thread_info *vti = vvp_env_info(env);
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       struct cl_lock_descr *descr = &vti->vti_descr;
+       union ldlm_policy_data policy;
+       struct iovec iov;
+       struct iov_iter i;
+       int result = 0;
+       ENTRY;
+
+       LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+       if (!cl_is_normalio(env, io))
+               RETURN(0);
+
+       /* No MM (e.g. NFS)? No vmas too. */
+       if (mm == NULL)
+               RETURN(0);
+
+       iov_for_each(iov, i, io->u.ci_rw.rw_iter) {
+               unsigned long addr = (unsigned long)iov.iov_base;
+               size_t count = iov.iov_len;
+
                 if (count == 0)
                         continue;
 
-                count += addr & (~CFS_PAGE_MASK);
-                addr &= CFS_PAGE_MASK;
+               count += addr & ~PAGE_MASK;
+               addr &= PAGE_MASK;
 
                 down_read(&mm->mmap_sem);
                 while((vma = our_vma(mm, addr, count)) != NULL) {
-                        struct inode *inode = vma->vm_file->f_dentry->d_inode;
+                       struct dentry *de = file_dentry(vma->vm_file);
+                       struct inode *inode = de->d_inode;
                         int flags = CEF_MUST;
 
-                        if (ll_file_nolock(vma->vm_file)) {
-                                /*
-                                 * For no lock case, a lockless lock will be
-                                 * generated.
-                                 */
-                                flags = CEF_NEVER;
-                        }
+                       if (ll_file_nolock(vma->vm_file)) {
+                               /*
+                                * The "nolock" mount case is not allowed for mmap
+                                */
+                               result = -EINVAL;
+                               break;
+                       }
 
                         /*
                          * XXX: Required lock mode can be weakened: CIT_WRITE
@@ -236,57 +482,86 @@ static int vvp_mmap_locks(const struct lu_env *env,
                                descr->cld_mode, descr->cld_start,
                                descr->cld_end);
 
-                        if (result < 0)
-                                RETURN(result);
+                       if (result < 0)
+                               break;
 
-                        if (vma->vm_end - addr >= count)
-                                break;
+                       if (vma->vm_end - addr >= count)
+                               break;
 
-                        count -= vma->vm_end - addr;
-                        addr = vma->vm_end;
-                }
-                up_read(&mm->mmap_sem);
-        }
-        RETURN(0);
+                       count -= vma->vm_end - addr;
+                       addr = vma->vm_end;
+               }
+               up_read(&mm->mmap_sem);
+               if (result < 0)
+                       break;
+       }
+       RETURN(result);
+}
+
+static void vvp_io_advance(const struct lu_env *env,
+                          const struct cl_io_slice *ios,
+                          size_t nob)
+{
+       struct vvp_io    *vio = cl2vvp_io(env, ios);
+       struct cl_io     *io  = ios->cis_io;
+       struct cl_object *obj = ios->cis_io->ci_obj;
+
+       CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+       if (!cl_is_normalio(env, io))
+               return;
+
+       vio->vui_tot_count -= nob;
+       if (io->ci_pio) {
+               iov_iter_advance(&io->u.ci_rw.rw_iter, nob);
+               io->u.ci_rw.rw_iocb.ki_pos = io->u.ci_rw.rw_range.cir_pos;
+#ifdef HAVE_KIOCB_KI_LEFT
+               io->u.ci_rw.rw_iocb.ki_left = vio->vui_tot_count;
+#elif defined(HAVE_KI_NBYTES)
+               io->u.ci_rw.rw_iocb.ki_nbytes = vio->vui_tot_count;
+#endif
+       } else {
+               /* It was truncated to stripe size in vvp_io_rw_lock() */
+               iov_iter_reexpand(&io->u.ci_rw.rw_iter, vio->vui_tot_count);
+       }
 }
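Between them, vvp_io_rw_lock() and vvp_io_advance() keep a single iov_iter valid across per-stripe passes. Roughly the lifecycle, sketched with the standard iov_iter helpers (copy_step() is a placeholder, not a real function):

    iov_iter_truncate(&iter, stripe_bytes);   /* limit to this stripe */
    copied = copy_step(&iter);                /* placeholder data copy */
    iov_iter_advance(&iter, copied);          /* consume copied bytes */
    iov_iter_reexpand(&iter, remaining);      /* restore the rest for the
                                               * next stripe */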
 
 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
                           enum cl_lock_mode mode, loff_t start, loff_t end)
 {
-        struct ccc_io *cio = ccc_env_io(env);
-        int result;
-        int ast_flags = 0;
+       int result;
+       int ast_flags = 0;
+
+       LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+       ENTRY;
 
-        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-        ENTRY;
+       if (cl_is_normalio(env, io))
+               iov_iter_truncate(&io->u.ci_rw.rw_iter,
+                                 io->u.ci_rw.rw_range.cir_count);
 
-        ccc_io_update_iov(env, cio, io);
+       if (io->u.ci_rw.rw_nonblock)
+               ast_flags |= CEF_NONBLOCK;
+       if (io->ci_lock_no_expand)
+               ast_flags |= CEF_LOCK_NO_EXPAND;
 
-        if (io->u.ci_rw.crw_nonblock)
-                ast_flags |= CEF_NONBLOCK;
-        result = vvp_mmap_locks(env, cio, io);
-        if (result == 0)
-                result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
-        RETURN(result);
+       result = vvp_mmap_locks(env, io);
+       if (result == 0)
+               result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
+
+       RETURN(result);
 }
 
 static int vvp_io_read_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
 {
-        struct cl_io         *io  = ios->cis_io;
-        struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
-        int result;
-
-        ENTRY;
-        /* XXX: Layer violation, we shouldn't see lsm at llite level. */
-       if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
-                result = vvp_io_rw_lock(env, io, CLM_READ,
-                                        io->u.ci_rd.rd.crw_pos,
-                                        io->u.ci_rd.rd.crw_pos +
-                                        io->u.ci_rd.rd.crw_count - 1);
-        else
-                result = 0;
-        RETURN(result);
+       struct cl_io *io = ios->cis_io;
+       struct cl_io_range *range = &io->u.ci_rw.rw_range;
+       int rc;
+
+       ENTRY;
+       rc = vvp_io_rw_lock(env, io, CLM_READ, range->cir_pos,
+                           range->cir_pos + range->cir_count - 1);
+       RETURN(rc);
 }
 
 static int vvp_io_fault_lock(const struct lu_env *env,
@@ -297,26 +572,31 @@ static int vvp_io_fault_lock(const struct lu_env *env,
         /*
          * XXX LDLM_FL_CBPENDING
          */
-        return ccc_io_one_lock_index
-                (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
-                 io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
+       return vvp_io_one_lock_index(env,
+                                    io, 0,
+                                    vvp_mode_from_vma(vio->u.fault.ft_vma),
+                                    io->u.ci_fault.ft_index,
+                                    io->u.ci_fault.ft_index);
 }
 
 static int vvp_io_write_lock(const struct lu_env *env,
                              const struct cl_io_slice *ios)
 {
-        struct cl_io *io = ios->cis_io;
-        loff_t start;
-        loff_t end;
-
-        if (io->u.ci_wr.wr_append) {
-                start = 0;
-                end   = OBD_OBJECT_EOF;
-        } else {
-                start = io->u.ci_wr.wr.crw_pos;
-                end   = start + io->u.ci_wr.wr.crw_count - 1;
-        }
-        return vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
+       struct cl_io *io = ios->cis_io;
+       loff_t start;
+       loff_t end;
+       int rc;
+
+       ENTRY;
+       if (io->u.ci_rw.rw_append) {
+               start = 0;
+               end   = OBD_OBJECT_EOF;
+       } else {
+               start = io->u.ci_rw.rw_range.cir_pos;
+               end   = start + io->u.ci_rw.rw_range.cir_count - 1;
+       }
+       rc = vvp_io_rw_lock(env, io, CLM_WRITE, start, end);
+       RETURN(rc);
 }
 
 static int vvp_io_setattr_iter_init(const struct lu_env *env,
@@ -333,7 +613,6 @@ static int vvp_io_setattr_iter_init(const struct lu_env *env,
 static int vvp_io_setattr_lock(const struct lu_env *env,
                                const struct cl_io_slice *ios)
 {
-       struct ccc_io *cio = ccc_env_io(env);
        struct cl_io  *io  = ios->cis_io;
        __u64 new_size;
        __u32 enqflags = 0;
@@ -343,45 +622,52 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
                 if (new_size == 0)
                         enqflags = CEF_DISCARD_DATA;
         } else {
-                if ((io->u.ci_setattr.sa_attr.lvb_mtime >=
-                     io->u.ci_setattr.sa_attr.lvb_ctime) ||
-                    (io->u.ci_setattr.sa_attr.lvb_atime >=
-                     io->u.ci_setattr.sa_attr.lvb_ctime))
-                        return 0;
-                new_size = 0;
-        }
-        cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
-        return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
-                               new_size, OBD_OBJECT_EOF);
+               unsigned int valid = io->u.ci_setattr.sa_valid;
+
+               if (!(valid & TIMES_SET_FLAGS))
+                       return 0;
+
+               if ((!(valid & ATTR_MTIME) ||
+                    io->u.ci_setattr.sa_attr.lvb_mtime >=
+                    io->u.ci_setattr.sa_attr.lvb_ctime) &&
+                   (!(valid & ATTR_ATIME) ||
+                    io->u.ci_setattr.sa_attr.lvb_atime >=
+                    io->u.ci_setattr.sa_attr.lvb_ctime))
+                       return 0;
+
+               new_size = 0;
+       }
+
+       return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
+                              new_size, OBD_OBJECT_EOF);
 }
 
 static int vvp_do_vmtruncate(struct inode *inode, size_t size)
 {
        int     result;
+
        /*
         * Only ll_inode_size_lock is taken at this level.
         */
        ll_inode_size_lock(inode);
-       result = vmtruncate(inode, size);
-       ll_inode_size_unlock(inode);
+       result = inode_newsize_ok(inode, size);
+       if (result < 0) {
+               ll_inode_size_unlock(inode);
+               return result;
+       }
+       i_size_write(inode, size);
 
+       ll_truncate_pagecache(inode, size);
+       ll_inode_size_unlock(inode);
        return result;
 }
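vmtruncate() is gone from modern kernels; the sequence above is the standard replacement: validate the new size, publish it, then drop the now-stale page cache (ll_truncate_pagecache() wraps the kernel's truncate_pagecache(), whose signature changed across versions). A generic sketch of the same sequence for a recent kernel, outside any Lustre locking:

    result = inode_newsize_ok(inode, size);   /* limit/permission checks */
    if (result == 0) {
            i_size_write(inode, size);        /* publish the new size */
            truncate_pagecache(inode, size);  /* drop stale cached pages */
    }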
 
-static int vvp_io_setattr_trunc(const struct lu_env *env,
-                                const struct cl_io_slice *ios,
-                                struct inode *inode, loff_t size)
-{
-       inode_dio_wait(inode);
-       return 0;
-}
-
 static int vvp_io_setattr_time(const struct lu_env *env,
                                const struct cl_io_slice *ios)
 {
         struct cl_io       *io    = ios->cis_io;
         struct cl_object   *obj   = io->ci_obj;
-        struct cl_attr     *attr  = ccc_env_thread_attr(env);
+       struct cl_attr     *attr  = vvp_env_thread_attr(env);
         int result;
         unsigned valid = CAT_CTIME;
 
@@ -395,313 +681,523 @@ static int vvp_io_setattr_time(const struct lu_env *env,
                 attr->cat_mtime = io->u.ci_setattr.sa_attr.lvb_mtime;
                 valid |= CAT_MTIME;
         }
-        result = cl_object_attr_set(env, obj, attr, valid);
-        cl_object_attr_unlock(obj);
+       result = cl_object_attr_update(env, obj, attr, valid);
+       cl_object_attr_unlock(obj);
 
-        return result;
+       return result;
 }
 
 static int vvp_io_setattr_start(const struct lu_env *env,
                                const struct cl_io_slice *ios)
 {
-       struct cl_io    *io    = ios->cis_io;
-       struct inode    *inode = ccc_object_inode(io->ci_obj);
-
-       mutex_lock(&inode->i_mutex);
-       if (cl_io_is_trunc(io))
-               return vvp_io_setattr_trunc(env, ios, inode,
-                                           io->u.ci_setattr.sa_attr.lvb_size);
-       else
+       struct cl_io            *io    = ios->cis_io;
+       struct inode            *inode = vvp_object_inode(io->ci_obj);
+       struct ll_inode_info    *lli   = ll_i2info(inode);
+
+       if (cl_io_is_trunc(io)) {
+               down_write(&lli->lli_trunc_sem);
+               inode_lock(inode);
+               inode_dio_wait(inode);
+       } else {
+               inode_lock(inode);
+       }
+
+       if (io->u.ci_setattr.sa_valid & TIMES_SET_FLAGS)
                return vvp_io_setattr_time(env, ios);
+
+       return 0;
 }
 
 static void vvp_io_setattr_end(const struct lu_env *env,
                                const struct cl_io_slice *ios)
 {
-       struct cl_io *io    = ios->cis_io;
-       struct inode *inode = ccc_object_inode(io->ci_obj);
+       struct cl_io            *io    = ios->cis_io;
+       struct inode            *inode = vvp_object_inode(io->ci_obj);
+       struct ll_inode_info    *lli   = ll_i2info(inode);
 
        if (cl_io_is_trunc(io)) {
                /* Truncate in-memory pages - they must be clean pages
                 * because osc has already been notified to destroy osc_extents. */
                vvp_do_vmtruncate(inode, io->u.ci_setattr.sa_attr.lvb_size);
                inode_dio_write_done(inode);
+               inode_unlock(inode);
+               up_write(&lli->lli_trunc_sem);
+       } else {
+               inode_unlock(inode);
        }
-       mutex_unlock(&inode->i_mutex);
 }
 
 static void vvp_io_setattr_fini(const struct lu_env *env,
                                const struct cl_io_slice *ios)
 {
+       bool restore_needed = ios->cis_io->ci_restore_needed;
+       struct inode *inode = vvp_object_inode(ios->cis_obj);
+
        vvp_io_fini(env, ios);
-}
 
-#ifdef HAVE_FILE_READV
-static ssize_t lustre_generic_file_read(struct file *file,
-                                        struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_readv(file, vio->cui_iov, vio->cui_nrsegs, ppos);
+       if (restore_needed && !ios->cis_io->ci_restore_needed) {
+               /* restore finished, set data modified flag for HSM */
+               ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
+       }
 }
 
-static ssize_t lustre_generic_file_write(struct file *file,
-                                         struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_writev(file, vio->cui_iov, vio->cui_nrsegs, ppos);
-}
-#else
-static ssize_t lustre_generic_file_read(struct file *file,
-                                        struct ccc_io *vio, loff_t *ppos)
+static int vvp_io_read_start(const struct lu_env *env,
+                            const struct cl_io_slice *ios)
 {
-        return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
-                                     vio->cui_nrsegs, *ppos);
-}
+       struct vvp_io           *vio   = cl2vvp_io(env, ios);
+       struct cl_io            *io    = ios->cis_io;
+       struct cl_object        *obj   = io->ci_obj;
+       struct inode            *inode = vvp_object_inode(obj);
+       struct ll_inode_info    *lli   = ll_i2info(inode);
+       struct file             *file  = vio->vui_fd->fd_file;
+       struct cl_io_range      *range = &io->u.ci_rw.rw_range;
+       loff_t pos = range->cir_pos; /* for generic_file_splice_read() only */
+       size_t tot = vio->vui_tot_count;
+       int exceed = 0;
+       int result;
+
+       CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+       CDEBUG(D_VFSTRACE, "%s: read [%llu, %llu)\n",
+               file_dentry(file)->d_name.name,
+               range->cir_pos, range->cir_pos + range->cir_count);
+
+       if (vio->vui_io_subtype == IO_NORMAL)
+               down_read(&lli->lli_trunc_sem);
 
-static ssize_t lustre_generic_file_write(struct file *file,
-                                        struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
-                                      vio->cui_nrsegs, *ppos);
+       if (!can_populate_pages(env, io, inode))
+               return 0;
+
+       result = vvp_prep_size(env, obj, io, range->cir_pos, tot, &exceed);
+       if (result != 0)
+               return result;
+       else if (exceed != 0)
+               goto out;
+
+       LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
+                        "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
+                        inode->i_ino, range->cir_count, range->cir_pos,
+                        i_size_read(inode));
+
+       /* turn off the kernel's read-ahead */
+       vio->vui_fd->fd_file->f_ra.ra_pages = 0;
+
+       /* initialize read-ahead window once per syscall */
+       if (!vio->vui_ra_valid) {
+               vio->vui_ra_valid = true;
+               vio->vui_ra_start = cl_index(obj, range->cir_pos);
+               vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
+               ll_ras_enter(file);
+       }
+
+       /* BUG: 5972 */
+       file_accessed(file);
+       switch (vio->vui_io_subtype) {
+       case IO_NORMAL:
+               LASSERTF(io->u.ci_rw.rw_iocb.ki_pos == range->cir_pos,
+                        "ki_pos %lld [%lld, %lld)\n",
+                        io->u.ci_rw.rw_iocb.ki_pos,
+                        range->cir_pos, range->cir_pos + range->cir_count);
+               result = generic_file_read_iter(&io->u.ci_rw.rw_iocb,
+                                               &io->u.ci_rw.rw_iter);
+               break;
+       case IO_SPLICE:
+               result = generic_file_splice_read(file, &pos,
+                                                 vio->u.splice.vui_pipe,
+                                                 range->cir_count,
+                                                 vio->u.splice.vui_flags);
+               /* LU-1109: do splice read stripe by stripe, otherwise it
+                * may make nfsd stuck if this read occupies all internal
+                * pipe buffers. */
+               io->ci_continue = 0;
+               break;
+       default:
+               CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
+               LBUG();
+       }
+
+out:
+       if (result >= 0) {
+               if (result < range->cir_count)
+                       io->ci_continue = 0;
+               io->ci_nob += result;
+               ll_rw_stats_tally(ll_i2sbi(inode), current->pid, vio->vui_fd,
+                                 range->cir_pos, result, READ);
+               result = 0;
+       }
+
+       return result;
 }
-#endif
 
-static int vvp_io_read_start(const struct lu_env *env,
-                             const struct cl_io_slice *ios)
+static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
+                             struct cl_page_list *plist, int from, int to)
 {
-        struct vvp_io     *vio   = cl2vvp_io(env, ios);
-        struct ccc_io     *cio   = cl2ccc_io(env, ios);
-        struct cl_io      *io    = ios->cis_io;
-        struct cl_object  *obj   = io->ci_obj;
-        struct inode      *inode = ccc_object_inode(obj);
-        struct ll_ra_read *bead  = &vio->cui_bead;
-        struct file       *file  = cio->cui_fd->fd_file;
+       struct cl_2queue *queue = &io->ci_queue;
+       struct cl_page *page;
+       unsigned int bytes = 0;
+       int rc = 0;
+       ENTRY;
+
+       if (plist->pl_nr == 0)
+               RETURN(0);
+
+       if (from > 0 || to != PAGE_SIZE) {
+               page = cl_page_list_first(plist);
+               if (plist->pl_nr == 1) {
+                       cl_page_clip(env, page, from, to);
+               } else {
+                       if (from > 0)
+                               cl_page_clip(env, page, from, PAGE_SIZE);
+                       if (to != PAGE_SIZE) {
+                               page = cl_page_list_last(plist);
+                               cl_page_clip(env, page, 0, to);
+                       }
+               }
+       }
 
-        int     result;
-        loff_t  pos = io->u.ci_rd.rd.crw_pos;
-        long    cnt = io->u.ci_rd.rd.crw_count;
-        long    tot = cio->cui_tot_count;
-        int     exceed = 0;
+       cl_2queue_init(queue);
+       cl_page_list_splice(plist, &queue->c2_qin);
+       rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
 
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+       /* plist is not sorted any more */
+       cl_page_list_splice(&queue->c2_qin, plist);
+       cl_page_list_splice(&queue->c2_qout, plist);
+       cl_2queue_fini(env, queue);
 
-        CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
+       if (rc == 0) {
+               /* calculate bytes */
+               bytes = plist->pl_nr << PAGE_SHIFT;
+               bytes -= from + PAGE_SIZE - to;
 
-       if (!can_populate_pages(env, io, inode))
-               return 0;
+               while (plist->pl_nr > 0) {
+                       page = cl_page_list_first(plist);
+                       cl_page_list_del(env, plist, page);
 
-        result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
-        if (result != 0)
-                return result;
-        else if (exceed != 0)
-                goto out;
+                       cl_page_clip(env, page, 0, PAGE_SIZE);
 
-        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu,
-                        "Read ino %lu, %lu bytes, offset %lld, size %llu\n",
-                        inode->i_ino, cnt, pos, i_size_read(inode));
+                       SetPageUptodate(cl_page_vmpage(page));
+                       cl_page_disown(env, io, page);
 
-        /* turn off the kernel's read-ahead */
-        cio->cui_fd->fd_file->f_ra.ra_pages = 0;
+                       lu_ref_del(&page->cp_reference, "cl_io", io);
+                       cl_page_put(env, page);
+               }
+       }
 
-        /* initialize read-ahead window once per syscall */
-        if (!vio->cui_ra_window_set) {
-                vio->cui_ra_window_set = 1;
-                bead->lrr_start = cl_index(obj, pos);
-                /*
-                 * XXX: explicit CFS_PAGE_SIZE
-                 */
-                bead->lrr_count = cl_index(obj, tot + CFS_PAGE_SIZE - 1);
-                ll_ra_read_in(file, bead);
-        }
+       RETURN(bytes > 0 ? bytes : rc);
+}
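The byte accounting in vvp_io_commit_sync() clips the partial first and last pages: bytes = pl_nr << PAGE_SHIFT, minus from, minus (PAGE_SIZE - to). A worked example with hypothetical numbers: committing 3 pages of 4 KiB with from = 512 and to = 1024 gives 3 * 4096 - (512 + 4096 - 1024) = 8704 bytes.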
 
-        /* BUG: 5972 */
-        file_accessed(file);
-        switch (vio->cui_io_subtype) {
-        case IO_NORMAL:
-                 result = lustre_generic_file_read(file, cio, &pos);
-                 break;
-#ifdef HAVE_KERNEL_SENDFILE
-        case IO_SENDFILE:
-                result = generic_file_sendfile(file, &pos, cnt,
-                                vio->u.sendfile.cui_actor,
-                                vio->u.sendfile.cui_target);
-                break;
-#endif
-#ifdef HAVE_KERNEL_SPLICE_READ
-        case IO_SPLICE:
-                result = generic_file_splice_read(file, &pos,
-                                vio->u.splice.cui_pipe, cnt,
-                                vio->u.splice.cui_flags);
-                /* LU-1109: do splice read stripe by stripe otherwise if it
-                 * may make nfsd stuck if this read occupied all internal pipe
-                 * buffers. */
-                io->ci_continue = 0;
-                break;
-#endif
-        default:
-                CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
-                LBUG();
-        }
+static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
+                               struct cl_page *page)
+{
+       struct page *vmpage = page->cp_vmpage;
 
-out:
-        if (result >= 0) {
-                if (result < cnt)
-                        io->ci_continue = 0;
-                io->ci_nob += result;
-                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
-                                  cio->cui_fd, pos, result, 0);
-                result = 0;
-        }
-        return result;
+       SetPageUptodate(vmpage);
+       set_page_dirty(vmpage);
+
+       cl_page_disown(env, io, page);
+
+       lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+       cl_page_put(env, page);
 }
 
-static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
+/* make sure the page list is contiguous */
+static bool page_list_sanity_check(struct cl_object *obj,
+                                  struct cl_page_list *plist)
 {
-       struct vvp_io *vio = cl2vvp_io(env, ios);
-       struct ccc_io *cio = cl2ccc_io(env, ios);
+       struct cl_page *page;
+       pgoff_t index = CL_PAGE_EOF;
 
-       if (vio->cui_ra_window_set)
-               ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
+       cl_page_list_for_each(page, plist) {
+               struct vvp_page *vpg = cl_object_page_slice(obj, page);
 
-       vvp_io_fini(env, ios);
+               if (index == CL_PAGE_EOF) {
+                       index = vvp_index(vpg);
+                       continue;
+               }
+
+               ++index;
+               if (index == vvp_index(vpg))
+                       continue;
+
+               return false;
+       }
+       return true;
+}
+
+/* Return how many bytes were queued or written */
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
+{
+       struct cl_object *obj = io->ci_obj;
+       struct inode *inode = vvp_object_inode(obj);
+       struct vvp_io *vio = vvp_env_io(env);
+       struct cl_page_list *queue = &vio->u.write.vui_queue;
+       struct cl_page *page;
+       int rc = 0;
+       int bytes = 0;
+       unsigned int npages = vio->u.write.vui_queue.pl_nr;
+       ENTRY;
+
+       if (npages == 0)
+               RETURN(0);
+
+       CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
+               npages, vio->u.write.vui_from, vio->u.write.vui_to);
+
+       LASSERT(page_list_sanity_check(obj, queue));
+
+       /* submit IO with async write */
+       rc = cl_io_commit_async(env, io, queue,
+                               vio->u.write.vui_from, vio->u.write.vui_to,
+                               write_commit_callback);
+       npages -= queue->pl_nr; /* already committed pages */
+       if (npages > 0) {
+               /* calculate how many bytes were written */
+               bytes = npages << PAGE_SHIFT;
+
+               /* first page */
+               bytes -= vio->u.write.vui_from;
+               if (queue->pl_nr == 0) /* last page */
+                       bytes -= PAGE_SIZE - vio->u.write.vui_to;
+               LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
+
+               vio->u.write.vui_written += bytes;
+
+               CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
+                       npages, bytes, vio->u.write.vui_written);
+
+               /* the first page must have been written. */
+               vio->u.write.vui_from = 0;
+       }
+       LASSERT(page_list_sanity_check(obj, queue));
+       LASSERT(ergo(rc == 0, queue->pl_nr == 0));
+
+       /* out of quota, try sync write */
+       if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
+               rc = vvp_io_commit_sync(env, io, queue,
+                                       vio->u.write.vui_from,
+                                       vio->u.write.vui_to);
+               if (rc > 0) {
+                       vio->u.write.vui_written += rc;
+                       rc = 0;
+               }
+       }
+
+       /* update inode size */
+       ll_merge_attr(env, inode);
+
+       /* The pages remaining in the queue failed to commit; discard them
+        * unless they were dirtied before. */
+       while (queue->pl_nr > 0) {
+               page = cl_page_list_first(queue);
+               cl_page_list_del(env, queue, page);
+
+               if (!PageDirty(cl_page_vmpage(page)))
+                       cl_page_discard(env, io, page);
+
+               cl_page_disown(env, io, page);
+
+               lu_ref_del(&page->cp_reference, "cl_io", io);
+               cl_page_put(env, page);
+       }
+       cl_page_list_fini(env, queue);
+
+       RETURN(rc);
 }
 
 static int vvp_io_write_start(const struct lu_env *env,
                               const struct cl_io_slice *ios)
 {
-        struct ccc_io      *cio   = cl2ccc_io(env, ios);
-        struct cl_io       *io    = ios->cis_io;
-        struct cl_object   *obj   = io->ci_obj;
-        struct inode       *inode = ccc_object_inode(obj);
-        struct file        *file  = cio->cui_fd->fd_file;
-        ssize_t result = 0;
-        loff_t pos = io->u.ci_wr.wr.crw_pos;
-        size_t cnt = io->u.ci_wr.wr.crw_count;
-
-        ENTRY;
+       struct vvp_io           *vio   = cl2vvp_io(env, ios);
+       struct cl_io            *io    = ios->cis_io;
+       struct cl_object        *obj   = io->ci_obj;
+       struct inode            *inode = vvp_object_inode(obj);
+       struct ll_inode_info    *lli   = ll_i2info(inode);
+       struct file             *file  = vio->vui_fd->fd_file;
+       struct cl_io_range      *range = &io->u.ci_rw.rw_range;
+       bool                     lock_inode = !lli->lli_inode_locked &&
+                                             !IS_NOSEC(inode);
+       ssize_t                  result = 0;
+       ENTRY;
+
+       if (vio->vui_io_subtype == IO_NORMAL)
+               down_read(&lli->lli_trunc_sem);
 
        if (!can_populate_pages(env, io, inode))
-               return 0;
+               RETURN(0);
 
-        if (cl_io_is_append(io)) {
-                /*
-                 * PARALLEL IO This has to be changed for parallel IO doing
-                 * out-of-order writes.
-                 */
-                pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
-#ifndef HAVE_FILE_WRITEV
-                cio->cui_iocb->ki_pos = pos;
+       if (cl_io_is_append(io)) {
+               /*
+                * PARALLEL IO: this has to be changed for parallel IO doing
+                * out-of-order writes.
+                */
+               ll_merge_attr(env, inode);
+               range->cir_pos = i_size_read(inode);
+               io->u.ci_rw.rw_iocb.ki_pos = range->cir_pos;
+       } else {
+               LASSERTF(io->u.ci_rw.rw_iocb.ki_pos == range->cir_pos,
+                        "ki_pos %lld [%lld, %lld)\n",
+                        io->u.ci_rw.rw_iocb.ki_pos,
+                        range->cir_pos, range->cir_pos + range->cir_count);
+       }
+
+       CDEBUG(D_VFSTRACE, "%s: write [%llu, %llu)\n",
+               file_dentry(file)->d_name.name,
+               range->cir_pos, range->cir_pos + range->cir_count);
+
+       /* The maximum Lustre file size is variable, based on the OST maximum
+        * object size and number of stripes.  This needs another check in
+        * addition to the VFS checks earlier. */
+       if (range->cir_pos + range->cir_count > ll_file_maxbytes(inode)) {
+               CDEBUG(D_INODE,
+                      "%s: file %s ("DFID") offset %llu > maxbytes %llu\n",
+                      ll_get_fsname(inode->i_sb, NULL, 0),
+                      file_dentry(file)->d_name.name,
+                      PFID(ll_inode2fid(inode)),
+                      range->cir_pos + range->cir_count,
+                      ll_file_maxbytes(inode));
+               RETURN(-EFBIG);
+       }
+
+       /* Tests to verify we take the i_mutex correctly */
+       if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_SEC) && !lock_inode)
+               RETURN(-EINVAL);
+
+       if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_IMUTEX_NOSEC) && lock_inode)
+               RETURN(-EINVAL);
+
+       /*
+        * When using the locked AIO function (generic_file_aio_write())
+        * testing has shown the inode mutex to be a limiting factor
+        * with multi-threaded single shared file performance. To get
+        * around this, we now use the lockless version. To maintain
+        * consistency, proper locking to protect against writes,
+        * truncates, etc. is handled in the higher layers of Lustre.
+        */
+       if (lock_inode)
+               inode_lock(inode);
+       result = __generic_file_write_iter(&io->u.ci_rw.rw_iocb,
+                                          &io->u.ci_rw.rw_iter);
+       if (lock_inode)
+               inode_unlock(inode);
+
+       if (result > 0 || result == -EIOCBQUEUED)
+#ifdef HAVE_GENERIC_WRITE_SYNC_2ARGS
+               result = generic_write_sync(&io->u.ci_rw.rw_iocb, result);
+#else
+       {
+               ssize_t err;
+
+               err = generic_write_sync(io->u.ci_rw.rw_iocb.ki_filp,
+                                        range->cir_pos, result);
+               if (err < 0 && result > 0)
+                       result = err;
+       }
 #endif
-        }
 
-        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+       if (result > 0) {
+               result = vvp_io_write_commit(env, io);
+               if (vio->u.write.vui_written > 0) {
+                       result = vio->u.write.vui_written;
+                       CDEBUG(D_VFSTRACE, "%s: write nob %zd, result: %zd\n",
+                               file_dentry(file)->d_name.name,
+                               io->ci_nob, result);
+                       io->ci_nob += result;
+               }
+       }
+       if (result > 0) {
+               ll_file_set_flag(ll_i2info(inode), LLIF_DATA_MODIFIED);
 
-        if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
-                result = 0;
-        else
-                result = lustre_generic_file_write(file, cio, &pos);
-
-        if (result > 0) {
-                if (result < cnt)
-                        io->ci_continue = 0;
-                io->ci_nob += result;
-                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
-                                  cio->cui_fd, pos, result, 0);
-                result = 0;
-        }
-        RETURN(result);
+               if (result < range->cir_count)
+                       io->ci_continue = 0;
+               ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
+                                 vio->vui_fd, range->cir_pos, result, WRITE);
+               result = 0;
+       }
+
+       RETURN(result);
 }
 
-#ifndef HAVE_VM_OP_FAULT
-static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
+static void vvp_io_rw_end(const struct lu_env *env,
+                         const struct cl_io_slice *ios)
 {
-        cfs_page_t *vmpage;
-
-        vmpage = filemap_nopage(cfio->ft_vma, cfio->nopage.ft_address,
-                                cfio->nopage.ft_type);
-
-        if (vmpage == NOPAGE_SIGBUS) {
-                CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n",
-                       cfio->nopage.ft_address,(long)cfio->nopage.ft_type);
-                return -EFAULT;
-        } else if (vmpage == NOPAGE_OOM) {
-                CDEBUG(D_PAGE, "got addr %lu type %lx - OOM\n",
-                       cfio->nopage.ft_address, (long)cfio->nopage.ft_type);
-                return -ENOMEM;
-        }
-
-        LL_CDEBUG_PAGE(D_PAGE, vmpage, "got addr %lu type %lx\n",
-                       cfio->nopage.ft_address, (long)cfio->nopage.ft_type);
-
-        cfio->ft_vmpage = vmpage;
-        lock_page(vmpage);
+       struct vvp_io           *vio = cl2vvp_io(env, ios);
+       struct inode            *inode = vvp_object_inode(ios->cis_obj);
+       struct ll_inode_info    *lli = ll_i2info(inode);
 
-        return 0;
+       if (vio->vui_io_subtype == IO_NORMAL)
+               up_read(&lli->lli_trunc_sem);
 }
-#else
+
 static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 {
-        struct vm_fault *vmf = cfio->fault.ft_vmf;
+       struct vm_fault *vmf = cfio->ft_vmf;
 
-        cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
+       cfio->ft_flags = ll_filemap_fault(cfio->ft_vma, vmf);
+       cfio->ft_flags_valid = 1;
 
-        if (vmf->page) {
-                LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
-                               vmf->virtual_address);
-                if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
-                        lock_page(vmf->page);
-                        cfio->fault.ft_flags &= VM_FAULT_LOCKED;
-                }
+       if (vmf->page) {
+               LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
+                              get_vmf_address(vmf));
+               if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
+                       lock_page(vmf->page);
+                       cfio->ft_flags |= VM_FAULT_LOCKED;
+               }
 
-                cfio->ft_vmpage = vmf->page;
-                return 0;
-        }
+               cfio->ft_vmpage = vmf->page;
 
-        if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
-                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
-                return -EFAULT;
-        }
+               return 0;
+       }
 
-        if (cfio->fault.ft_flags & VM_FAULT_OOM) {
-                CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
-                return -ENOMEM;
-        }
+       if (cfio->ft_flags & VM_FAULT_SIGBUS) {
+               CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", get_vmf_address(vmf));
+               return -EFAULT;
+       }
 
-        if (cfio->fault.ft_flags & VM_FAULT_RETRY)
-                return -EAGAIN;
+       if (cfio->ft_flags & VM_FAULT_OOM) {
+               CDEBUG(D_PAGE, "got addr %p - OOM\n", get_vmf_address(vmf));
+               return -ENOMEM;
+       }
+
+       if (cfio->ft_flags & VM_FAULT_RETRY)
+               return -EAGAIN;
+
+       CERROR("unknown error in page fault %d\n", cfio->ft_flags);
 
-        CERROR("unknow error in page fault %d!\n", cfio->fault.ft_flags);
-        return -EINVAL;
+       return -EINVAL;
 }
 
-#endif
+static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
+                                   struct cl_page *page)
+{
+       set_page_dirty(page->cp_vmpage);
+}
 
 static int vvp_io_fault_start(const struct lu_env *env,
                               const struct cl_io_slice *ios)
 {
-       struct vvp_io       *vio     = cl2vvp_io(env, ios);
-       struct cl_io        *io      = ios->cis_io;
-       struct cl_object    *obj     = io->ci_obj;
-       struct inode        *inode   = ccc_object_inode(obj);
-       struct cl_fault_io  *fio     = &io->u.ci_fault;
-       struct vvp_fault_io *cfio    = &vio->u.fault;
-       loff_t               offset;
-       int                  result  = 0;
-       cfs_page_t          *vmpage  = NULL;
-       struct cl_page      *page;
-       loff_t               size;
-       pgoff_t              last; /* last page in a file data region */
-
-        if (fio->ft_executable &&
-            LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
-                CWARN("binary "DFID
-                      " changed while waiting for the page fault lock\n",
-                      PFID(lu_object_fid(&obj->co_lu)));
+       struct vvp_io           *vio   = cl2vvp_io(env, ios);
+       struct cl_io            *io    = ios->cis_io;
+       struct cl_object        *obj   = io->ci_obj;
+       struct inode            *inode = vvp_object_inode(obj);
+       struct ll_inode_info    *lli   = ll_i2info(inode);
+       struct cl_fault_io      *fio   = &io->u.ci_fault;
+       struct vvp_fault_io     *cfio  = &vio->u.fault;
+       loff_t                   offset;
+       int                      result = 0;
+       struct page             *vmpage = NULL;
+       struct cl_page          *page;
+       loff_t                   size;
+       pgoff_t                  last_index;
+       ENTRY;
+
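+       /* serialize against truncate for the duration of the fault;
+        * released in vvp_io_fault_end() */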
+       down_read(&lli->lli_trunc_sem);
 
         /* offset of the last byte on the page */
         offset = cl_offset(obj, fio->ft_index + 1) - 1;
         LASSERT(cl_index(obj, offset) == fio->ft_index);
-        result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
-        if (result != 0)
-                return result;
+       result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
+       if (result != 0)
+               RETURN(result);
 
        /* must return locked page */
        if (fio->ft_mkwrite) {
@@ -710,7 +1206,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
        } else {
                result = vvp_io_kernel_fault(cfio);
                if (result != 0)
-                       return result;
+                       RETURN(result);
        }
 
        vmpage = cfio->ft_vmpage;
@@ -731,16 +1227,15 @@ static int vvp_io_fault_start(const struct lu_env *env,
                 GOTO(out, result = +1);
         }
 
+       last_index = cl_index(obj, size - 1);
 
        if (fio->ft_mkwrite) {
-               pgoff_t last_index;
                /*
                 * Capture the size while holding the lli_trunc_sem from
                 * above; we want to make sure that we complete the mkwrite
                 * action while holding this lock. We need to make sure that
                 * we are not past the end of the file.
                 */
-               last_index = cl_index(obj, size - 1);
                if (last_index < fio->ft_index) {
                        CDEBUG(D_PAGE,
                                "llite: mkwrite and truncate race happened: "
@@ -761,32 +1256,39 @@ static int vvp_io_fault_start(const struct lu_env *env,
                }
        }
 
-        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
-        if (IS_ERR(page))
-                GOTO(out, result = PTR_ERR(page));
-
-        /* if page is going to be written, we should add this page into cache
-         * earlier. */
-        if (fio->ft_mkwrite) {
-                wait_on_page_writeback(vmpage);
-                if (set_page_dirty(vmpage)) {
-                        struct ccc_page *cp;
-
-                        /* vvp_page_assume() calls wait_on_page_writeback(). */
-                        cl_page_assume(env, io, page);
-
-                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
-                        vvp_write_pending(cl2ccc(obj), cp);
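+       /* look up (or create) the cl_page matching this VM page */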
+       page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
+       if (IS_ERR(page))
+               GOTO(out, result = PTR_ERR(page));
 
-                        /* Do not set Dirty bit here so that in case IO is
-                         * started before the page is really made dirty, we
-                         * still have chance to detect it. */
-                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
+       /* If the page is going to be written, we should add it into the
+        * cache earlier. */
+       if (fio->ft_mkwrite) {
+               wait_on_page_writeback(vmpage);
+               if (!PageDirty(vmpage)) {
+                       struct cl_page_list *plist = &io->ci_queue.c2_qin;
+                       struct vvp_page *vpg = cl_object_page_slice(obj, page);
+                       int to = PAGE_SIZE;
+
+                       /* vvp_page_assume() calls wait_on_page_writeback(). */
+                       cl_page_assume(env, io, page);
+
+                       cl_page_list_init(plist);
+                       cl_page_list_add(plist, page);
+
+                       /* size fixup: if this is the last page of the file,
+                        * only commit up to the end of file within it */
+                       if (last_index == vvp_index(vpg))
+                               to = size & ~PAGE_MASK;
+
+                       /* Do not set the Dirty bit here so that in case IO
+                        * is started before the page is really made dirty,
+                        * we still have a chance to detect it. */
+                       result = cl_io_commit_async(env, io, plist, 0, to,
+                                                   mkwrite_commit_callback);
                        LASSERT(cl_page_is_owned(page, io));
+                       cl_page_list_fini(env, plist);
 
                        vmpage = NULL;
                        if (result < 0) {
-                               cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);
 
@@ -801,15 +1303,14 @@ static int vvp_io_fault_start(const struct lu_env *env,
                }
        }
 
-       last = cl_index(obj, size - 1);
        /*
         * The ft_index is only used in the case of a mkwrite action. We
         * need to check that our assertions are correct, since we should
         * have caught this case above.
         */
-       LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
-        if (fio->ft_index == last)
+       LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
+       if (fio->ft_index == last_index)
                 /*
                  * Last page is mapped partially.
                  */
@@ -825,12 +1326,23 @@ out:
        /* return unlocked vmpage to avoid deadlocking */
        if (vmpage != NULL)
                unlock_page(vmpage);
-#ifdef HAVE_VM_OP_FAULT
-       cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
-#endif
+
+       cfio->ft_flags &= ~VM_FAULT_LOCKED;
+
        return result;
 }
 
+static void vvp_io_fault_end(const struct lu_env *env,
+                            const struct cl_io_slice *ios)
+{
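+       /* pairs with the down_read() in vvp_io_fault_start() */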
+       struct inode            *inode = vvp_object_inode(ios->cis_obj);
+       struct ll_inode_info    *lli   = ll_i2info(inode);
+
+       CLOBINVRNT(env, ios->cis_io->ci_obj,
+                  vvp_object_invariant(ios->cis_io->ci_obj));
+       up_read(&lli->lli_trunc_sem);
+}
+
 static int vvp_io_fsync_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
 {
@@ -840,303 +1352,43 @@ static int vvp_io_fsync_start(const struct lu_env *env,
        return 0;
 }
 
-static int vvp_io_read_page(const struct lu_env *env,
-                            const struct cl_io_slice *ios,
-                            const struct cl_page_slice *slice)
-{
-        struct cl_io              *io     = ios->cis_io;
-        struct cl_object          *obj    = slice->cpl_obj;
-        struct ccc_page           *cp     = cl2ccc_page(slice);
-        struct cl_page            *page   = slice->cpl_page;
-        struct inode              *inode  = ccc_object_inode(obj);
-        struct ll_sb_info         *sbi    = ll_i2sbi(inode);
-        struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
-        struct ll_readahead_state *ras    = &fd->fd_ras;
-        cfs_page_t                *vmpage = cp->cpg_page;
-        struct cl_2queue          *queue  = &io->ci_queue;
-        int rc;
-
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-        LASSERT(slice->cpl_obj == obj);
-
-        ENTRY;
-
-        if (sbi->ll_ra_info.ra_max_pages_per_file &&
-            sbi->ll_ra_info.ra_max_pages)
-                ras_update(sbi, inode, ras, page->cp_index,
-                           cp->cpg_defer_uptodate);
-
-        /* Sanity check whether the page is protected by a lock. */
-        rc = cl_page_is_under_lock(env, io, page);
-        if (rc != -EBUSY) {
-                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
-                               rc == -ENODATA ? "without a lock" :
-                               "match failed", rc);
-                if (rc != -ENODATA)
-                        RETURN(rc);
-        }
-
-        if (cp->cpg_defer_uptodate) {
-                cp->cpg_ra_used = 1;
-                cl_page_export(env, page, 1);
-        }
-        /*
-         * Add page into the queue even when it is marked uptodate above.
-         * this will unlock it automatically as part of cl_page_list_disown().
-         */
-        cl_2queue_add(queue, page);
-        if (sbi->ll_ra_info.ra_max_pages_per_file &&
-            sbi->ll_ra_info.ra_max_pages)
-                ll_readahead(env, io, ras,
-                             vmpage->mapping, &queue->c2_qin, fd->fd_flags);
-
-        RETURN(0);
-}
-
-static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
-                            struct cl_page *page, struct ccc_page *cp,
-                            enum cl_req_type crt)
-{
-        struct cl_2queue  *queue;
-        int result;
-
-        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
-        queue = &io->ci_queue;
-        cl_2queue_init_page(queue, page);
-
-       result = cl_io_submit_sync(env, io, crt, queue, 0);
-        LASSERT(cl_page_is_owned(page, io));
-
-        if (crt == CRT_READ)
-                /*
-                 * in CRT_WRITE case page is left locked even in case of
-                 * error.
-                 */
-                cl_page_list_disown(env, io, &queue->c2_qin);
-        cl_2queue_fini(env, queue);
-
-        return result;
-}
-
-/**
- * Prepare partially written-to page for a write.
- */
-static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
-                                  struct cl_object *obj, struct cl_page *pg,
-                                  struct ccc_page *cp,
-                                  unsigned from, unsigned to)
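+/* Readahead hint: a group lock covers the whole file, so readahead may
+ * extend to EOF without consulting the layers below. */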
+static int vvp_io_read_ahead(const struct lu_env *env,
+                            const struct cl_io_slice *ios,
+                            pgoff_t start, struct cl_read_ahead *ra)
 {
-        struct cl_attr *attr   = ccc_env_thread_attr(env);
-        loff_t          offset = cl_offset(obj, pg->cp_index);
-        int             result;
-
-        cl_object_attr_lock(obj);
-        result = cl_object_attr_get(env, obj, attr);
-        cl_object_attr_unlock(obj);
-        if (result == 0) {
-                /*
-                 * If are writing to a new page, no need to read old data.
-                 * The extent locking will have updated the KMS, and for our
-                 * purposes here we can treat it like i_size.
-                 */
-                if (attr->cat_kms <= offset) {
-                        char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);
-
-                        memset(kaddr, 0, cl_page_size(obj));
-                        ll_kunmap_atomic(kaddr, KM_USER0);
-                } else if (cp->cpg_defer_uptodate)
-                        cp->cpg_ra_used = 1;
-                else
-                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
-                /*
-                 * In older implementations, obdo_refresh_inode is called here
-                 * to update the inode because the write might modify the
-                 * object info at OST. However, this has been proven useless,
-                 * since LVB functions will be called when user space program
-                 * tries to retrieve inode attribute.  Also, see bug 15909 for
-                 * details. -jay
-                 */
-                if (result == 0)
-                        cl_page_export(env, pg, 1);
-        }
-        return result;
-}
-
-static int vvp_io_prepare_write(const struct lu_env *env,
-                                const struct cl_io_slice *ios,
-                                const struct cl_page_slice *slice,
-                                unsigned from, unsigned to)
-{
-        struct cl_object *obj    = slice->cpl_obj;
-        struct ccc_page  *cp     = cl2ccc_page(slice);
-        struct cl_page   *pg     = slice->cpl_page;
-        cfs_page_t       *vmpage = cp->cpg_page;
-
-        int result;
+       int result = 0;
+       ENTRY;
 
-        ENTRY;
+       if (ios->cis_io->ci_type == CIT_READ ||
+           ios->cis_io->ci_type == CIT_FAULT) {
+               struct vvp_io *vio = cl2vvp_io(env, ios);
 
-        LINVRNT(cl_page_is_vmlocked(env, pg));
-        LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
-
-        result = 0;
-
-        CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
-        if (!PageUptodate(vmpage)) {
-                /*
-                 * We're completely overwriting an existing page, so _don't_
-                 * set it up to date until commit_write
-                 */
-                if (from == 0 && to == CFS_PAGE_SIZE) {
-                        CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
-                        POISON_PAGE(page, 0x11);
-                } else
-                        result = vvp_io_prepare_partial(env, ios->cis_io, obj,
-                                                        pg, cp, from, to);
-        } else
-                CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
-        RETURN(result);
-}
-
-static int vvp_io_commit_write(const struct lu_env *env,
-                               const struct cl_io_slice *ios,
-                               const struct cl_page_slice *slice,
-                               unsigned from, unsigned to)
-{
-        struct cl_object  *obj    = slice->cpl_obj;
-        struct cl_io      *io     = ios->cis_io;
-        struct ccc_page   *cp     = cl2ccc_page(slice);
-        struct cl_page    *pg     = slice->cpl_page;
-        struct inode      *inode  = ccc_object_inode(obj);
-        struct ll_sb_info *sbi    = ll_i2sbi(inode);
-       struct ll_inode_info *lli = ll_i2info(inode);
-        cfs_page_t        *vmpage = cp->cpg_page;
-
-        int    result;
-        int    tallyop;
-        loff_t size;
-
-        ENTRY;
-
-        LINVRNT(cl_page_is_vmlocked(env, pg));
-        LASSERT(vmpage->mapping->host == inode);
-
-        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "commiting page write\n");
-        CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
-
-        /*
-         * queue a write for some time in the future the first time we
-         * dirty the page.
-         *
-         * This is different from what other file systems do: they usually
-         * just mark page (and some of its buffers) dirty and rely on
-         * balance_dirty_pages() to start a write-back. Lustre wants write-back
-         * to be started earlier for the following reasons:
-         *
-         *     (1) with a large number of clients we need to limit the amount
-         *     of cached data on the clients a lot;
-         *
-         *     (2) large compute jobs generally want compute-only then io-only
-         *     and the IO should complete as quickly as possible;
-         *
-         *     (3) IO is batched up to the RPC size and is async until the
-         *     client max cache is hit
-         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
-         *
-         */
-        if (!PageDirty(vmpage)) {
-                tallyop = LPROC_LL_DIRTY_MISSES;
-                result = cl_page_cache_add(env, io, pg, CRT_WRITE);
-                if (result == 0) {
-                        /* page was added into cache successfully. */
-                        set_page_dirty(vmpage);
-                        vvp_write_pending(cl2ccc(obj), cp);
-                } else if (result == -EDQUOT) {
-                        pgoff_t last_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
-                        bool need_clip = true;
-
-                        /*
-                         * Client ran out of disk space grant. Possible
-                         * strategies are:
-                         *
-                         *     (a) do a sync write, renewing grant;
-                         *
-                         *     (b) stop writing on this stripe, switch to the
-                         *     next one.
-                         *
-                         * (b) is a part of "parallel io" design that is the
-                         * ultimate goal. (a) is what "old" client did, and
-                         * what the new code continues to do for the time
-                         * being.
-                         */
-                        if (last_index > pg->cp_index) {
-                                to = CFS_PAGE_SIZE;
-                                need_clip = false;
-                        } else if (last_index == pg->cp_index) {
-                                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
-                                if (to < size_to)
-                                        to = size_to;
-                        }
-                        if (need_clip)
-                                cl_page_clip(env, pg, 0, to);
-                        result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
-                        if (result)
-                                CERROR("Write page %lu of inode %p failed %d\n",
-                                       pg->cp_index, inode, result);
-                }
-        } else {
-                tallyop = LPROC_LL_DIRTY_HITS;
-                result = 0;
-        }
-        ll_stats_ops_tally(sbi, tallyop, 1);
-
-       /* Inode should be marked DIRTY even if no new page was marked DIRTY
-        * because page could have been not flushed between 2 modifications.
-        * It is important the file is marked DIRTY as soon as the I/O is done
-        * Indeed, when cache is flushed, file could be already closed and it
-        * is too late to warn the MDT.
-        * It is acceptable that file is marked DIRTY even if I/O is dropped
-        * for some reasons before being flushed to OST.
-        */
-       if (result == 0) {
-               spin_lock(&lli->lli_lock);
-               lli->lli_flags |= LLIF_DATA_MODIFIED;
-               spin_unlock(&lli->lli_lock);
+               if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+                       ra->cra_end = CL_PAGE_EOF;
+                       result = +1; /* no need to call down */
+               }
        }
 
-        size = cl_offset(obj, pg->cp_index) + to;
-
-       ll_inode_size_lock(inode);
-        if (result == 0) {
-                if (size > i_size_read(inode)) {
-                        cl_isize_write_nolock(inode, size);
-                        CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
-                               PFID(lu_object_fid(&obj->co_lu)),
-                               (unsigned long)size);
-                }
-                cl_page_export(env, pg, 1);
-        } else {
-                if (size > i_size_read(inode))
-                        cl_page_discard(env, io, pg);
-        }
-       ll_inode_size_unlock(inode);
        RETURN(result);
 }
 
 static const struct cl_io_operations vvp_io_ops = {
-        .op = {
-                [CIT_READ] = {
-                        .cio_fini      = vvp_io_read_fini,
-                        .cio_lock      = vvp_io_read_lock,
-                        .cio_start     = vvp_io_read_start,
-                        .cio_advance   = ccc_io_advance
-                },
+       .op = {
+               [CIT_READ] = {
+                       .cio_fini       = vvp_io_fini,
+                       .cio_lock       = vvp_io_read_lock,
+                       .cio_start      = vvp_io_read_start,
+                       .cio_end        = vvp_io_rw_end,
+                       .cio_advance    = vvp_io_advance,
+               },
                 [CIT_WRITE] = {
-                        .cio_fini      = vvp_io_fini,
-                        .cio_lock      = vvp_io_write_lock,
-                        .cio_start     = vvp_io_write_start,
-                        .cio_advance   = ccc_io_advance
+                       .cio_fini      = vvp_io_fini,
+                       .cio_iter_init = vvp_io_write_iter_init,
+                       .cio_iter_fini = vvp_io_write_iter_fini,
+                       .cio_lock      = vvp_io_write_lock,
+                       .cio_start     = vvp_io_write_start,
+                       .cio_end       = vvp_io_rw_end,
+                       .cio_advance   = vvp_io_advance,
                 },
                 [CIT_SETATTR] = {
                         .cio_fini       = vvp_io_setattr_fini,
@@ -1150,49 +1402,51 @@ static const struct cl_io_operations vvp_io_ops = {
                         .cio_iter_init = vvp_io_fault_iter_init,
                         .cio_lock      = vvp_io_fault_lock,
                         .cio_start     = vvp_io_fault_start,
-                        .cio_end       = ccc_io_end
+                       .cio_end       = vvp_io_fault_end,
                 },
                [CIT_FSYNC] = {
-                       .cio_start  = vvp_io_fsync_start,
-                       .cio_fini   = vvp_io_fini
+                       .cio_start      = vvp_io_fsync_start,
+                       .cio_fini       = vvp_io_fini
+               },
+               [CIT_MISC] = {
+                       .cio_fini       = vvp_io_fini
+               },
+               [CIT_LADVISE] = {
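+                       /* ladvise needs no vvp-level handling beyond fini */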
+                       .cio_fini       = vvp_io_fini
                },
-                [CIT_MISC] = {
-                        .cio_fini   = vvp_io_fini
-                }
-        },
-        .cio_read_page     = vvp_io_read_page,
-        .cio_prepare_write = vvp_io_prepare_write,
-        .cio_commit_write  = vvp_io_commit_write
+       },
+       .cio_read_ahead = vvp_io_read_ahead
 };
 
 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                 struct cl_io *io)
 {
        struct vvp_io      *vio   = vvp_env_io(env);
-       struct ccc_io      *cio   = ccc_env_io(env);
-       struct inode       *inode = ccc_object_inode(obj);
-        int                 result;
+       struct inode       *inode = vvp_object_inode(obj);
+       int                 result;
 
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-        ENTRY;
+       CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+       ENTRY;
 
-        CL_IO_SLICE_CLEAN(cio, cui_cl);
-        cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
-        vio->cui_ra_window_set = 0;
+       CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
+              "restore needed %d\n",
+              PFID(lu_object_fid(&obj->co_lu)),
+              io->ci_ignore_layout, io->ci_verify_layout,
+              vio->vui_layout_gen, io->ci_restore_needed);
+
+       CL_IO_SLICE_CLEAN(vio, vui_cl);
+       cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
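+       /* no readahead window has been set up for this IO yet */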
+       vio->vui_ra_valid = false;
        result = 0;
        if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
-               size_t count;
                struct ll_inode_info *lli = ll_i2info(inode);
 
-                count = io->u.ci_rw.crw_count;
-                /* "If nbyte is 0, read() will return 0 and have no other
-                 *  results."  -- Single Unix Spec */
-                if (count == 0)
-                        result = 1;
-                else {
-                        cio->cui_tot_count = count;
-                        cio->cui_tot_nrsegs = 0;
-                }
+               vio->vui_tot_count = io->u.ci_rw.rw_range.cir_count;
+               /* "If nbyte is 0, read() will return 0 and have no other
+                *  results."  -- Single Unix Spec */
+               if (vio->vui_tot_count == 0)
+                       result = 1;
+
                /* for read/write, we store the jobid in the inode, and
                 * it'll be fetched by osc when building RPC.
                 *
@@ -1205,26 +1459,22 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                        io->ci_lockreq = CILR_MANDATORY;
        }
 
-       /* ignore layout change for generic CIT_MISC but not for glimpse.
-        * io context for glimpse must set ci_verify_layout to true,
-        * see cl_glimpse_size0() for details. */
-       if (io->ci_type == CIT_MISC && !io->ci_verify_layout)
-               io->ci_ignore_layout = 1;
-
        /* Enqueue layout lock and get layout version. We need to do this
         * even for operations that require an open file, such as read and
         * write, because IT_OPEN might not grant the layout lock. */
-       if (result == 0 && !io->ci_ignore_layout)
-               result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+       if (result == 0 && !io->ci_ignore_layout) {
+               result = ll_layout_refresh(inode, &vio->vui_layout_gen);
+               if (result == -ENOENT)
+                       /* If the inode on MDS has been removed, but the objects
+                        * on OSTs haven't been destroyed (async unlink), layout
+                        * fetch will return -ENOENT; we ignore this error
+                        * and continue with the dirty flush. LU-3230. */
+                       result = 0;
+               if (result < 0)
+                       CERROR("%s: refresh file layout " DFID " error %d.\n",
+                               ll_get_fsname(inode->i_sb, NULL, 0),
+                               PFID(lu_object_fid(&obj->co_lu)), result);
+       }
 
        RETURN(result);
 }
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
-                                const struct cl_io_slice *slice)
-{
-        /* Caling just for assertion */
-        cl2ccc_io(env, slice);
-        return vvp_env_io(env);
-}
-