LU-2675 llite: remove lli_lvb
[fs/lustre-release.git] lustre/llite/vvp_io.c
index 7aded2e..df9a83a 100644
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#ifndef __KERNEL__
-# error This file is kernel only.
-#endif
 
 #include <obd.h>
-#include <lustre_lite.h>
-
 #include "vvp_internal.h"
 
 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                 const struct cl_io_slice *slice);
 
 /**
- * True, if \a io is a normal io, False for sendfile() / splice_{read|write}
+ * True, if \a io is a normal io, False for splice_{read,write}
  */
 int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
 {
@@ -83,7 +78,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
        case CIT_WRITE:
        /* no lock is needed here to check lli_layout_gen, as we hold the
         * extent lock and the GROUP lock must be held to swap the layout */
-               if (lli->lli_layout_gen != cio->cui_layout_gen) {
+               if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
                        io->ci_need_restart = 1;
                        /* this will return a short read/write to the
                         * application */
                        io->ci_continue = 0;
@@ -104,6 +99,27 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
  *
  */
 
+static int vvp_io_write_iter_init(const struct lu_env *env,
+                                 const struct cl_io_slice *ios)
+{
+       struct ccc_io *cio = cl2ccc_io(env, ios);
+
+       cl_page_list_init(&cio->u.write.cui_queue);
+       cio->u.write.cui_written = 0;
+       cio->u.write.cui_from = 0;
+       cio->u.write.cui_to = PAGE_SIZE;
+
+       return 0;
+}
+
+static void vvp_io_write_iter_fini(const struct lu_env *env,
+                                  const struct cl_io_slice *ios)
+{
+       struct ccc_io *cio = cl2ccc_io(env, ios);
+
+       LASSERT(cio->u.write.cui_queue.pl_nr == 0);
+}
+
 static int vvp_io_fault_iter_init(const struct lu_env *env,
                                   const struct cl_io_slice *ios)
 {
@@ -121,21 +137,60 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
        struct cl_io     *io  = ios->cis_io;
        struct cl_object *obj = io->ci_obj;
        struct ccc_io    *cio = cl2ccc_io(env, ios);
+       struct inode     *inode = ccc_object_inode(obj);
 
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+       CLOBINVRNT(env, obj, ccc_object_invariant(obj));
 
-       CDEBUG(D_VFSTRACE, "ignore/verify layout %d/%d, layout version %d.\n",
-               io->ci_ignore_layout, io->ci_verify_layout, cio->cui_layout_gen);
+       CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
+                          "restore needed %d\n",
+              PFID(lu_object_fid(&obj->co_lu)),
+              io->ci_ignore_layout, io->ci_verify_layout,
+              cio->cui_layout_gen, io->ci_restore_needed);
+
+       if (io->ci_restore_needed == 1) {
+               int     rc;
+
+               /* the file was detected as released; we need to restore
+                * it before finishing the io
+                */
+               rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
+               /* if restore registration failed, there is no restart;
+                * we will return -ENODATA */
+               /* The layout will change after restore, so we need to
+                * block on the layout lock held by the MDT. As the MDT
+                * will not send the new layout in the lvb (see LU-3124),
+                * we have to fetch it explicitly; all this is done by
+                * ll_layout_refresh()
+                */
+               if (rc == 0) {
+                       io->ci_restore_needed = 0;
+                       io->ci_need_restart = 1;
+                       io->ci_verify_layout = 1;
+               } else {
+                       io->ci_restore_needed = 1;
+                       io->ci_need_restart = 0;
+                       io->ci_verify_layout = 0;
+                       io->ci_result = rc;
+               }
+       }
 
        if (!io->ci_ignore_layout && io->ci_verify_layout) {
                __u32 gen = 0;
 
                /* check layout version */
-               ll_layout_refresh(ccc_object_inode(obj), &gen);
+               ll_layout_refresh(inode, &gen);
                io->ci_need_restart = cio->cui_layout_gen != gen;
-               if (io->ci_need_restart)
-                       CDEBUG(D_VFSTRACE, "layout changed from %d to %d.\n",
-                               cio->cui_layout_gen, gen);
+               if (io->ci_need_restart) {
+                       CDEBUG(D_VFSTRACE,
+                              DFID" layout changed from %d to %d.\n",
+                              PFID(lu_object_fid(&obj->co_lu)),
+                              cio->cui_layout_gen, gen);
+                       /* today a successful restore is the only
+                        * possible case */
+                       /* restore was done, so clear the restoring state */
+                       ll_i2info(ccc_object_inode(obj))->lli_flags &=
+                               ~LLIF_FILE_RESTORING;
+               }
        }
 }
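
The restore branch above leaves the io in one of two states. A minimal
standalone sketch of that decision, using illustrative names rather than
the real Lustre types:

	#include <stdio.h>

	struct io_state {
		int restore_needed;
		int need_restart;
		int verify_layout;
		int result;
	};

	static void apply_restore_result(struct io_state *io, int rc)
	{
		if (rc == 0) {
			/* restore registered: restart io, re-verify layout */
			io->restore_needed = 0;
			io->need_restart   = 1;
			io->verify_layout  = 1;
		} else {
			/* registration failed: stay in restore, report rc */
			io->restore_needed = 1;
			io->need_restart   = 0;
			io->verify_layout  = 0;
			io->result         = rc;
		}
	}

	int main(void)
	{
		struct io_state io = { 1, 0, 0, 0 };

		apply_restore_result(&io, 0);
		printf("%d %d %d\n", io.restore_needed,
		       io.need_restart, io.verify_layout);	/* 0 1 1 */
		return 0;
	}
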
 
@@ -155,7 +210,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
         vvp_io_fini(env, ios);
 }
 
-enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
+static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
 {
         /*
          * we only want to hold PW locks if the mmap() can generate
@@ -178,7 +233,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
         unsigned long           addr;
         unsigned long           seg;
         ssize_t                 count;
-        int                     result;
+       int                     result = 0;
         ENTRY;
 
         LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
@@ -209,13 +264,13 @@ static int vvp_mmap_locks(const struct lu_env *env,
                         struct inode *inode = vma->vm_file->f_dentry->d_inode;
                         int flags = CEF_MUST;
 
-                        if (ll_file_nolock(vma->vm_file)) {
-                                /*
-                                 * For no lock case, a lockless lock will be
-                                 * generated.
-                                 */
-                                flags = CEF_NEVER;
-                        }
+                       if (ll_file_nolock(vma->vm_file)) {
+                               /*
+                                * The nolock case is not allowed for mmap
+                                */
+                               result = -EINVAL;
+                               break;
+                       }
 
                         /*
                          * XXX: Required lock mode can be weakened: CIT_WRITE
@@ -236,18 +291,20 @@ static int vvp_mmap_locks(const struct lu_env *env,
                                descr->cld_mode, descr->cld_start,
                                descr->cld_end);
 
-                        if (result < 0)
-                                RETURN(result);
+                       if (result < 0)
+                               break;
 
-                        if (vma->vm_end - addr >= count)
-                                break;
+                       if (vma->vm_end - addr >= count)
+                               break;
 
-                        count -= vma->vm_end - addr;
-                        addr = vma->vm_end;
-                }
-                up_read(&mm->mmap_sem);
-        }
-        RETURN(0);
+                       count -= vma->vm_end - addr;
+                       addr = vma->vm_end;
+               }
+               up_read(&mm->mmap_sem);
+               if (result < 0)
+                       break;
+       }
+       RETURN(result);
 }
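
Note the restructuring in vvp_mmap_locks() above: earlier versions RETURNed
from inside the vma walk, which would have skipped up_read(&mm->mmap_sem).
A minimal standalone sketch of the pattern, with purely illustrative names:

	#include <stdio.h>

	static int process_region(int i)
	{
		return (i == 3) ? -22 /* -EINVAL, e.g. a nolock vma */ : 0;
	}

	static int walk_regions(int nregs)
	{
		int result = 0;
		int i;

		/* down_read(&lock); */
		for (i = 0; i < nregs; i++) {
			result = process_region(i);
			if (result < 0)
				break;	/* never return with the lock held */
		}
		/* up_read(&lock); */
		return result;
	}

	int main(void)
	{
		printf("walk: %d\n", walk_regions(5));	/* prints -22 */
		return 0;
	}
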
 
 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
@@ -273,20 +330,14 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
 static int vvp_io_read_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
 {
-        struct cl_io         *io  = ios->cis_io;
-        struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
-        int result;
+       struct cl_io            *io  = ios->cis_io;
+       struct cl_io_rw_common  *rd = &io->u.ci_rd.rd;
+       int result;
 
-        ENTRY;
-        /* XXX: Layer violation, we shouldn't see lsm at llite level. */
-       if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
-                result = vvp_io_rw_lock(env, io, CLM_READ,
-                                        io->u.ci_rd.rd.crw_pos,
-                                        io->u.ci_rd.rd.crw_pos +
-                                        io->u.ci_rd.rd.crw_count - 1);
-        else
-                result = 0;
-        RETURN(result);
+       ENTRY;
+       result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
+                               rd->crw_pos + rd->crw_count - 1);
+       RETURN(result);
 }
 
 static int vvp_io_fault_lock(const struct lu_env *env,
@@ -358,7 +409,6 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
 static int vvp_do_vmtruncate(struct inode *inode, size_t size)
 {
        int     result;
-       loff_t oldsize;
 
        /*
         * Only ll_inode_size_lock is taken at this level.
@@ -369,10 +419,9 @@ static int vvp_do_vmtruncate(struct inode *inode, size_t size)
                ll_inode_size_unlock(inode);
                return result;
        }
-       oldsize = inode->i_size;
        i_size_write(inode, size);
 
-       truncate_pagecache(inode, oldsize, size);
+       ll_truncate_pagecache(inode, size);
        ll_inode_size_unlock(inode);
        return result;
 }
@@ -447,34 +496,6 @@ static void vvp_io_setattr_fini(const struct lu_env *env,
        vvp_io_fini(env, ios);
 }
 
-#ifdef HAVE_FILE_READV
-static ssize_t lustre_generic_file_read(struct file *file,
-                                        struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_readv(file, vio->cui_iov, vio->cui_nrsegs, ppos);
-}
-
-static ssize_t lustre_generic_file_write(struct file *file,
-                                         struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_writev(file, vio->cui_iov, vio->cui_nrsegs, ppos);
-}
-#else
-static ssize_t lustre_generic_file_read(struct file *file,
-                                        struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
-                                     vio->cui_nrsegs, *ppos);
-}
-
-static ssize_t lustre_generic_file_write(struct file *file,
-                                        struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
-                                      vio->cui_nrsegs, *ppos);
-}
-#endif
-
 static int vvp_io_read_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
 {
@@ -516,10 +537,7 @@ static int vvp_io_read_start(const struct lu_env *env,
         if (!vio->cui_ra_window_set) {
                 vio->cui_ra_window_set = 1;
                 bead->lrr_start = cl_index(obj, pos);
-                /*
-                 * XXX: explicit CFS_PAGE_SIZE
-                 */
-                bead->lrr_count = cl_index(obj, tot + CFS_PAGE_SIZE - 1);
+               bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
                 ll_ra_read_in(file, bead);
         }
 
@@ -527,16 +545,11 @@ static int vvp_io_read_start(const struct lu_env *env,
         file_accessed(file);
         switch (vio->cui_io_subtype) {
         case IO_NORMAL:
-                 result = lustre_generic_file_read(file, cio, &pos);
-                 break;
-#ifdef HAVE_KERNEL_SENDFILE
-        case IO_SENDFILE:
-                result = generic_file_sendfile(file, &pos, cnt,
-                                vio->u.sendfile.cui_actor,
-                                vio->u.sendfile.cui_target);
-                break;
-#endif
-#ifdef HAVE_KERNEL_SPLICE_READ
+               LASSERT(cio->cui_iocb->ki_pos == pos);
+               result = generic_file_aio_read(cio->cui_iocb,
+                                              cio->cui_iov, cio->cui_nrsegs,
+                                              cio->cui_iocb->ki_pos);
+               break;
         case IO_SPLICE:
                 result = generic_file_splice_read(file, &pos,
                                 vio->u.splice.cui_pipe, cnt,
@@ -546,7 +559,6 @@ static int vvp_io_read_start(const struct lu_env *env,
                  * buffers. */
                 io->ci_continue = 0;
                 break;
-#endif
         default:
                 CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
                 LBUG();
@@ -576,6 +588,189 @@ static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice
        vvp_io_fini(env, ios);
 }
 
+static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
+                             struct cl_page_list *plist, int from, int to)
+{
+       struct cl_2queue *queue = &io->ci_queue;
+       struct cl_page *page;
+       unsigned int bytes = 0;
+       int rc = 0;
+       ENTRY;
+
+       if (plist->pl_nr == 0)
+               RETURN(0);
+
+       if (from > 0 || to != PAGE_SIZE) {
+               page = cl_page_list_first(plist);
+               if (plist->pl_nr == 1) {
+                       cl_page_clip(env, page, from, to);
+               } else {
+                       if (from > 0)
+                               cl_page_clip(env, page, from, PAGE_SIZE);
+                       if (to != PAGE_SIZE) {
+                               page = cl_page_list_last(plist);
+                               cl_page_clip(env, page, 0, to);
+                       }
+               }
+       }
+
+       cl_2queue_init(queue);
+       cl_page_list_splice(plist, &queue->c2_qin);
+       rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
+
+       /* plist is not sorted any more */
+       cl_page_list_splice(&queue->c2_qin, plist);
+       cl_page_list_splice(&queue->c2_qout, plist);
+       cl_2queue_fini(env, queue);
+
+       if (rc == 0) {
+               /* calculate bytes */
+               bytes = plist->pl_nr << PAGE_SHIFT;
+               bytes -= from + PAGE_SIZE - to;
+
+               while (plist->pl_nr > 0) {
+                       page = cl_page_list_first(plist);
+                       cl_page_list_del(env, plist, page);
+
+                       cl_page_clip(env, page, 0, PAGE_SIZE);
+
+                       SetPageUptodate(cl_page_vmpage(page));
+                       cl_page_disown(env, io, page);
+
+                       /* held in ll_cl_init() */
+                       lu_ref_del(&page->cp_reference, "cl_io", io);
+                       cl_page_put(env, page);
+               }
+       }
+
+       RETURN(bytes > 0 ? bytes : rc);
+}
+
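
The byte accounting in vvp_io_commit_sync() clips partial first and last
pages off the full-page total. A standalone sketch of that arithmetic,
assuming 4096-byte pages; values are illustrative:

	#include <stdio.h>

	#define PAGE_SIZE  4096u
	#define PAGE_SHIFT 12

	static unsigned int committed_bytes(unsigned int npages,
					    unsigned int from, unsigned int to)
	{
		unsigned int bytes = npages << PAGE_SHIFT;

		/* first page misses [0, from), last misses [to, PAGE_SIZE) */
		return bytes - (from + PAGE_SIZE - to);
	}

	int main(void)
	{
		/* 3 pages, first clipped at 512, last ends at 1024:
		 * (4096 - 512) + 4096 + 1024 = 8704 */
		printf("%u\n", committed_bytes(3, 512, 1024));
		return 0;
	}
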
+static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
+                               struct cl_page *page)
+{
+       struct ccc_page *cp;
+       struct page *vmpage = page->cp_vmpage;
+       struct cl_object *clob = cl_io_top(io)->ci_obj;
+
+       SetPageUptodate(vmpage);
+       set_page_dirty(vmpage);
+
+       cp = cl2ccc_page(cl_object_page_slice(clob, page));
+       vvp_write_pending(cl2ccc(clob), cp);
+
+       cl_page_disown(env, io, page);
+
+       /* held in ll_cl_init() */
+       lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
+       cl_page_put(env, page);
+}
+
+/* make sure the page list is contiguous */
+static bool page_list_sanity_check(struct cl_object *obj,
+                                  struct cl_page_list *plist)
+{
+       struct cl_page *page;
+       pgoff_t index = CL_PAGE_EOF;
+
+       cl_page_list_for_each(page, plist) {
+               struct ccc_page *cp = cl_object_page_slice(obj, page);
+
+               if (index == CL_PAGE_EOF) {
+                       index = ccc_index(cp);
+                       continue;
+               }
+
+               ++index;
+               if (index == ccc_index(cp))
+                       continue;
+
+               return false;
+       }
+       return true;
+}
+
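
page_list_sanity_check() below only verifies that queued page indices are
strictly consecutive. The same check on a plain index array, as a
standalone sketch (CL_PAGE_EOF stands in for "no index seen yet"):

	#include <stdbool.h>
	#include <stdio.h>

	#define CL_PAGE_EOF ((unsigned long)-1)

	static bool indices_contiguous(const unsigned long *idx, int n)
	{
		unsigned long prev = CL_PAGE_EOF;
		int i;

		for (i = 0; i < n; i++) {
			if (prev != CL_PAGE_EOF && idx[i] != prev + 1)
				return false;
			prev = idx[i];
		}
		return true;
	}

	int main(void)
	{
		unsigned long ok[]  = { 7, 8, 9 };
		unsigned long bad[] = { 7, 9, 10 };

		printf("%d %d\n", indices_contiguous(ok, 3),
		       indices_contiguous(bad, 3));	/* 1 0 */
		return 0;
	}
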
+/* Return how many bytes have been queued or written */
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
+{
+       struct cl_object *obj = io->ci_obj;
+       struct inode *inode = ccc_object_inode(obj);
+       struct ccc_io *cio = ccc_env_io(env);
+       struct cl_page_list *queue = &cio->u.write.cui_queue;
+       struct cl_page *page;
+       int rc = 0;
+       int bytes = 0;
+       unsigned int npages = cio->u.write.cui_queue.pl_nr;
+       ENTRY;
+
+       if (npages == 0)
+               RETURN(0);
+
+       CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
+               npages, cio->u.write.cui_from, cio->u.write.cui_to);
+
+       LASSERT(page_list_sanity_check(obj, queue));
+
+       /* submit IO with async write */
+       rc = cl_io_commit_async(env, io, queue,
+                               cio->u.write.cui_from, cio->u.write.cui_to,
+                               write_commit_callback);
+       npages -= queue->pl_nr; /* already committed pages */
+       if (npages > 0) {
+               /* calculate how many bytes were written */
+               bytes = npages << PAGE_SHIFT;
+
+               /* first page */
+               bytes -= cio->u.write.cui_from;
+               if (queue->pl_nr == 0) /* last page */
+                       bytes -= PAGE_SIZE - cio->u.write.cui_to;
+               LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
+
+               cio->u.write.cui_written += bytes;
+
+               CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
+                       npages, bytes, cio->u.write.cui_written);
+
+               /* the first page must have been written. */
+               cio->u.write.cui_from = 0;
+       }
+       LASSERT(page_list_sanity_check(obj, queue));
+       LASSERT(ergo(rc == 0, queue->pl_nr == 0));
+
+       /* out of quota, try sync write */
+       if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
+               rc = vvp_io_commit_sync(env, io, queue,
+                                       cio->u.write.cui_from,
+                                       cio->u.write.cui_to);
+               if (rc > 0) {
+                       cio->u.write.cui_written += rc;
+                       rc = 0;
+               }
+       }
+
+       /* update inode size */
+       ll_merge_attr(env, inode);
+
+       /* The pages still in the queue failed to commit; discard them
+        * unless they were dirtied before. */
+       while (queue->pl_nr > 0) {
+               page = cl_page_list_first(queue);
+               cl_page_list_del(env, queue, page);
+
+               if (!PageDirty(cl_page_vmpage(page)))
+                       cl_page_discard(env, io, page);
+
+               cl_page_disown(env, io, page);
+
+               /* held in ll_cl_init() */
+               lu_ref_del(&page->cp_reference, "cl_io", io);
+               cl_page_put(env, page);
+       }
+       cl_page_list_fini(env, queue);
+
+       RETURN(rc);
+}
+
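
After cl_io_commit_async() returns, pages still on the queue were not
committed; the committed byte count loses cui_from on the first page, and
PAGE_SIZE - cui_to on the last page only when the whole queue went out.
A standalone sketch of that accounting, with illustrative values:

	#include <stdio.h>

	#define PAGE_SIZE  4096
	#define PAGE_SHIFT 12

	static int written_bytes(int queued, int left, int from, int to)
	{
		int committed = queued - left;
		int bytes;

		if (committed == 0)
			return 0;

		bytes = committed << PAGE_SHIFT;
		bytes -= from;		/* first page was partial */
		if (left == 0)		/* the last page went out too */
			bytes -= PAGE_SIZE - to;
		return bytes;
	}

	int main(void)
	{
		/* 4 pages queued, 1 left uncommitted, from=100, to=2048:
		 * 3 * 4096 - 100 = 12188 */
		printf("%d\n", written_bytes(4, 1, 100, 2048));
		return 0;
	}
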
 static int vvp_io_write_start(const struct lu_env *env,
                               const struct cl_io_slice *ios)
 {
@@ -583,7 +778,6 @@ static int vvp_io_write_start(const struct lu_env *env,
         struct cl_io       *io    = ios->cis_io;
         struct cl_object   *obj   = io->ci_obj;
         struct inode       *inode = ccc_object_inode(obj);
-        struct file        *file  = cio->cui_fd->fd_file;
         ssize_t result = 0;
         loff_t pos = io->u.ci_wr.wr.crw_pos;
         size_t cnt = io->u.ci_wr.wr.crw_count;
@@ -591,30 +785,66 @@ static int vvp_io_write_start(const struct lu_env *env,
         ENTRY;
 
        if (!can_populate_pages(env, io, inode))
-               return 0;
+               RETURN(0);
 
         if (cl_io_is_append(io)) {
                 /*
                  * PARALLEL IO This has to be changed for parallel IO doing
                  * out-of-order writes.
                  */
+               ll_merge_attr(env, inode);
                 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
-#ifndef HAVE_FILE_WRITEV
                 cio->cui_iocb->ki_pos = pos;
-#endif
-        }
+        } else {
+               LASSERT(cio->cui_iocb->ki_pos == pos);
+       }
 
-        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+       CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
 
-        if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
-                result = 0;
-        else
-                result = lustre_generic_file_write(file, cio, &pos);
+       if (cio->cui_iov == NULL) {
+               /* from a temp io in ll_cl_init(). */
+               result = 0;
+       } else {
+               /*
+                * When using the locked AIO function (generic_file_aio_write())
+                * testing has shown the inode mutex to be a limiting factor
+                * with multi-threaded single shared file performance. To get
+                * around this, we now use the lockless version. To maintain
+                * consistency, proper locking to protect against writes,
+                * truncates, etc. is handled in the higher layers of Lustre.
+                */
+               result = __generic_file_aio_write(cio->cui_iocb,
+                                                 cio->cui_iov, cio->cui_nrsegs,
+                                                 &cio->cui_iocb->ki_pos);
+               if (result > 0 || result == -EIOCBQUEUED) {
+                       ssize_t err;
+
+                       err = generic_write_sync(cio->cui_iocb->ki_filp,
+                                                pos, result);
+                       if (err < 0 && result > 0)
+                               result = err;
+               }
 
+       }
+       if (result > 0) {
+               result = vvp_io_write_commit(env, io);
+               if (cio->u.write.cui_written > 0) {
+                       result = cio->u.write.cui_written;
+                       io->ci_nob += result;
+
+                       CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
+                               io->ci_nob, result);
+               }
+       }
        if (result > 0) {
+               struct ll_inode_info *lli = ll_i2info(inode);
+
+               spin_lock(&lli->lli_lock);
+               lli->lli_flags |= LLIF_DATA_MODIFIED;
+               spin_unlock(&lli->lli_lock);
+
                if (result < cnt)
                        io->ci_continue = 0;
-               io->ci_nob += result;
                ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
                                  cio->cui_fd, pos, result, WRITE);
                result = 0;
@@ -623,45 +853,19 @@ static int vvp_io_write_start(const struct lu_env *env,
        RETURN(result);
 }
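
The write path above pairs the lockless __generic_file_aio_write() with
generic_write_sync(), and a sync failure overrides a positive byte count.
A standalone sketch of that result merge (EIOCBQUEUED uses the kernel's
internal value; everything here is illustrative):

	#include <stdio.h>

	#define EIOCBQUEUED 529	/* kernel-internal errno */

	static long merge_write_result(long result, long sync_err)
	{
		if (result > 0 || result == -EIOCBQUEUED) {
			if (sync_err < 0 && result > 0)
				result = sync_err;
		}
		return result;
	}

	int main(void)
	{
		printf("%ld\n", merge_write_result(8192, 0));	/* 8192 */
		printf("%ld\n", merge_write_result(8192, -5));	/* -5 */
		return 0;
	}
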
 
-#ifndef HAVE_VM_OP_FAULT
-static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
-{
-        cfs_page_t *vmpage;
-
-        vmpage = filemap_nopage(cfio->ft_vma, cfio->nopage.ft_address,
-                                cfio->nopage.ft_type);
-
-        if (vmpage == NOPAGE_SIGBUS) {
-                CDEBUG(D_PAGE, "got addr %lu type %lx - SIGBUS\n",
-                       cfio->nopage.ft_address,(long)cfio->nopage.ft_type);
-                return -EFAULT;
-        } else if (vmpage == NOPAGE_OOM) {
-                CDEBUG(D_PAGE, "got addr %lu type %lx - OOM\n",
-                       cfio->nopage.ft_address, (long)cfio->nopage.ft_type);
-                return -ENOMEM;
-        }
-
-        LL_CDEBUG_PAGE(D_PAGE, vmpage, "got addr %lu type %lx\n",
-                       cfio->nopage.ft_address, (long)cfio->nopage.ft_type);
-
-        cfio->ft_vmpage = vmpage;
-        lock_page(vmpage);
-
-        return 0;
-}
-#else
 static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 {
         struct vm_fault *vmf = cfio->fault.ft_vmf;
 
         cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
+       cfio->fault.ft_flags_valid = 1;
 
         if (vmf->page) {
                 LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
                                vmf->virtual_address);
                 if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
                         lock_page(vmf->page);
-                        cfio->fault.ft_flags &= VM_FAULT_LOCKED;
+                       cfio->fault.ft_flags |= VM_FAULT_LOCKED;
                 }
 
                 cfio->ft_vmpage = vmf->page;
@@ -685,7 +889,17 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
         return -EINVAL;
 }
 
-#endif
+static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
+                                   struct cl_page *page)
+{
+       struct ccc_page *cp;
+       struct cl_object *clob = cl_io_top(io)->ci_obj;
+
+       set_page_dirty(page->cp_vmpage);
+
+       cp = cl2ccc_page(cl_object_page_slice(clob, page));
+       vvp_write_pending(cl2ccc(clob), cp);
+}
 
 static int vvp_io_fault_start(const struct lu_env *env,
                               const struct cl_io_slice *ios)
@@ -698,10 +912,11 @@ static int vvp_io_fault_start(const struct lu_env *env,
        struct vvp_fault_io *cfio    = &vio->u.fault;
        loff_t               offset;
        int                  result  = 0;
-       cfs_page_t          *vmpage  = NULL;
+       struct page          *vmpage  = NULL;
        struct cl_page      *page;
        loff_t               size;
-       pgoff_t              last; /* last page in a file data region */
+       pgoff_t              last_index;
+       ENTRY;
 
         if (fio->ft_executable &&
             LTIME_S(inode->i_mtime) != vio->u.fault.ft_mtime)
@@ -713,8 +928,8 @@ static int vvp_io_fault_start(const struct lu_env *env,
         offset = cl_offset(obj, fio->ft_index + 1) - 1;
         LASSERT(cl_index(obj, offset) == fio->ft_index);
         result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
-        if (result != 0)
-                return result;
+       if (result != 0)
+               RETURN(result);
 
        /* must return locked page */
        if (fio->ft_mkwrite) {
@@ -723,7 +938,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
        } else {
                result = vvp_io_kernel_fault(cfio);
                if (result != 0)
-                       return result;
+                       RETURN(result);
        }
 
        vmpage = cfio->ft_vmpage;
@@ -744,16 +959,15 @@ static int vvp_io_fault_start(const struct lu_env *env,
                 GOTO(out, result = +1);
         }
 
+       last_index = cl_index(obj, size - 1);
 
        if (fio->ft_mkwrite ) {
-               pgoff_t last_index;
                /*
                 * Capture the size while holding the lli_trunc_sem from
                 * above; we want to make sure that we complete the mkwrite
                 * action while holding this lock. We also need to make sure
                 * that we are not past the end of the file.
                 */
-               last_index = cl_index(obj, size - 1);
                if (last_index < fio->ft_index) {
                        CDEBUG(D_PAGE,
                                "llite: mkwrite and truncate race happened: "
@@ -774,32 +988,39 @@ static int vvp_io_fault_start(const struct lu_env *env,
                }
        }
 
-        page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
-        if (IS_ERR(page))
-                GOTO(out, result = PTR_ERR(page));
-
-        /* if page is going to be written, we should add this page into cache
-         * earlier. */
-        if (fio->ft_mkwrite) {
-                wait_on_page_writeback(vmpage);
-                if (set_page_dirty(vmpage)) {
-                        struct ccc_page *cp;
+       page = cl_page_find(env, obj, fio->ft_index, vmpage, CPT_CACHEABLE);
+       if (IS_ERR(page))
+               GOTO(out, result = PTR_ERR(page));
 
-                        /* vvp_page_assume() calls wait_on_page_writeback(). */
-                        cl_page_assume(env, io, page);
-
-                        cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
-                        vvp_write_pending(cl2ccc(obj), cp);
-
-                        /* Do not set Dirty bit here so that in case IO is
-                         * started before the page is really made dirty, we
-                         * still have chance to detect it. */
-                        result = cl_page_cache_add(env, io, page, CRT_WRITE);
+       /* if the page is going to be written, we should add this page into
+        * the cache earlier. */
+       if (fio->ft_mkwrite) {
+               wait_on_page_writeback(vmpage);
+               if (!PageDirty(vmpage)) {
+                       struct cl_page_list *plist = &io->ci_queue.c2_qin;
+                       struct ccc_page *cp = cl_object_page_slice(obj, page);
+                       int to = PAGE_SIZE;
+
+                       /* vvp_page_assume() calls wait_on_page_writeback(). */
+                       cl_page_assume(env, io, page);
+
+                       cl_page_list_init(plist);
+                       cl_page_list_add(plist, page);
+
+                       /* size fixup */
+                       if (last_index == ccc_index(cp))
+                               to = size & ~CFS_PAGE_MASK;
+
+                       /* Do not set Dirty bit here so that in case IO is
+                        * started before the page is really made dirty, we
+                        * still have a chance to detect it. */
+                       result = cl_io_commit_async(env, io, plist, 0, to,
+                                                   mkwrite_commit_callback);
                        LASSERT(cl_page_is_owned(page, io));
+                       cl_page_list_fini(env, plist);
 
                        vmpage = NULL;
                        if (result < 0) {
-                               cl_page_unmap(env, io, page);
                                cl_page_discard(env, io, page);
                                cl_page_disown(env, io, page);
 
@@ -814,15 +1035,14 @@ static int vvp_io_fault_start(const struct lu_env *env,
                }
        }
 
-       last = cl_index(obj, size - 1);
        /*
         * The ft_index is only used in the case of a mkwrite action.
         * We need to check that our assertions are correct, since we
         * should have caught this above.
         */
-       LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
-        if (fio->ft_index == last)
+       LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
+       if (fio->ft_index == last_index)
                 /*
                  * Last page is mapped partially.
                  */
@@ -838,9 +1058,7 @@ out:
        /* return unlocked vmpage to avoid deadlocking */
        if (vmpage != NULL)
                unlock_page(vmpage);
-#ifdef HAVE_VM_OP_FAULT
        cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
-#endif
        return result;
 }
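
In the mkwrite path above, a fault on the last page of the file commits
only the bytes up to i_size within that page ("to = size & ~CFS_PAGE_MASK").
A standalone sketch, assuming CFS_PAGE_MASK is ~(PAGE_SIZE - 1) as in the
Lustre portability headers:

	#include <stdio.h>

	#define PAGE_SIZE     4096ul
	#define CFS_PAGE_MASK (~(PAGE_SIZE - 1))

	static unsigned long commit_to(unsigned long size, unsigned long index,
				       unsigned long last_index)
	{
		unsigned long to = PAGE_SIZE;

		if (index == last_index)	/* partial last page */
			to = size & ~CFS_PAGE_MASK;
		return to;
	}

	int main(void)
	{
		/* i_size 10000: page 2 commits [0, 1808), page 1 a full page */
		printf("%lu\n", commit_to(10000, 2, 2));	/* 1808 */
		printf("%lu\n", commit_to(10000, 1, 2));	/* 4096 */
		return 0;
	}
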
 
@@ -857,284 +1075,38 @@ static int vvp_io_read_page(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             const struct cl_page_slice *slice)
 {
-        struct cl_io              *io     = ios->cis_io;
-        struct cl_object          *obj    = slice->cpl_obj;
-        struct ccc_page           *cp     = cl2ccc_page(slice);
-        struct cl_page            *page   = slice->cpl_page;
-        struct inode              *inode  = ccc_object_inode(obj);
-        struct ll_sb_info         *sbi    = ll_i2sbi(inode);
-        struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
-        struct ll_readahead_state *ras    = &fd->fd_ras;
-        cfs_page_t                *vmpage = cp->cpg_page;
-        struct cl_2queue          *queue  = &io->ci_queue;
-        int rc;
-
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-        LASSERT(slice->cpl_obj == obj);
-
-        ENTRY;
-
-        if (sbi->ll_ra_info.ra_max_pages_per_file &&
-            sbi->ll_ra_info.ra_max_pages)
-                ras_update(sbi, inode, ras, page->cp_index,
-                           cp->cpg_defer_uptodate);
-
-        /* Sanity check whether the page is protected by a lock. */
-        rc = cl_page_is_under_lock(env, io, page);
-        if (rc != -EBUSY) {
-                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
-                               rc == -ENODATA ? "without a lock" :
-                               "match failed", rc);
-                if (rc != -ENODATA)
-                        RETURN(rc);
-        }
+       struct cl_io              *io     = ios->cis_io;
+       struct ccc_page           *cp     = cl2ccc_page(slice);
+       struct cl_page            *page   = slice->cpl_page;
+       struct inode              *inode  = ccc_object_inode(slice->cpl_obj);
+       struct ll_sb_info         *sbi    = ll_i2sbi(inode);
+       struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
+       struct ll_readahead_state *ras    = &fd->fd_ras;
+       struct cl_2queue          *queue  = &io->ci_queue;
+
+       ENTRY;
+
+       if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+           sbi->ll_ra_info.ra_max_pages > 0)
+               ras_update(sbi, inode, ras, ccc_index(cp),
+                          cp->cpg_defer_uptodate);
 
         if (cp->cpg_defer_uptodate) {
                 cp->cpg_ra_used = 1;
                 cl_page_export(env, page, 1);
         }
+
         /*
          * Add page into the queue even when it is marked uptodate above.
          * This will unlock it automatically as part of cl_page_list_disown().
          */
         cl_2queue_add(queue, page);
-        if (sbi->ll_ra_info.ra_max_pages_per_file &&
-            sbi->ll_ra_info.ra_max_pages)
-                ll_readahead(env, io, ras,
-                             vmpage->mapping, &queue->c2_qin, fd->fd_flags);
+       if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+           sbi->ll_ra_info.ra_max_pages > 0)
+               ll_readahead(env, io, &queue->c2_qin, ras,
+                            cp->cpg_defer_uptodate);
 
-        RETURN(0);
-}
-
-static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
-                            struct cl_page *page, struct ccc_page *cp,
-                            enum cl_req_type crt)
-{
-        struct cl_2queue  *queue;
-        int result;
-
-        LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
-        queue = &io->ci_queue;
-        cl_2queue_init_page(queue, page);
-
-       result = cl_io_submit_sync(env, io, crt, queue, 0);
-        LASSERT(cl_page_is_owned(page, io));
-
-        if (crt == CRT_READ)
-                /*
-                 * in CRT_WRITE case page is left locked even in case of
-                 * error.
-                 */
-                cl_page_list_disown(env, io, &queue->c2_qin);
-        cl_2queue_fini(env, queue);
-
-        return result;
-}
-
-/**
- * Prepare partially written-to page for a write.
- */
-static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
-                                  struct cl_object *obj, struct cl_page *pg,
-                                  struct ccc_page *cp,
-                                  unsigned from, unsigned to)
-{
-        struct cl_attr *attr   = ccc_env_thread_attr(env);
-        loff_t          offset = cl_offset(obj, pg->cp_index);
-        int             result;
-
-        cl_object_attr_lock(obj);
-        result = cl_object_attr_get(env, obj, attr);
-        cl_object_attr_unlock(obj);
-        if (result == 0) {
-                /*
-                 * If are writing to a new page, no need to read old data.
-                 * The extent locking will have updated the KMS, and for our
-                 * purposes here we can treat it like i_size.
-                 */
-                if (attr->cat_kms <= offset) {
-                        char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);
-
-                        memset(kaddr, 0, cl_page_size(obj));
-                        ll_kunmap_atomic(kaddr, KM_USER0);
-                } else if (cp->cpg_defer_uptodate)
-                        cp->cpg_ra_used = 1;
-                else
-                        result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
-                /*
-                 * In older implementations, obdo_refresh_inode is called here
-                 * to update the inode because the write might modify the
-                 * object info at OST. However, this has been proven useless,
-                 * since LVB functions will be called when user space program
-                 * tries to retrieve inode attribute.  Also, see bug 15909 for
-                 * details. -jay
-                 */
-                if (result == 0)
-                        cl_page_export(env, pg, 1);
-        }
-        return result;
-}
-
-static int vvp_io_prepare_write(const struct lu_env *env,
-                                const struct cl_io_slice *ios,
-                                const struct cl_page_slice *slice,
-                                unsigned from, unsigned to)
-{
-        struct cl_object *obj    = slice->cpl_obj;
-        struct ccc_page  *cp     = cl2ccc_page(slice);
-        struct cl_page   *pg     = slice->cpl_page;
-        cfs_page_t       *vmpage = cp->cpg_page;
-
-        int result;
-
-        ENTRY;
-
-        LINVRNT(cl_page_is_vmlocked(env, pg));
-        LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
-
-        result = 0;
-
-        CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
-        if (!PageUptodate(vmpage)) {
-                /*
-                 * We're completely overwriting an existing page, so _don't_
-                 * set it up to date until commit_write
-                 */
-                if (from == 0 && to == CFS_PAGE_SIZE) {
-                        CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
-                        POISON_PAGE(page, 0x11);
-                } else
-                        result = vvp_io_prepare_partial(env, ios->cis_io, obj,
-                                                        pg, cp, from, to);
-        } else
-                CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
-        RETURN(result);
-}
-
-static int vvp_io_commit_write(const struct lu_env *env,
-                               const struct cl_io_slice *ios,
-                               const struct cl_page_slice *slice,
-                               unsigned from, unsigned to)
-{
-        struct cl_object  *obj    = slice->cpl_obj;
-        struct cl_io      *io     = ios->cis_io;
-        struct ccc_page   *cp     = cl2ccc_page(slice);
-        struct cl_page    *pg     = slice->cpl_page;
-        struct inode      *inode  = ccc_object_inode(obj);
-        struct ll_sb_info *sbi    = ll_i2sbi(inode);
-       struct ll_inode_info *lli = ll_i2info(inode);
-        cfs_page_t        *vmpage = cp->cpg_page;
-
-        int    result;
-        int    tallyop;
-        loff_t size;
-
-        ENTRY;
-
-        LINVRNT(cl_page_is_vmlocked(env, pg));
-        LASSERT(vmpage->mapping->host == inode);
-
-        LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "commiting page write\n");
-        CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
-
-        /*
-         * queue a write for some time in the future the first time we
-         * dirty the page.
-         *
-         * This is different from what other file systems do: they usually
-         * just mark page (and some of its buffers) dirty and rely on
-         * balance_dirty_pages() to start a write-back. Lustre wants write-back
-         * to be started earlier for the following reasons:
-         *
-         *     (1) with a large number of clients we need to limit the amount
-         *     of cached data on the clients a lot;
-         *
-         *     (2) large compute jobs generally want compute-only then io-only
-         *     and the IO should complete as quickly as possible;
-         *
-         *     (3) IO is batched up to the RPC size and is async until the
-         *     client max cache is hit
-         *     (/proc/fs/lustre/osc/OSC.../max_dirty_mb)
-         *
-         */
-        if (!PageDirty(vmpage)) {
-                tallyop = LPROC_LL_DIRTY_MISSES;
-                result = cl_page_cache_add(env, io, pg, CRT_WRITE);
-                if (result == 0) {
-                        /* page was added into cache successfully. */
-                        set_page_dirty(vmpage);
-                        vvp_write_pending(cl2ccc(obj), cp);
-                } else if (result == -EDQUOT) {
-                        pgoff_t last_index = i_size_read(inode) >> CFS_PAGE_SHIFT;
-                        bool need_clip = true;
-
-                        /*
-                         * Client ran out of disk space grant. Possible
-                         * strategies are:
-                         *
-                         *     (a) do a sync write, renewing grant;
-                         *
-                         *     (b) stop writing on this stripe, switch to the
-                         *     next one.
-                         *
-                         * (b) is a part of "parallel io" design that is the
-                         * ultimate goal. (a) is what "old" client did, and
-                         * what the new code continues to do for the time
-                         * being.
-                         */
-                        if (last_index > pg->cp_index) {
-                                to = CFS_PAGE_SIZE;
-                                need_clip = false;
-                        } else if (last_index == pg->cp_index) {
-                                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
-                                if (to < size_to)
-                                        to = size_to;
-                        }
-                        if (need_clip)
-                                cl_page_clip(env, pg, 0, to);
-                        result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
-                        if (result)
-                                CERROR("Write page %lu of inode %p failed %d\n",
-                                       pg->cp_index, inode, result);
-                }
-        } else {
-                tallyop = LPROC_LL_DIRTY_HITS;
-                result = 0;
-        }
-        ll_stats_ops_tally(sbi, tallyop, 1);
-
-       /* Inode should be marked DIRTY even if no new page was marked DIRTY
-        * because page could have been not flushed between 2 modifications.
-        * It is important the file is marked DIRTY as soon as the I/O is done
-        * Indeed, when cache is flushed, file could be already closed and it
-        * is too late to warn the MDT.
-        * It is acceptable that file is marked DIRTY even if I/O is dropped
-        * for some reasons before being flushed to OST.
-        */
-       if (result == 0) {
-               spin_lock(&lli->lli_lock);
-               lli->lli_flags |= LLIF_DATA_MODIFIED;
-               spin_unlock(&lli->lli_lock);
-       }
-
-        size = cl_offset(obj, pg->cp_index) + to;
-
-       ll_inode_size_lock(inode);
-        if (result == 0) {
-                if (size > i_size_read(inode)) {
-                        cl_isize_write_nolock(inode, size);
-                        CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
-                               PFID(lu_object_fid(&obj->co_lu)),
-                               (unsigned long)size);
-                }
-                cl_page_export(env, pg, 1);
-        } else {
-                if (size > i_size_read(inode))
-                        cl_page_discard(env, io, pg);
-        }
-       ll_inode_size_unlock(inode);
-       RETURN(result);
+       RETURN(0);
 }
 
 static const struct cl_io_operations vvp_io_ops = {
@@ -1146,10 +1118,12 @@ static const struct cl_io_operations vvp_io_ops = {
                         .cio_advance   = ccc_io_advance
                 },
                 [CIT_WRITE] = {
-                        .cio_fini      = vvp_io_fini,
-                        .cio_lock      = vvp_io_write_lock,
-                        .cio_start     = vvp_io_write_start,
-                        .cio_advance   = ccc_io_advance
+                       .cio_fini      = vvp_io_fini,
+                       .cio_iter_init = vvp_io_write_iter_init,
+                       .cio_iter_fini = vvp_io_write_iter_fini,
+                       .cio_lock      = vvp_io_write_lock,
+                       .cio_start     = vvp_io_write_start,
+                       .cio_advance   = ccc_io_advance
                 },
                 [CIT_SETATTR] = {
                         .cio_fini       = vvp_io_setattr_fini,
@@ -1174,8 +1148,6 @@ static const struct cl_io_operations vvp_io_ops = {
                 }
         },
         .cio_read_page     = vvp_io_read_page,
-        .cio_prepare_write = vvp_io_prepare_write,
-        .cio_commit_write  = vvp_io_commit_write
 };
 
 int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
@@ -1189,6 +1161,12 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
         CLOBINVRNT(env, obj, ccc_object_invariant(obj));
         ENTRY;
 
+       CDEBUG(D_VFSTRACE, DFID" ignore/verify layout %d/%d, layout version %d "
+                          "restore needed %d\n",
+              PFID(lu_object_fid(&obj->co_lu)),
+              io->ci_ignore_layout, io->ci_verify_layout,
+              cio->cui_layout_gen, io->ci_restore_needed);
+
         CL_IO_SLICE_CLEAN(cio, cui_cl);
         cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
         vio->cui_ra_window_set = 0;
@@ -1206,6 +1184,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
                         cio->cui_tot_count = count;
                         cio->cui_tot_nrsegs = 0;
                 }
+
                /* for read/write, we store the jobid in the inode, and
                 * it'll be fetched by osc when building RPC.
                 *
@@ -1251,4 +1230,3 @@ static struct vvp_io *cl2vvp_io(const struct lu_env *env,
         cl2ccc_io(env, slice);
         return vvp_env_io(env);
 }
-