Whamcloud - gitweb
LU-2675 llite: remove lli_lvb
[fs/lustre-release.git] / lustre / llite / vvp_io.c
index 798329e..df9a83a 100644 (file)
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#ifndef __KERNEL__
-# error This file is kernel only.
-#endif
 
 #include <obd.h>
-#include <lustre_lite.h>
-
 #include "vvp_internal.h"
 
 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
@@ -238,7 +233,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
         unsigned long           addr;
         unsigned long           seg;
         ssize_t                 count;
-        int                     result;
+       int                     result = 0;
         ENTRY;
 
         LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
@@ -269,13 +264,13 @@ static int vvp_mmap_locks(const struct lu_env *env,
                         struct inode *inode = vma->vm_file->f_dentry->d_inode;
                         int flags = CEF_MUST;
 
-                        if (ll_file_nolock(vma->vm_file)) {
-                                /*
-                                 * For no lock case, a lockless lock will be
-                                 * generated.
-                                 */
-                                flags = CEF_NEVER;
-                        }
+                       if (ll_file_nolock(vma->vm_file)) {
+                               /*
+                                * The "nolock" mount case is not allowed for mmap
+                                */
+                               result = -EINVAL;
+                               break;
+                       }
 
                         /*
                          * XXX: Required lock mode can be weakened: CIT_WRITE
@@ -296,20 +291,20 @@ static int vvp_mmap_locks(const struct lu_env *env,
                                descr->cld_mode, descr->cld_start,
                                descr->cld_end);
 
-                       if (result < 0) {
-                               up_read(&mm->mmap_sem);
-                               RETURN(result);
-                       }
+                       if (result < 0)
+                               break;
 
-                        if (vma->vm_end - addr >= count)
-                                break;
+                       if (vma->vm_end - addr >= count)
+                               break;
 
-                        count -= vma->vm_end - addr;
-                        addr = vma->vm_end;
-                }
-                up_read(&mm->mmap_sem);
-        }
-        RETURN(0);
+                       count -= vma->vm_end - addr;
+                       addr = vma->vm_end;
+               }
+               up_read(&mm->mmap_sem);
+               if (result < 0)
+                       break;
+       }
+       RETURN(result);
 }
 
 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
@@ -335,7 +330,7 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
 static int vvp_io_read_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
 {
-       struct cl_io            *io = ios->cis_io;
+       struct cl_io            *io  = ios->cis_io;
        struct cl_io_rw_common  *rd = &io->u.ci_rd.rd;
        int result;
 
@@ -609,11 +604,13 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
                page = cl_page_list_first(plist);
                if (plist->pl_nr == 1) {
                        cl_page_clip(env, page, from, to);
-               } else if (from > 0) {
-                       cl_page_clip(env, page, from, PAGE_SIZE);
                } else {
-                       page = cl_page_list_last(plist);
-                       cl_page_clip(env, page, 0, to);
+                       if (from > 0)
+                               cl_page_clip(env, page, from, PAGE_SIZE);
+                       if (to != PAGE_SIZE) {
+                               page = cl_page_list_last(plist);
+                               cl_page_clip(env, page, 0, to);
+                       }
                }
        }
 
@@ -665,7 +662,7 @@ static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
        cl_page_disown(env, io, page);
 
        /* held in ll_cl_init() */
-       lu_ref_del(&page->cp_reference, "cl_io", io);
+       lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
        cl_page_put(env, page);
 }
 
@@ -752,7 +749,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
        }
 
        /* update inode size */
-       ll_merge_lvb(env, inode);
+       ll_merge_attr(env, inode);
 
        /* Now the pages in queue were failed to commit, discard them
         * unless they were dirtied before. */
@@ -795,20 +792,40 @@ static int vvp_io_write_start(const struct lu_env *env,
                  * PARALLEL IO This has to be changed for parallel IO doing
                  * out-of-order writes.
                  */
+               ll_merge_attr(env, inode);
                 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
                 cio->cui_iocb->ki_pos = pos;
         } else {
                LASSERT(cio->cui_iocb->ki_pos == pos);
        }
 
-        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+       CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
 
-        if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
-                result = 0;
-        else
-               result = generic_file_aio_write(cio->cui_iocb,
-                                               cio->cui_iov, cio->cui_nrsegs,
-                                               cio->cui_iocb->ki_pos);
+       if (cio->cui_iov == NULL) {
+               /* from a temp io in ll_cl_init(). */
+               result = 0;
+       } else {
+               /*
+                * When using the locked AIO function (generic_file_aio_write())
+                * testing has shown the inode mutex to be a limiting factor
+                * with multi-threaded single shared file performance. To get
+                * around this, we now use the lockless version. To maintain
+                * consistency, proper locking to protect against writes,
+                * truncates, etc. is handled in the higher layers of lustre.
+                */
+               result = __generic_file_aio_write(cio->cui_iocb,
+                                                 cio->cui_iov, cio->cui_nrsegs,
+                                                 &cio->cui_iocb->ki_pos);
+               if (result > 0 || result == -EIOCBQUEUED) {
+                       ssize_t err;
+
+                       err = generic_write_sync(cio->cui_iocb->ki_filp,
+                                                pos, result);
+                       if (err < 0 && result > 0)
+                               result = err;
+               }
+
+       }
        if (result > 0) {
                result = vvp_io_write_commit(env, io);
                if (cio->u.write.cui_written > 0) {
@@ -841,6 +858,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
         struct vm_fault *vmf = cfio->fault.ft_vmf;
 
         cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
+       cfio->fault.ft_flags_valid = 1;
 
         if (vmf->page) {
                 LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",