LU-2675 llite: remove lli_lvb
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 380a7e5..df9a83a 100644
 
 #define DEBUG_SUBSYSTEM S_LLITE
 
-#ifndef __KERNEL__
-# error This file is kernel only.
-#endif
 
 #include <obd.h>
-#include <lustre_lite.h>
-
 #include "vvp_internal.h"
 
 static struct vvp_io *cl2vvp_io(const struct lu_env *env,
                                 const struct cl_io_slice *slice);
 
 /**
- * True, if \a io is a normal io, False for sendfile() / splice_{read|write}
+ * True, if \a io is a normal io, False for splice_{read,write}
  */
 int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
 {
@@ -83,7 +78,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
        case CIT_WRITE:
                /* don't need lock here to check lli_layout_gen as we have held
                 * extent lock and GROUP lock has to hold to swap layout */
-               if (lli->lli_layout_gen != cio->cui_layout_gen) {
+               if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
                        io->ci_need_restart = 1;
                        /* this will return application a short read/write */
                        io->ci_continue = 0;
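
Note: ll_layout_version_get() replaces the open-coded read of
lli_layout_gen. A minimal sketch of what such an accessor is assumed to
look like (the lli_layout_lock spinlock name is an assumption, not
something this patch shows):

	static inline __u32 ll_layout_version_get(struct ll_inode_info *lli)
	{
		__u32 gen;

		/* assumed: serialize the read against a concurrent layout swap */
		spin_lock(&lli->lli_layout_lock);
		gen = lli->lli_layout_gen;
		spin_unlock(&lli->lli_layout_lock);

		return gen;
	}
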
@@ -215,7 +210,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
         vvp_io_fini(env, ios);
 }
 
-enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
+static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
 {
         /*
          * we only want to hold PW locks if the mmap() can generate
@@ -238,7 +233,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
         unsigned long           addr;
         unsigned long           seg;
         ssize_t                 count;
-        int                     result;
+       int                     result = 0;
         ENTRY;
 
         LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
@@ -269,13 +264,13 @@ static int vvp_mmap_locks(const struct lu_env *env,
                         struct inode *inode = vma->vm_file->f_dentry->d_inode;
                         int flags = CEF_MUST;
 
-                        if (ll_file_nolock(vma->vm_file)) {
-                                /*
-                                 * For no lock case, a lockless lock will be
-                                 * generated.
-                                 */
-                                flags = CEF_NEVER;
-                        }
+                       if (ll_file_nolock(vma->vm_file)) {
+                               /*
+                                * mmap is not allowed for the no-lock case
+                                */
+                               result = -EINVAL;
+                               break;
+                       }
 
                         /*
                          * XXX: Required lock mode can be weakened: CIT_WRITE
@@ -296,18 +291,20 @@ static int vvp_mmap_locks(const struct lu_env *env,
                                descr->cld_mode, descr->cld_start,
                                descr->cld_end);
 
-                        if (result < 0)
-                                RETURN(result);
+                       if (result < 0)
+                               break;
 
-                        if (vma->vm_end - addr >= count)
-                                break;
+                       if (vma->vm_end - addr >= count)
+                               break;
 
-                        count -= vma->vm_end - addr;
-                        addr = vma->vm_end;
-                }
-                up_read(&mm->mmap_sem);
-        }
-        RETURN(0);
+                       count -= vma->vm_end - addr;
+                       addr = vma->vm_end;
+               }
+               up_read(&mm->mmap_sem);
+               if (result < 0)
+                       break;
+       }
+       RETURN(result);
 }
 
 static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
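
Note: two behaviors change in vvp_mmap_locks(). First, buffers mmap()ed
from a file opened with the no-lock option no longer fall back to a
lockless lock (CEF_NEVER); such I/O now fails with -EINVAL. Second, the
removed RETURN(result) fired between down_read() and up_read(), so the
old error path returned with mm->mmap_sem still held. With result now
initialized to 0 and only break used inside the vma walk, the semaphore
is released on every path before the outer per-segment loop checks
result. The control flow, stripped to its shape:

	down_read(&mm->mmap_sem);
	while (/* vmas covering this iov segment remain */) {
		/* ... build and enqueue the lock request ... */
		if (result < 0)
			break;          /* leave the vma walk, lock still held */
	}
	up_read(&mm->mmap_sem);         /* released on success and failure */
	if (result < 0)
		break;                  /* only then leave the segment loop */
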
@@ -333,20 +330,14 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
 static int vvp_io_read_lock(const struct lu_env *env,
                             const struct cl_io_slice *ios)
 {
-        struct cl_io         *io  = ios->cis_io;
-        struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
-        int result;
+       struct cl_io            *io  = ios->cis_io;
+       struct cl_io_rw_common  *rd = &io->u.ci_rd.rd;
+       int result;
 
-        ENTRY;
-        /* XXX: Layer violation, we shouldn't see lsm at llite level. */
-       if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
-                result = vvp_io_rw_lock(env, io, CLM_READ,
-                                        io->u.ci_rd.rd.crw_pos,
-                                        io->u.ci_rd.rd.crw_pos +
-                                        io->u.ci_rd.rd.crw_count - 1);
-        else
-                result = 0;
-        RETURN(result);
+       ENTRY;
+       result = vvp_io_rw_lock(env, io, CLM_READ, rd->crw_pos,
+                               rd->crw_pos + rd->crw_count - 1);
+       RETURN(result);
 }
 
 static int vvp_io_fault_lock(const struct lu_env *env,
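
Note: the read-lock path also stops peeking at the layout from llite.
The old code skipped DLM locking for lsm-less files (the "layer
violation" its own comment complained about); the new code requests the
CLM_READ lock unconditionally and presumably leaves layout-less files to
the layers below, where the stripe information actually lives.
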
@@ -418,7 +409,6 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
 static int vvp_do_vmtruncate(struct inode *inode, size_t size)
 {
        int     result;
-       loff_t oldsize;
 
        /*
         * Only ll_inode_size_lock is taken at this level.
@@ -429,10 +419,9 @@ static int vvp_do_vmtruncate(struct inode *inode, size_t size)
                ll_inode_size_unlock(inode);
                return result;
        }
-       oldsize = inode->i_size;
        i_size_write(inode, size);
 
-       truncate_pagecache(inode, oldsize, size);
+       ll_truncate_pagecache(inode, size);
        ll_inode_size_unlock(inode);
        return result;
 }
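
Note: mainline truncate_pagecache() dropped its oldsize parameter, which
is why oldsize is no longer computed above. ll_truncate_pagecache() is
presumably a small compatibility wrapper along these lines (the HAVE_*
guard name is an assumption, not shown by this patch):

	static inline void ll_truncate_pagecache(struct inode *inode, loff_t size)
	{
	#ifdef HAVE_TRUNCATE_PAGECACHE_OLD_SIZE
		/* older kernels took (inode, oldsize, newsize); the oldsize
		 * argument was only an unmap hint, so 0 is safe here */
		truncate_pagecache(inode, 0, size);
	#else
		truncate_pagecache(inode, size);
	#endif
	}
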
@@ -507,20 +496,6 @@ static void vvp_io_setattr_fini(const struct lu_env *env,
        vvp_io_fini(env, ios);
 }
 
-static ssize_t lustre_generic_file_read(struct file *file,
-                                        struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_aio_read(vio->cui_iocb, vio->cui_iov,
-                                     vio->cui_nrsegs, *ppos);
-}
-
-static ssize_t lustre_generic_file_write(struct file *file,
-                                        struct ccc_io *vio, loff_t *ppos)
-{
-        return generic_file_aio_write(vio->cui_iocb, vio->cui_iov,
-                                      vio->cui_nrsegs, *ppos);
-}
-
 static int vvp_io_read_start(const struct lu_env *env,
                              const struct cl_io_slice *ios)
 {
@@ -570,8 +545,11 @@ static int vvp_io_read_start(const struct lu_env *env,
         file_accessed(file);
         switch (vio->cui_io_subtype) {
         case IO_NORMAL:
-                 result = lustre_generic_file_read(file, cio, &pos);
-                 break;
+               LASSERT(cio->cui_iocb->ki_pos == pos);
+               result = generic_file_aio_read(cio->cui_iocb,
+                                              cio->cui_iov, cio->cui_nrsegs,
+                                              cio->cui_iocb->ki_pos);
+               break;
         case IO_SPLICE:
                 result = generic_file_splice_read(file, &pos,
                                 vio->u.splice.cui_pipe, cnt,
@@ -622,14 +600,18 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
        if (plist->pl_nr == 0)
                RETURN(0);
 
-       if (from != 0) {
+       if (from > 0 || to != PAGE_SIZE) {
                page = cl_page_list_first(plist);
-               cl_page_clip(env, page, from,
-                            plist->pl_nr == 1 ? to : PAGE_SIZE);
-       }
-       if (to != PAGE_SIZE && plist->pl_nr > 1) {
-               page = cl_page_list_last(plist);
-               cl_page_clip(env, page, 0, to);
+               if (plist->pl_nr == 1) {
+                       cl_page_clip(env, page, from, to);
+               } else {
+                       if (from > 0)
+                               cl_page_clip(env, page, from, PAGE_SIZE);
+                       if (to != PAGE_SIZE) {
+                               page = cl_page_list_last(plist);
+                               cl_page_clip(env, page, 0, to);
+                       }
+               }
        }
 
        cl_2queue_init(queue);
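
Note: besides reading better, the rewritten clipping fixes a missed
case. Under the old code a single-page list with from == 0 and
to < PAGE_SIZE matched neither branch and was never clipped at all.
Worked example with PAGE_SIZE == 4096: for from = 0, to = 2048 on a
one-page list the old code did nothing while the new code clips the
page to [0, 2048); for from = 512 on a three-page list both versions
clip the first page to [512, 4096) and the last page to [0, 2048).
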
@@ -652,7 +634,7 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
 
                        cl_page_clip(env, page, 0, PAGE_SIZE);
 
-                       SetPageUptodate(cl_page_vmpage(env, page));
+                       SetPageUptodate(cl_page_vmpage(page));
                        cl_page_disown(env, io, page);
 
                        /* held in ll_cl_init() */
@@ -667,39 +649,40 @@ static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
 static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
                                struct cl_page *page)
 {
-       const struct cl_page_slice *slice;
        struct ccc_page *cp;
-       struct page *vmpage;
-
-       slice = cl_page_at(page, &vvp_device_type);
-       cp = cl2ccc_page(slice);
-       vmpage = cp->cpg_page;
+       struct page *vmpage = page->cp_vmpage;
+       struct cl_object *clob = cl_io_top(io)->ci_obj;
 
        SetPageUptodate(vmpage);
        set_page_dirty(vmpage);
-       vvp_write_pending(cl2ccc(slice->cpl_obj), cp);
+
+       cp = cl2ccc_page(cl_object_page_slice(clob, page));
+       vvp_write_pending(cl2ccc(clob), cp);
 
        cl_page_disown(env, io, page);
 
        /* held in ll_cl_init() */
-       lu_ref_del(&page->cp_reference, "cl_io", io);
+       lu_ref_del(&page->cp_reference, "cl_io", cl_io_top(io));
        cl_page_put(env, page);
 }
 
 /* make sure the page list is contiguous */
-static bool page_list_sanity_check(struct cl_page_list *plist)
+static bool page_list_sanity_check(struct cl_object *obj,
+                                  struct cl_page_list *plist)
 {
        struct cl_page *page;
        pgoff_t index = CL_PAGE_EOF;
 
        cl_page_list_for_each(page, plist) {
+               struct ccc_page *cp =
+                       cl2ccc_page(cl_object_page_slice(obj, page));
+
                if (index == CL_PAGE_EOF) {
-                       index = page->cp_index;
+                       index = ccc_index(cp);
                        continue;
                }
 
                ++index;
-               if (index == page->cp_index)
+               if (index == ccc_index(cp))
                        continue;
 
                return false;
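
Note: throughout this hunk the per-layer page state is reached through
the object instead of the page. cl_object_page_slice() replaces the
cl_page_at(page, &vvp_device_type) lookup, the VM page comes straight
from the new page->cp_vmpage field, and the page index moves behind the
ccc_index() accessor rather than being read from page->cp_index. The
lu_ref_del() naming cl_io_top(io) presumably matches the reference that
ll_cl_init() takes on the top-level io, per the "held in ll_cl_init()"
comment.
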
@@ -726,7 +709,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
        CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
                npages, cio->u.write.cui_from, cio->u.write.cui_to);
 
-       LASSERT(page_list_sanity_check(queue));
+       LASSERT(page_list_sanity_check(obj, queue));
 
        /* submit IO with async write */
        rc = cl_io_commit_async(env, io, queue,
@@ -751,7 +734,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
                /* the first page must have been written. */
                cio->u.write.cui_from = 0;
        }
-       LASSERT(page_list_sanity_check(queue));
+       LASSERT(page_list_sanity_check(obj, queue));
        LASSERT(ergo(rc == 0, queue->pl_nr == 0));
 
        /* out of quota, try sync write */
@@ -766,7 +749,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
        }
 
        /* update inode size */
-       ll_merge_lvb(env, inode);
+       ll_merge_attr(env, inode);
 
        /* Now the pages in queue were failed to commit, discard them
         * unless they were dirtied before. */
@@ -774,7 +757,7 @@ int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
                page = cl_page_list_first(queue);
                cl_page_list_del(env, queue, page);
 
-               if (!PageDirty(cl_page_vmpage(env, page)))
+               if (!PageDirty(cl_page_vmpage(page)))
                        cl_page_discard(env, io, page);
 
                cl_page_disown(env, io, page);
@@ -795,7 +778,6 @@ static int vvp_io_write_start(const struct lu_env *env,
         struct cl_io       *io    = ios->cis_io;
         struct cl_object   *obj   = io->ci_obj;
         struct inode       *inode = ccc_object_inode(obj);
-        struct file        *file  = cio->cui_fd->fd_file;
         ssize_t result = 0;
         loff_t pos = io->u.ci_wr.wr.crw_pos;
         size_t cnt = io->u.ci_wr.wr.crw_count;
@@ -803,24 +785,47 @@ static int vvp_io_write_start(const struct lu_env *env,
         ENTRY;
 
        if (!can_populate_pages(env, io, inode))
-               return 0;
+               RETURN(0);
 
         if (cl_io_is_append(io)) {
                 /*
                  * PARALLEL IO This has to be changed for parallel IO doing
                  * out-of-order writes.
                  */
+               ll_merge_attr(env, inode);
                 pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
                 cio->cui_iocb->ki_pos = pos;
-        }
+        } else {
+               LASSERT(cio->cui_iocb->ki_pos == pos);
+       }
 
-        CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
+       CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
 
-        if (cio->cui_iov == NULL) /* from a temp io in ll_cl_init(). */
-                result = 0;
-        else
-                result = lustre_generic_file_write(file, cio, &pos);
+       if (cio->cui_iov == NULL) {
+               /* from a temp io in ll_cl_init(). */
+               result = 0;
+       } else {
+               /*
+                * When using the locked AIO function (generic_file_aio_write())
+                * testing has shown the inode mutex to be a limiting factor
+                * with multi-threaded single shared file performance. To get
+                * around this, we now use the lockless version. To maintain
+                * consistency, proper locking to protect against writes,
+                * truncates, etc. is handled in the higher layers of lustre.
+                */
+               result = __generic_file_aio_write(cio->cui_iocb,
+                                                 cio->cui_iov, cio->cui_nrsegs,
+                                                 &cio->cui_iocb->ki_pos);
+               if (result > 0 || result == -EIOCBQUEUED) {
+                       ssize_t err;
+
+                       err = generic_write_sync(cio->cui_iocb->ki_filp,
+                                                pos, result);
+                       if (err < 0 && result > 0)
+                               result = err;
+               }
 
+       }
        if (result > 0) {
                result = vvp_io_write_commit(env, io);
                if (cio->u.write.cui_written > 0) {
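
Note: the removed lustre_generic_file_write() wrapper called
generic_file_aio_write(), which serializes writers on i_mutex. As the
added comment explains, consistency is now guaranteed by Lustre's own
higher-level locking, so the lockless __generic_file_aio_write() is used
instead. The bare __ variant also skips the O_SYNC/IS_SYNC handling its
locked counterpart performs, which is why generic_write_sync() must now
be called by hand, with its error folded into result only when the write
itself succeeded. The new LASSERTs here and in vvp_io_read_start() pin
cui_iocb->ki_pos to the cl_io position, making the kiocb the single
source of the file offset.
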
@@ -853,13 +858,14 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
         struct vm_fault *vmf = cfio->fault.ft_vmf;
 
         cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
+       cfio->fault.ft_flags_valid = 1;
 
         if (vmf->page) {
                 LL_CDEBUG_PAGE(D_PAGE, vmf->page, "got addr %p type NOPAGE\n",
                                vmf->virtual_address);
                 if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
                         lock_page(vmf->page);
-                        cfio->fault.ft_flags &= VM_FAULT_LOCKED;
+                       cfio->fault.ft_flags |= VM_FAULT_LOCKED;
                 }
 
                 cfio->ft_vmpage = vmf->page;
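
Note: the one-character operator change above is a real bug fix. The
branch runs only when VM_FAULT_LOCKED is *not* set, so the old
ft_flags &= VM_FAULT_LOCKED zeroed the whole flags word (discarding
VM_FAULT_RETRY, the error bits, and everything else) instead of
recording that lock_page() had just been taken; ft_flags |=
VM_FAULT_LOCKED sets exactly the intended bit.
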
@@ -886,16 +892,13 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
 static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
                                    struct cl_page *page)
 {
-       const struct cl_page_slice *slice;
        struct ccc_page *cp;
-       struct page *vmpage;
+       struct cl_object *clob = cl_io_top(io)->ci_obj;
 
-       slice = cl_page_at(page, &vvp_device_type);
-       cp = cl2ccc_page(slice);
-       vmpage = cp->cpg_page;
+       set_page_dirty(page->cp_vmpage);
 
-       set_page_dirty(vmpage);
-       vvp_write_pending(cl2ccc(slice->cpl_obj), cp);
+       cp = cl2ccc_page(cl_object_page_slice(clob, page));
+       vvp_write_pending(cl2ccc(clob), cp);
 }
 
 static int vvp_io_fault_start(const struct lu_env *env,
@@ -995,6 +998,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
                wait_on_page_writeback(vmpage);
                if (!PageDirty(vmpage)) {
                        struct cl_page_list *plist = &io->ci_queue.c2_qin;
+                       struct ccc_page *cp =
+                               cl2ccc_page(cl_object_page_slice(obj, page));
                        int to = PAGE_SIZE;
 
                        /* vvp_page_assume() calls wait_on_page_writeback(). */
@@ -1004,7 +1008,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
                        cl_page_list_add(plist, page);
 
                        /* size fixup */
-                       if (last_index == page->cp_index)
+                       if (last_index == ccc_index(cp))
                                to = size & ~CFS_PAGE_MASK;
 
                        /* Do not set Dirty bit here so that in case IO is
@@ -1071,53 +1075,38 @@ static int vvp_io_read_page(const struct lu_env *env,
                             const struct cl_io_slice *ios,
                             const struct cl_page_slice *slice)
 {
-        struct cl_io              *io     = ios->cis_io;
-        struct cl_object          *obj    = slice->cpl_obj;
-        struct ccc_page           *cp     = cl2ccc_page(slice);
-        struct cl_page            *page   = slice->cpl_page;
-        struct inode              *inode  = ccc_object_inode(obj);
-        struct ll_sb_info         *sbi    = ll_i2sbi(inode);
-        struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
-        struct ll_readahead_state *ras    = &fd->fd_ras;
-       struct page                *vmpage = cp->cpg_page;
-        struct cl_2queue          *queue  = &io->ci_queue;
-        int rc;
+       struct cl_io              *io     = ios->cis_io;
+       struct ccc_page           *cp     = cl2ccc_page(slice);
+       struct cl_page            *page   = slice->cpl_page;
+       struct inode              *inode  = ccc_object_inode(slice->cpl_obj);
+       struct ll_sb_info         *sbi    = ll_i2sbi(inode);
+       struct ll_file_data       *fd     = cl2ccc_io(env, ios)->cui_fd;
+       struct ll_readahead_state *ras    = &fd->fd_ras;
+       struct cl_2queue          *queue  = &io->ci_queue;
 
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-        LASSERT(slice->cpl_obj == obj);
+       ENTRY;
 
-        ENTRY;
-
-        if (sbi->ll_ra_info.ra_max_pages_per_file &&
-            sbi->ll_ra_info.ra_max_pages)
-                ras_update(sbi, inode, ras, page->cp_index,
-                           cp->cpg_defer_uptodate);
-
-        /* Sanity check whether the page is protected by a lock. */
-        rc = cl_page_is_under_lock(env, io, page);
-        if (rc != -EBUSY) {
-                CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
-                               rc == -ENODATA ? "without a lock" :
-                               "match failed", rc);
-                if (rc != -ENODATA)
-                        RETURN(rc);
-        }
+       if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+           sbi->ll_ra_info.ra_max_pages > 0)
+               ras_update(sbi, inode, ras, ccc_index(cp),
+                          cp->cpg_defer_uptodate);
 
         if (cp->cpg_defer_uptodate) {
                 cp->cpg_ra_used = 1;
                 cl_page_export(env, page, 1);
         }
+
         /*
          * Add page into the queue even when it is marked uptodate above.
          * this will unlock it automatically as part of cl_page_list_disown().
          */
         cl_2queue_add(queue, page);
-        if (sbi->ll_ra_info.ra_max_pages_per_file &&
-            sbi->ll_ra_info.ra_max_pages)
-                ll_readahead(env, io, ras,
-                             vmpage->mapping, &queue->c2_qin, fd->fd_flags);
+       if (sbi->ll_ra_info.ra_max_pages_per_file > 0 &&
+           sbi->ll_ra_info.ra_max_pages > 0)
+               ll_readahead(env, io, &queue->c2_qin, ras,
+                            cp->cpg_defer_uptodate);
 
-        RETURN(0);
+       RETURN(0);
 }
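
Note: two behavioral changes hide in this hunk besides the ccc_index()
conversion. The cl_page_is_under_lock() sanity check and its warning
path are dropped entirely, and ll_readahead() now appears to take the
page queue and the defer-uptodate hint directly rather than deriving
them from the VM page's mapping; the new signature is inferred from this
call site only.
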
 
 static const struct cl_io_operations vvp_io_ops = {