LU-185 LBUG: (cl_page.c:1362:cl_page_completion()) ...
lustre/llite/rw26.c
index d3b26b4..25ade0e 100644
@@ -26,7 +26,7 @@
  * GPL HEADER END
  */
 /*
- * Copyright  2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
@@ -187,7 +187,7 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
         *max_pages = (user_addr + size + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
         *max_pages -= user_addr >> CFS_PAGE_SHIFT;
 
-        OBD_ALLOC_WAIT(*pages, *max_pages * sizeof(**pages));
+        OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
         if (*pages) {
                 down_read(&current->mm->mmap_sem);
                 result = get_user_pages(current, current->mm, user_addr,
@@ -195,7 +195,7 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
                                         NULL);
                 up_read(&current->mm->mmap_sem);
                 if (unlikely(result <= 0))
-                        OBD_FREE(*pages, *max_pages * sizeof(**pages));
+                        OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
         }
 
         return result;
@@ -215,7 +215,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
                 page_cache_release(pages[i]);
         }
 
-        OBD_FREE(pages, npages * sizeof(*pages));
+        OBD_FREE_LARGE(pages, npages * sizeof(*pages));
 }
 
 ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
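The three hunks above switch the page-pointer array used for direct I/O from OBD_ALLOC_WAIT()/OBD_FREE() to OBD_ALLOC_LARGE()/OBD_FREE_LARGE(). The array holds one pointer per page of the user buffer, so a large direct I/O can push its size past what a plain kmalloc() reliably satisfies. As a hedged sketch (the helper names, the threshold, and the exact policy are illustrative assumptions, not taken from this patch), the _LARGE variants are expected to fall back to vmalloc() for big allocations:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Illustrative only: the kmalloc-or-vmalloc fallback that the
 * OBD_*_LARGE() helpers are expected to provide.  The 4-page threshold
 * and the function names are assumptions made for this sketch. */
static void *alloc_large_buf(size_t size)
{
        if (size > 4 * PAGE_SIZE)
                return vmalloc(size);
        return kmalloc(size, GFP_KERNEL);
}

static void free_large_buf(void *buf, size_t size)
{
        if (size > 4 * PAGE_SIZE)
                vfree(buf);
        else
                kfree(buf);
}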
@@ -232,6 +232,8 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
         int page_count      = pv->ldp_nr;
         struct page **pages = pv->ldp_pages;
         long page_size      = cl_page_size(obj);
+        bool do_io;
+        int  io_pages       = 0;
         ENTRY;
 
         queue = &io->ci_queue;
@@ -239,6 +241,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
         for (i = 0; i < page_count; i++) {
                 if (pv->ldp_offsets)
                     file_offset = pv->ldp_offsets[i];
+
                 LASSERT(!(file_offset & (page_size - 1)));
                 clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                                    pv->ldp_pages[i], CPT_TRANSIENT);
@@ -247,14 +250,17 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                         break;
                 }
 
+                rc = cl_page_own(env, io, clp);
+                if (rc) {
+                        LASSERT(clp->cp_state == CPS_FREEING);
+                        cl_page_put(env, clp);
+                        break;
+                }
+
+                do_io = true;
+
                 /* check the page type: if the page is a host page, then do
                  * write directly */
-                /*
-                 * Very rare case that the host pages can be found for
-                 * directIO case, since linux kernel truncated all covered
-                 * pages before getting here. So, to make the OST happy(to
-                 * write a contiguous region), all pages are issued
-                 * here. -jay */
                 if (clp->cp_type == CPT_CACHEABLE) {
                         cfs_page_t *vmpage = cl_page_vmpage(env, clp);
                         cfs_page_t *src_page;
@@ -275,43 +281,41 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
                          * cl_io_submit()->...->vvp_page_prep_write(). */
                         if (rw == WRITE)
                                 set_page_dirty(vmpage);
+
+                        if (rw == READ) {
+                                /* do not issue the page for read, since it
+                                 * may re-read a readahead page whose uptodate
+                                 * bit is not set. */
+                                cl_page_disown(env, io, clp);
+                                do_io = false;
+                        }
+                }
+
+                if (likely(do_io)) {
+                        cl_2queue_add(queue, clp);
+
                         /*
-                         * If direct-io read finds up-to-date page in the
-                         * cache, just copy it to the user space. Page will be
-                         * filtered out by vvp_page_prep_read(). This
-                         * preserves an invariant, that page is read at most
-                         * once, see cl_page_flags::CPF_READ_COMPLETED.
+                         * Set page clip to tell transfer formation engine
+                         * that page has to be sent even if it is beyond KMS.
                          */
-                }
+                        cl_page_clip(env, clp, 0, min(size, page_size));
 
-                rc = cl_page_own(env, io, clp);
-                if (rc) {
-                        LASSERT(clp->cp_state == CPS_FREEING);
-                        cl_page_put(env, clp);
-                        break;
+                        ++io_pages;
                 }
 
-                cl_2queue_add(queue, clp);
-
-                /* drop the reference count for cl_page_find, so that the page
-                 * will be freed in cl_2queue_fini. */
+                /* drop the reference count for cl_page_find */
                 cl_page_put(env, clp);
-                /*
-                 * Set page clip to tell transfer formation engine that page
-                 * has to be sent even if it is beyond KMS.
-                 */
-                cl_page_clip(env, clp, 0, min(size, page_size));
                 size -= page_size;
                 file_offset += page_size;
         }
 
-        if (rc == 0) {
+        if (rc == 0 && io_pages) {
                 rc = cl_io_submit_sync(env, io,
                                        rw == READ ? CRT_READ : CRT_WRITE,
                                        queue, CRP_NORMAL, 0);
-                if (rc == 0)
-                        rc = pv->ldp_size;
         }
+        if (rc == 0)
+                rc = pv->ldp_size;
 
         cl_2queue_discard(env, io, queue);
         cl_2queue_disown(env, io, queue);
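The rework of ll_direct_rw_pages() above takes ownership of each cl_page before deciding whether to issue it: cl_page_own() now runs first, and a CPT_CACHEABLE page found during a direct read is served from the cache and disowned instead of being queued, so only pages that genuinely need a transfer are clipped, counted in io_pages, and submitted. A condensed sketch of the per-page flow after the patch (the cl_page_find() error convention and the cacheable copy path are abbreviated assumptions; this is not a drop-in replacement):

/* Condensed sketch of the loop body after this patch; the real code
 * also copies cacheable pages to/from user space and handles the
 * last, partial page. */
for (i = 0; i < page_count; i++) {
        clp = cl_page_find(env, obj, cl_index(obj, file_offset),
                           pv->ldp_pages[i], CPT_TRANSIENT);
        if (IS_ERR(clp)) {              /* assumed error convention */
                rc = PTR_ERR(clp);
                break;
        }

        rc = cl_page_own(env, io, clp); /* own before deciding to issue */
        if (rc) {
                cl_page_put(env, clp);
                break;
        }

        do_io = true;
        if (clp->cp_type == CPT_CACHEABLE && rw == READ) {
                /* data already lives in the page cache: serve it from
                 * there instead of re-reading it from the OST */
                cl_page_disown(env, io, clp);
                do_io = false;
        }

        if (do_io) {
                cl_2queue_add(queue, clp);
                cl_page_clip(env, clp, 0, min(size, page_size));
                ++io_pages;
        }

        cl_page_put(env, clp);          /* drop the cl_page_find reference */
        size -= page_size;
        file_offset += page_size;
}
if (rc == 0 && io_pages)
        rc = cl_io_submit_sync(env, io, rw == READ ? CRT_READ : CRT_WRITE,
                               queue, CRP_NORMAL, 0);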
@@ -468,6 +472,54 @@ out:
         RETURN(tot_bytes ? : result);
 }
 
+#ifdef HAVE_KERNEL_WRITE_BEGIN_END
+static int ll_write_begin(struct file *file, struct address_space *mapping,
+                         loff_t pos, unsigned len, unsigned flags,
+                         struct page **pagep, void **fsdata)
+{
+        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+        struct page *page;
+        int rc;
+        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+        ENTRY;
+
+        page = grab_cache_page_write_begin(mapping, index, flags);
+        if (!page)
+                RETURN(-ENOMEM);
+
+        *pagep = page;
+
+        rc = ll_prepare_write(file, page, from, from + len);
+        if (rc) {
+                unlock_page(page);
+                page_cache_release(page);
+        }
+        RETURN(rc);
+}
+
+static int ll_write_end(struct file *file, struct address_space *mapping,
+                        loff_t pos, unsigned len, unsigned copied,
+                        struct page *page, void *fsdata)
+{
+        unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+        int rc;
+        rc = ll_commit_write(file, page, from, from + copied);
+
+        unlock_page(page);
+        page_cache_release(page);
+        return rc ? rc : copied;
+}
+#endif
+
+#ifdef CONFIG_MIGRATION
+int ll_migratepage(struct address_space *mapping,
+                   struct page *newpage, struct page *page)
+{
+        /* Always fail page migration until we have a proper implementation */
+        return -EIO;
+}
+#endif
+
 struct address_space_operations ll_aops = {
         .readpage       = ll_readpage,
 //        .readpages      = ll_readpages,
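ll_write_begin() and ll_write_end() adapt the existing ll_prepare_write()/ll_commit_write() pair to the write_begin/write_end interface that newer kernels expect in struct address_space_operations, with ll_write_begin() also taking over page lookup and locking via grab_cache_page_write_begin(). For context, here is a rough sketch of the caller side, loosely modelled on the kernel's generic buffered-write loop; the variable names and exact signatures are approximations and vary between kernel versions, so treat it as an illustration rather than real kernel code:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Rough sketch of how the generic buffered-write path drives the
 * ->write_begin()/->write_end() pair added above.  Loosely modelled on
 * generic_perform_write(); error handling, AOP flags and short-copy
 * retries are simplified. */
static ssize_t buffered_write_sketch(struct file *file, struct iov_iter *i,
                                     loff_t pos)
{
        struct address_space *mapping = file->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        ssize_t written = 0;

        while (iov_iter_count(i)) {
                unsigned long offset = pos & (PAGE_CACHE_SIZE - 1);
                unsigned long bytes  = min_t(unsigned long,
                                             PAGE_CACHE_SIZE - offset,
                                             iov_iter_count(i));
                unsigned long copied;
                struct page *page;
                void *fsdata;
                int status;

                status = a_ops->write_begin(file, mapping, pos, bytes, 0,
                                            &page, &fsdata);
                if (status)
                        break;

                /* copy user data into the locked, prepared page */
                copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);

                status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                          page, fsdata);
                if (status < 0)
                        break;

                iov_iter_advance(i, copied);
                pos += copied;
                written += copied;
        }
        return written;
}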
@@ -476,9 +528,17 @@ struct address_space_operations ll_aops = {
         .writepages     = generic_writepages,
         .set_page_dirty = ll_set_page_dirty,
         .sync_page      = NULL,
+#ifdef HAVE_KERNEL_WRITE_BEGIN_END
+        .write_begin    = ll_write_begin,
+        .write_end      = ll_write_end,
+#else
         .prepare_write  = ll_prepare_write,
         .commit_write   = ll_commit_write,
+#endif
         .invalidatepage = ll_invalidatepage,
         .releasepage    = (void *)ll_releasepage,
+#ifdef CONFIG_MIGRATION
+        .migratepage    = ll_migratepage,
+#endif
         .bmap           = NULL
 };
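Finally, the aops table now selects the write_begin/write_end pair when the kernel provides that interface (HAVE_KERNEL_WRITE_BEGIN_END) and registers ll_migratepage() when CONFIG_MIGRATION is enabled. Returning -EIO from the stub makes memory compaction and hotplug treat these pages as unmovable rather than migrating them behind Lustre's back. Below is a hedged sketch of the VM side that consults this callback, simplified from the kernel's page-migration path; details vary by kernel version:

#include <linux/fs.h>
#include <linux/migrate.h>

/* Sketch only: how the migration path is expected to consult
 * ->migratepage.  A non-zero return (such as the -EIO above) fails the
 * migration of that page, and the page stays where it is. */
static int try_migrate_sketch(struct page *page, struct page *newpage)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping && mapping->a_ops->migratepage)
                /* the filesystem decides; Lustre currently refuses with -EIO */
                return mapping->a_ops->migratepage(mapping, newpage, page);

        /* generic fallback: copy page contents and transfer page state */
        return migrate_page(mapping, newpage, page);
}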