LU-11568 ldlm: Remove use of SLAB_DESTROY_BY_RCU for ldlm lock slab
diff --git a/lustre/llite/vvp_page.c b/lustre/llite/vvp_page.c
index 8cc68ef..064e86e 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011, 2014, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
  *
  */
 
-static void vvp_page_fini_common(struct vvp_page *vpg)
+static void vvp_page_fini_common(struct vvp_page *vpg, struct pagevec *pvec)
 {
        struct page *vmpage = vpg->vpg_page;
 
        LASSERT(vmpage != NULL);
-       page_cache_release(vmpage);
+       if (pvec) {
+               if (!pagevec_add(pvec, vmpage))
+                       pagevec_release(pvec);
+       } else {
+               put_page(vmpage);
+       }
 }
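
The new pvec argument lets callers batch the final page releases through a pagevec instead of dropping each reference with put_page() one at a time. A minimal sketch of that batching idiom follows, as a hypothetical standalone caller that is not part of this patch; note that pagevec_init() takes an extra "cold" argument on older kernels.

#include <linux/pagevec.h>
#include <linux/mm.h>

/* Illustration only: release an array of pages through a pagevec so
 * release_pages() runs once per batch instead of put_page() once per
 * page. */
static void example_batched_put(struct page **pages, int npages)
{
        struct pagevec pvec;
        int i;

        pagevec_init(&pvec);
        for (i = 0; i < npages; i++) {
                /* pagevec_add() returns the slots left after adding;
                 * zero means the pagevec is now full, so flush it. */
                if (!pagevec_add(&pvec, pages[i]))
                        pagevec_release(&pvec);
        }
        /* flush whatever is still batched */
        pagevec_release(&pvec);
}
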
 
 static void vvp_page_fini(const struct lu_env *env,
-                         struct cl_page_slice *slice)
+                         struct cl_page_slice *slice,
+                         struct pagevec *pvec)
 {
        struct vvp_page *vpg     = cl2vvp_page(slice);
        struct page     *vmpage  = vpg->vpg_page;
@@ -77,7 +79,7 @@ static void vvp_page_fini(const struct lu_env *env,
         * VPG_FREEING state.
         */
        LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
-       vvp_page_fini_common(vpg);
+       vvp_page_fini_common(vpg, pvec);
 }
 
 static int vvp_page_own(const struct lu_env *env,
@@ -171,7 +173,6 @@ static void vvp_page_delete(const struct lu_env *env,
        refc = atomic_dec_return(&page->cp_ref);
        LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
 
-       ClearPageUptodate(vmpage);
        ClearPagePrivate(vmpage);
        vmpage->private = 0;
        /*
@@ -214,11 +215,15 @@ static int vvp_page_prep_write(const struct lu_env *env,
                                struct cl_io *unused)
 {
        struct page *vmpage = cl2vm_page(slice);
+       struct cl_page *pg = slice->cpl_page;
 
        LASSERT(PageLocked(vmpage));
        LASSERT(!PageDirty(vmpage));
 
-       set_page_writeback(vmpage);
+       /* The ll_writepage path is not a sync write, so we need to set
+        * the page writeback flag. */
+       if (pg->cp_sync_io == NULL)
+               set_page_writeback(vmpage);
 
        return 0;
 }
@@ -243,8 +248,8 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
                else
                        set_bit(AS_EIO, &inode->i_mapping->flags);
 
-               if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
-                    obj->vob_discard_page_warned == 0) {
+               if ((ioret == -ESHUTDOWN || ioret == -EINTR ||
+                    ioret == -EIO) && obj->vob_discard_page_warned == 0) {
                        obj->vob_discard_page_warned = 1;
                        ll_dirty_page_discard_warn(vmpage, ioret);
                }
@@ -270,8 +275,14 @@ static void vvp_page_completion_read(const struct lu_env *env,
        if (ioret == 0)  {
                if (!vpg->vpg_defer_uptodate)
                        cl_page_export(env, page, 1);
-       } else {
+       } else if (vpg->vpg_defer_uptodate) {
                vpg->vpg_defer_uptodate = 0;
+               if (ioret == -EWOULDBLOCK) {
+                       /* The mirror read failed, so destroy the page:
+                        * the sub-page would come from the wrong OSC
+                        * when the read is retried from a new mirror. */
+                       ll_invalidate_page(vmpage);
+               }
        }
 
        if (page->cp_sync_io == NULL)
@@ -289,19 +300,21 @@ static void vvp_page_completion_write(const struct lu_env *env,
        struct page     *vmpage = vpg->vpg_page;
        ENTRY;
 
-       LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
-       LASSERT(PageWriteback(vmpage));
-
        CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
-       /*
-        * Only mark the page error only when it's an async write because
-        * applications won't wait for IO to finish.
-        */
-       if (pg->cp_sync_io == NULL)
+       if (pg->cp_sync_io != NULL) {
+               LASSERT(PageLocked(vmpage));
+               LASSERT(!PageWriteback(vmpage));
+       } else {
+               LASSERT(PageWriteback(vmpage));
+               /*
+                * Only mark the page in error when this is an async write,
+                * because applications won't wait for the IO to finish.
+                */
                vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
 
-       end_page_writeback(vmpage);
+               end_page_writeback(vmpage);
+       }
        EXIT;
 }
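
Taken together, the prep and completion hunks confine PageWriteback handling to the async (ll_writepage) path; sync writes are waited on through cp_sync_io instead. The following is a hedged sketch of that pairing, not the patch's code, with a hypothetical sync_io flag standing in for the pg->cp_sync_io test.

#include <linux/types.h>
#include <linux/page-flags.h>
#include <linux/pagemap.h>

/* Illustration only: the flag set on the prep side must be cleared by
 * the matching completion side, and both key off the same test. */
static void example_write_flag_pairing(struct page *vmpage, bool sync_io)
{
        if (!sync_io)
                set_page_writeback(vmpage);   /* async: VM waits on this bit */

        /* ... the write RPC is built and sent here ... */

        if (!sync_io)
                end_page_writeback(vmpage);   /* wakes PageWriteback waiters */
        /* sync path: the waiter is signalled through cl_sync_io instead */
}
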
 
@@ -346,23 +359,6 @@ static int vvp_page_make_ready(const struct lu_env *env,
        RETURN(result);
 }
 
-static int vvp_page_is_under_lock(const struct lu_env *env,
-                                 const struct cl_page_slice *slice,
-                                 struct cl_io *io, pgoff_t *max_index)
-{
-       ENTRY;
-
-       if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
-           io->ci_type == CIT_FAULT) {
-               struct vvp_io *vio = vvp_env_io(env);
-
-               if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
-                       *max_index = CL_PAGE_EOF;
-       }
-       RETURN(0);
-}
-
-
 static int vvp_page_print(const struct lu_env *env,
                          const struct cl_page_slice *slice,
                          void *cookie, lu_printer_t printer)
@@ -409,7 +405,6 @@ static const struct cl_page_operations vvp_page_ops = {
        .cpo_is_vmlocked   = vvp_page_is_vmlocked,
        .cpo_fini          = vvp_page_fini,
        .cpo_print         = vvp_page_print,
-       .cpo_is_under_lock = vvp_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_page_prep_read,
@@ -486,9 +481,9 @@ static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
        struct inode    *inode = vvp_object_inode(slice->cpl_obj);
        int     locked;
 
-       locked = !mutex_trylock(&inode->i_mutex);
+       locked = !inode_trylock(inode);
        if (!locked)
-               mutex_unlock(&inode->i_mutex);
+               inode_unlock(inode);
        return locked ? -EBUSY : -ENODATA;
 }
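
inode_trylock()/inode_unlock() are the upstream kernel wrappers around what used to be i_mutex (now i_rwsem), which is why the direct mutex calls are replaced above. A small sketch of the probe pattern, as a hypothetical helper that is not part of this patch:

#include <linux/fs.h>

/* Illustration only: report whether another task currently holds the
 * inode lock, by probing with a trylock and releasing it on success. */
static int example_inode_locked_by_other(struct inode *inode)
{
        if (!inode_trylock(inode))
                return 1;       /* trylock failed: someone else holds it */
        inode_unlock(inode);    /* we took it, so it was free; drop it */
        return 0;
}
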
 
@@ -501,13 +496,14 @@ vvp_transient_page_completion(const struct lu_env *env,
 }
 
 static void vvp_transient_page_fini(const struct lu_env *env,
-                                   struct cl_page_slice *slice)
+                                   struct cl_page_slice *slice,
+                                   struct pagevec *pvec)
 {
        struct vvp_page *vpg = cl2vvp_page(slice);
        struct cl_page *clp = slice->cpl_page;
        struct vvp_object *clobj = cl2vvp(clp->cp_obj);
 
-       vvp_page_fini_common(vpg);
+       vvp_page_fini_common(vpg, pvec);
        atomic_dec(&clobj->vob_transient_pages);
 }
 
@@ -520,7 +516,6 @@ static const struct cl_page_operations vvp_transient_page_ops = {
        .cpo_fini               = vvp_transient_page_fini,
        .cpo_is_vmlocked        = vvp_transient_page_is_vmlocked,
        .cpo_print              = vvp_page_print,
-       .cpo_is_under_lock      = vvp_page_is_under_lock,
        .io = {
                [CRT_READ] = {
                        .cpo_prep       = vvp_transient_page_prep,
@@ -542,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
        CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 
        vpg->vpg_page = vmpage;
-       page_cache_get(vmpage);
+       get_page(vmpage);
 
        if (page->cp_type == CPT_CACHEABLE) {
                /* in cache, decref in vvp_page_delete */