LU-1756 kernel: cleanup lustre_compat25.h
[fs/lustre-release.git] / lustre/llite/vvp_page.c
index d199ad6..c07cb29 100644
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -36,6 +36,7 @@
  * Implementation of cl_page for VVP layer.
  *
  *   Author: Nikita Danilov <nikita.danilov@sun.com>
+ *   Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
  */
 
 #define DEBUG_SUBSYSTEM S_LLITE
@@ -78,19 +79,34 @@ static void vvp_page_fini(const struct lu_env *env,
         vvp_page_fini_common(cp);
 }
 
-static void vvp_page_own(const struct lu_env *env,
-                         const struct cl_page_slice *slice, struct cl_io *_)
+static int vvp_page_own(const struct lu_env *env,
+                        const struct cl_page_slice *slice, struct cl_io *io,
+                        int nonblock)
 {
         struct ccc_page *vpg    = cl2ccc_page(slice);
         cfs_page_t      *vmpage = vpg->cpg_page;
 
-        LASSERT(vmpage != NULL);
+       LASSERT(vmpage != NULL);
+       if (nonblock) {
+               if (!trylock_page(vmpage))
+                       return -EAGAIN;
+
+                if (unlikely(PageWriteback(vmpage))) {
+                        unlock_page(vmpage);
+                        return -EAGAIN;
+                }
+
+                return 0;
+        }
+
         lock_page(vmpage);
         wait_on_page_writeback(vmpage);
+        return 0;
 }
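The cpo_own hook changes here from a void function to one that can fail: when nonblock is set, the page is taken with trylock_page() and refused with -EAGAIN if it is contended or still under writeback, while the blocking path keeps the old lock_page()/wait_on_page_writeback() behaviour. The fragment below is a standalone user-space sketch of that control flow only; mock_page, its pthread mutex and the writeback flag are illustrative stand-ins, not the kernel page or the Lustre API.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_page {
	pthread_mutex_t lock;      /* stand-in for the VM page lock */
	bool            writeback; /* stand-in for PG_writeback */
};

static int mock_page_own(struct mock_page *pg, int nonblock)
{
	if (nonblock) {
		/* Fail fast instead of sleeping, as the patched code does. */
		if (pthread_mutex_trylock(&pg->lock) != 0)
			return -EAGAIN;
		if (pg->writeback) {
			pthread_mutex_unlock(&pg->lock);
			return -EAGAIN;
		}
		return 0;
	}

	/* Blocking path: take the lock, then wait for writeback to clear
	 * (the real code sleeps in wait_on_page_writeback()). */
	pthread_mutex_lock(&pg->lock);
	while (pg->writeback)
		;
	return 0;
}

int main(void)
{
	struct mock_page pg = { PTHREAD_MUTEX_INITIALIZER, true };

	printf("own(nonblock) under writeback: %d\n", mock_page_own(&pg, 1));
	pg.writeback = false;
	printf("own(nonblock) again:           %d\n", mock_page_own(&pg, 1));
	pthread_mutex_unlock(&pg.lock);
	return 0;
}

A caller that gets -EAGAIN can presumably retry later or fall back to the blocking variant.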
 
 static void vvp_page_assume(const struct lu_env *env,
-                            const struct cl_page_slice *slice, struct cl_io *_)
+                            const struct cl_page_slice *slice,
+                            struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
@@ -101,7 +117,7 @@ static void vvp_page_assume(const struct lu_env *env,
 
 static void vvp_page_unassume(const struct lu_env *env,
                               const struct cl_page_slice *slice,
-                              struct cl_io *_)
+                              struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
@@ -121,7 +137,8 @@ static void vvp_page_disown(const struct lu_env *env,
 }
 
 static void vvp_page_discard(const struct lu_env *env,
-                             const struct cl_page_slice *slice, struct cl_io *_)
+                             const struct cl_page_slice *slice,
+                             struct cl_io *unused)
 {
         cfs_page_t           *vmpage  = cl2vm_page(slice);
         struct address_space *mapping = vmpage->mapping;
@@ -141,7 +158,8 @@ static void vvp_page_discard(const struct lu_env *env,
 }
 
 static int vvp_page_unmap(const struct lu_env *env,
-                          const struct cl_page_slice *slice, struct cl_io *_)
+                          const struct cl_page_slice *slice,
+                          struct cl_io *unused)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
         __u64       offset = vmpage->index << CFS_PAGE_SHIFT;
@@ -176,13 +194,17 @@ static void vvp_page_delete(const struct lu_env *env,
 }
 
 static void vvp_page_export(const struct lu_env *env,
-                            const struct cl_page_slice *slice)
+                            const struct cl_page_slice *slice,
+                            int uptodate)
 {
         cfs_page_t *vmpage = cl2vm_page(slice);
 
         LASSERT(vmpage != NULL);
         LASSERT(PageLocked(vmpage));
-        SetPageUptodate(vmpage);
+        if (uptodate)
+                SetPageUptodate(vmpage);
+        else
+                ClearPageUptodate(vmpage);
 }
 
 static int vvp_page_is_vmlocked(const struct lu_env *env,
@@ -193,7 +215,7 @@ static int vvp_page_is_vmlocked(const struct lu_env *env,
 
 static int vvp_page_prep_read(const struct lu_env *env,
                               const struct cl_page_slice *slice,
-                              struct cl_io *_)
+                              struct cl_io *unused)
 {
         ENTRY;
         /* Skip the page already marked as PG_uptodate. */
@@ -202,17 +224,17 @@ static int vvp_page_prep_read(const struct lu_env *env,
 
 static int vvp_page_prep_write(const struct lu_env *env,
                                const struct cl_page_slice *slice,
-                               struct cl_io *_)
+                               struct cl_io *unused)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
-        int result;
+       cfs_page_t *vmpage = cl2vm_page(slice);
 
-        if (clear_page_dirty_for_io(vmpage)) {
-                set_page_writeback(vmpage);
-                result = 0;
-        } else
-                result = -EALREADY;
-        return result;
+       LASSERT(PageLocked(vmpage));
+       LASSERT(!PageDirty(vmpage));
+
+       set_page_writeback(vmpage);
+       vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+
+       return 0;
 }
 
 /**
@@ -234,66 +256,32 @@ static void vvp_vmpage_error(struct inode *inode, cfs_page_t *vmpage, int ioret)
         }
 }
 
-static void vvp_page_completion_common(const struct lu_env *env,
-                                       struct ccc_page *cp, int ioret)
-{
-        struct cl_page    *clp    = cp->cpg_cl.cpl_page;
-        cfs_page_t        *vmpage = cp->cpg_page;
-        struct inode      *inode  = ccc_object_inode(clp->cp_obj);
-        struct cl_sync_io *anchor = cp->cpg_sync_io;
-
-        LINVRNT(cl_page_is_vmlocked(env, clp));
-        KLASSERT(!PageWriteback(vmpage));
-
-        vvp_vmpage_error(inode, vmpage, ioret);
-
-        if (anchor != NULL) {
-                cp->cpg_sync_io  = NULL;
-                cl_sync_io_note(anchor, ioret);
-        } else if (clp->cp_type == CPT_CACHEABLE)
-                unlock_page(vmpage);
-}
-
 static void vvp_page_completion_read(const struct lu_env *env,
                                      const struct cl_page_slice *slice,
                                      int ioret)
 {
-        struct ccc_page *cp    = cl2ccc_page(slice);
-        struct cl_page  *page  = cl_page_top(slice->cpl_page);
-        struct inode    *inode = ccc_object_inode(page->cp_obj);
+        struct ccc_page *cp     = cl2ccc_page(slice);
+        cfs_page_t      *vmpage = cp->cpg_page;
+        struct cl_page  *page   = cl_page_top(slice->cpl_page);
+        struct inode    *inode  = ccc_object_inode(page->cp_obj);
         ENTRY;
 
+        LASSERT(PageLocked(vmpage));
         CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
 
         if (cp->cpg_defer_uptodate)
                 ll_ra_count_put(ll_i2sbi(inode), 1);
 
         if (ioret == 0)  {
-                /* XXX: do we need this for transient pages? */
                 if (!cp->cpg_defer_uptodate)
-                        cl_page_export(env, page);
+                        cl_page_export(env, page, 1);
         } else
                 cp->cpg_defer_uptodate = 0;
-        vvp_page_completion_common(env, cp, ioret);
 
-        EXIT;
-}
-
-static void vvp_page_completion_write_common(const struct lu_env *env,
-                                             const struct cl_page_slice *slice,
-                                             int ioret)
-{
-        struct ccc_page *cp = cl2ccc_page(slice);
+        if (page->cp_sync_io == NULL)
+                unlock_page(vmpage);
 
-        if (ioret == 0) {
-                cp->cpg_write_queued = 0;
-                /*
-                 * Only ioret == 0, write succeed, then this page could be
-                 * deleted from the pending_writing count.
-                 */
-                vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
-        }
-        vvp_page_completion_common(env, cp, ioret);
+        EXIT;
 }
 
 static void vvp_page_completion_write(const struct lu_env *env,
@@ -303,18 +291,34 @@ static void vvp_page_completion_write(const struct lu_env *env,
         struct ccc_page *cp     = cl2ccc_page(slice);
         struct cl_page  *pg     = slice->cpl_page;
         cfs_page_t      *vmpage = cp->cpg_page;
-
         ENTRY;
 
-        LINVRNT(cl_page_is_vmlocked(env, pg));
+        LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
         LASSERT(PageWriteback(vmpage));
 
         CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
 
-        end_page_writeback(vmpage);
-        LASSERT(!PageWriteback(vmpage));
+        /*
+         * TODO: Actually it makes sense to add the page into oap pending
+         * list again, so that we don't need to take the page out from
+         * SoM write pending list, if we just meet a recoverable error,
+         * -ENOMEM, etc.
+         * To implement this, we just need to return a non-zero value in
+         * ->cpo_completion method. The underlying transfer should be notified
+         * and then re-add the page into pending transfer queue.  -jay
+         */
+
+        cp->cpg_write_queued = 0;
+        vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
+
+        /*
+         * Only mark the page error when it's an async write, because
+         * applications won't wait for IO to finish.
+         */
+        if (pg->cp_sync_io == NULL)
+                vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
 
-        vvp_page_completion_write_common(env, slice, ioret);
+        end_page_writeback(vmpage);
         EXIT;
 }
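With vvp_page_completion_common() gone, both completion handlers key directly off page->cp_sync_io: the read path unlocks the VM page only when no thread is waiting synchronously, and the write path pushes the error into the VM page only for asynchronous writes, since a synchronous caller sees ioret itself. The short user-space model below is only meant to illustrate that split; mock_cl_page, mock_sync_io and the flag names are invented for the example and are not the cl_page structures.

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mock_sync_io {
	int result;                     /* stand-in for cl_sync_io bookkeeping */
};

struct mock_cl_page {
	struct mock_sync_io *sync_io;   /* non-NULL when a thread waits on the I/O */
	bool                 vm_error;  /* stand-in for SetPageError() */
	bool                 vm_locked; /* stand-in for the VM page lock */
};

static void write_completion_model(struct mock_cl_page *pg, int ioret)
{
	/* Only an async write records the error on the page itself, because
	 * no application thread will ever look at ioret. */
	if (pg->sync_io == NULL && ioret != 0)
		pg->vm_error = true;
	else if (pg->sync_io != NULL)
		pg->sync_io->result = ioret;   /* the waiter deals with the error */
}

static void read_completion_model(struct mock_cl_page *pg, int ioret)
{
	(void)ioret;
	/* Only unlock the page when nobody is waiting synchronously;
	 * a sync reader unlocks it itself once woken up. */
	if (pg->sync_io == NULL)
		pg->vm_locked = false;
}

int main(void)
{
	struct mock_cl_page async_pg = { NULL, false, true };

	write_completion_model(&async_pg, -EIO);
	read_completion_model(&async_pg, 0);
	printf("async page: error=%d locked=%d\n",
	       async_pg.vm_error, async_pg.vm_locked);
	return 0;
}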
 
@@ -333,35 +337,32 @@ static void vvp_page_completion_write(const struct lu_env *env,
  * truncated. Skip it.
  */
 static int vvp_page_make_ready(const struct lu_env *env,
-                               const struct cl_page_slice *slice)
+                              const struct cl_page_slice *slice)
 {
-        cfs_page_t *vmpage = cl2vm_page(slice);
-        struct cl_page *pg = slice->cpl_page;
-        int result;
-
-        result = -EAGAIN;
-        /* we're trying to write, but the page is locked.. come back later */
-        if (!TestSetPageLocked(vmpage)) {
-                if (pg->cp_state == CPS_CACHED) {
-                        /*
-                         * We can cancel IO if page wasn't dirty after all.
-                         */
-                        clear_page_dirty_for_io(vmpage);
-                        /*
-                         * This actually clears the dirty bit in the radix
-                         * tree.
-                         */
-                        set_page_writeback(vmpage);
-
-                        CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
-                        result = 0;
-                } else
-                        /*
-                         * Page was concurrently truncated.
-                         */
-                        LASSERT(pg->cp_state == CPS_FREEING);
-        }
-        RETURN(result);
+       cfs_page_t *vmpage = cl2vm_page(slice);
+       struct cl_page *pg = slice->cpl_page;
+       int result = 0;
+
+       lock_page(vmpage);
+       if (clear_page_dirty_for_io(vmpage)) {
+               LASSERT(pg->cp_state == CPS_CACHED);
+               /* This actually clears the dirty bit in the radix
+                * tree. */
+               set_page_writeback(vmpage);
+               vvp_write_pending(cl2ccc(slice->cpl_obj),
+                               cl2ccc_page(slice));
+               CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
+       } else if (pg->cp_state == CPS_PAGEOUT) {
+               /* is it possible for osc_flush_async_page() to already
+                * make it ready? */
+               result = -EALREADY;
+       } else {
+               CL_PAGE_DEBUG(D_ERROR, env, pg, "Unexpecting page state %d.\n",
+                             pg->cp_state);
+               LBUG();
+       }
+       unlock_page(vmpage);
+       RETURN(result);
 }
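vvp_page_make_ready() now blocks in lock_page() rather than giving up with -EAGAIN on contention, so the outcomes reduce to three cases: the page was still dirty and writeback is started (0), it was already readied for pageout by someone else (-EALREADY), or any other state is treated as a bug. The toy function below only records that return-code contract; the state names and the dirty flag are placeholders rather than the real cl_page states.

#include <errno.h>
#include <stdio.h>

/* Placeholder states; only the two the patch distinguishes matter here. */
enum model_page_state { MODEL_CACHED, MODEL_PAGEOUT, MODEL_OTHER };

static int make_ready_model(int was_dirty, enum model_page_state state)
{
	if (was_dirty)
		return 0;               /* dirty bit cleared, start writeback */
	if (state == MODEL_PAGEOUT)
		return -EALREADY;       /* someone else already readied it */
	return -EINVAL;                 /* the real code treats this as a bug (LBUG) */
}

int main(void)
{
	printf("%d %d %d\n",
	       make_ready_model(1, MODEL_CACHED),
	       make_ready_model(0, MODEL_PAGEOUT),
	       make_ready_model(0, MODEL_OTHER));
	return 0;
}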
 
 static int vvp_page_print(const struct lu_env *env,
@@ -416,43 +417,44 @@ static const struct cl_page_operations vvp_page_ops = {
 
 static void vvp_transient_page_verify(const struct cl_page *page)
 {
-        struct inode *inode = ccc_object_inode(page->cp_obj);
+       struct inode *inode = ccc_object_inode(page->cp_obj);
 
-        LASSERT(!TRYLOCK_INODE_MUTEX(inode));
-        /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
+       LASSERT(!mutex_trylock(&inode->i_mutex));
+       /* LASSERT_SEM_LOCKED(&inode->i_alloc_sem); */
 }
 
-static void vvp_transient_page_own(const struct lu_env *env,
-                                   const struct cl_page_slice *slice,
-                                   struct cl_io *_)
+static int vvp_transient_page_own(const struct lu_env *env,
+                                  const struct cl_page_slice *slice,
+                                  struct cl_io *unused, int nonblock)
 {
         vvp_transient_page_verify(slice->cpl_page);
+        return 0;
 }
 
 static void vvp_transient_page_assume(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
-                                      struct cl_io *_)
+                                      struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_unassume(const struct lu_env *env,
                                         const struct cl_page_slice *slice,
-                                        struct cl_io *_)
+                                        struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_disown(const struct lu_env *env,
                                       const struct cl_page_slice *slice,
-                                      struct cl_io *_)
+                                      struct cl_io *unused)
 {
         vvp_transient_page_verify(slice->cpl_page);
 }
 
 static void vvp_transient_page_discard(const struct lu_env *env,
                                        const struct cl_page_slice *slice,
-                                       struct cl_io *_)
+                                       struct cl_io *unused)
 {
         struct cl_page *page = slice->cpl_page;
 
@@ -465,37 +467,35 @@ static void vvp_transient_page_discard(const struct lu_env *env,
 }
 
 static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
-                                          const struct cl_page_slice *slice)
+                                         const struct cl_page_slice *slice)
 {
-        struct inode    *inode = ccc_object_inode(slice->cpl_obj);
-        int              locked;
+       struct inode    *inode = ccc_object_inode(slice->cpl_obj);
+       int     locked;
 
-        locked = !TRYLOCK_INODE_MUTEX(inode);
-        if (!locked)
-                UNLOCK_INODE_MUTEX(inode);
-        return locked ? -EBUSY : -ENODATA;
+       locked = !mutex_trylock(&inode->i_mutex);
+       if (!locked)
+               mutex_unlock(&inode->i_mutex);
+       return locked ? -EBUSY : -ENODATA;
 }
 
 static void
-vvp_transient_page_completion_write(const struct lu_env *env,
-                                    const struct cl_page_slice *slice,
-                                    int ioret)
+vvp_transient_page_completion(const struct lu_env *env,
+                              const struct cl_page_slice *slice,
+                              int ioret)
 {
         vvp_transient_page_verify(slice->cpl_page);
-        vvp_page_completion_write_common(env, slice, ioret);
 }
 
-
 static void vvp_transient_page_fini(const struct lu_env *env,
-                                    struct cl_page_slice *slice)
+                                   struct cl_page_slice *slice)
 {
-        struct ccc_page *cp = cl2ccc_page(slice);
-        struct cl_page *clp = slice->cpl_page;
-        struct ccc_object *clobj = cl2ccc(clp->cp_obj);
+       struct ccc_page *cp = cl2ccc_page(slice);
+       struct cl_page *clp = slice->cpl_page;
+       struct ccc_object *clobj = cl2ccc(clp->cp_obj);
 
-        vvp_page_fini_common(cp);
-        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
-        clobj->cob_transient_pages--;
+       vvp_page_fini_common(cp);
+       LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
+       clobj->cob_transient_pages--;
 }
 
 static const struct cl_page_operations vvp_transient_page_ops = {
@@ -512,45 +512,45 @@ static const struct cl_page_operations vvp_transient_page_ops = {
         .io = {
                 [CRT_READ] = {
                         .cpo_prep        = ccc_transient_page_prep,
-                        .cpo_completion  = vvp_page_completion_read,
+                        .cpo_completion  = vvp_transient_page_completion,
                 },
                 [CRT_WRITE] = {
                         .cpo_prep        = ccc_transient_page_prep,
-                        .cpo_completion  = vvp_transient_page_completion_write,
+                        .cpo_completion  = vvp_transient_page_completion,
                 }
         }
 };
 
 struct cl_page *vvp_page_init(const struct lu_env *env, struct cl_object *obj,
-                              struct cl_page *page, cfs_page_t *vmpage)
+                             struct cl_page *page, cfs_page_t *vmpage)
 {
-        struct ccc_page *cpg;
-        int result;
-
-        CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
-        OBD_SLAB_ALLOC_PTR(cpg, vvp_page_kmem);
-        if (cpg != NULL) {
-                cpg->cpg_page = vmpage;
-                page_cache_get(vmpage);
-
-                CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
-                if (page->cp_type == CPT_CACHEABLE) {
-                        SetPagePrivate(vmpage);
-                        vmpage->private = (unsigned long)page;
-                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
-                                          &vvp_page_ops);
-                } else {
-                        struct ccc_object *clobj = cl2ccc(obj);
-
-                        LASSERT(!TRYLOCK_INODE_MUTEX(clobj->cob_inode));
-                        cl_page_slice_add(page, &cpg->cpg_cl, obj,
-                                          &vvp_transient_page_ops);
-                        clobj->cob_transient_pages++;
-                }
-                result = 0;
-        } else
-                result = -ENOMEM;
-        return ERR_PTR(result);
+       struct ccc_page *cpg;
+       int result;
+
+       CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+
+       OBD_SLAB_ALLOC_PTR_GFP(cpg, vvp_page_kmem, CFS_ALLOC_IO);
+       if (cpg != NULL) {
+               cpg->cpg_page = vmpage;
+               page_cache_get(vmpage);
+
+               CFS_INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+               if (page->cp_type == CPT_CACHEABLE) {
+                       SetPagePrivate(vmpage);
+                       vmpage->private = (unsigned long)page;
+                       cl_page_slice_add(page, &cpg->cpg_cl, obj,
+                                         &vvp_page_ops);
+               } else {
+                       struct ccc_object *clobj = cl2ccc(obj);
+
+                       LASSERT(!mutex_trylock(&clobj->cob_inode->i_mutex));
+                       cl_page_slice_add(page, &cpg->cpg_cl, obj,
+                                         &vvp_transient_page_ops);
+                       clobj->cob_transient_pages++;
+               }
+               result = 0;
+       } else
+               result = -ENOMEM;
+       return ERR_PTR(result);
 }