LU-744 obdclass: revise cl_page refcount
author    Gregoire Pichon <gregoire.pichon@bull.net>
          Tue, 12 Feb 2013 13:52:54 +0000 (14:52 +0100)
committer Oleg Drokin <oleg.drokin@intel.com>
          Wed, 6 Mar 2013 06:17:20 +0000 (01:17 -0500)
By holding a refcount on behalf of the radix tree, we can remove a
couple of spin-lock and atomic operations from the cl_page code.
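
The idea, in a minimal user-space C sketch (illustrative only, not the
Lustre code itself; all names here are hypothetical): once the radix
tree pins each cached page with its own reference, the final release
can use a plain atomic decrement-and-test instead of taking a per-page
spinlock on every put.

#include <assert.h>
#include <stdatomic.h>
#include <stdlib.h>

struct page {
        atomic_int ref;      /* stands in for cl_page::cp_ref */
        int        freeing;  /* stands in for the CPS_FREEING state */
};

/* Allocation of a cacheable page: one reference for the creator,
 * one for the radix tree it will be inserted into. */
static struct page *page_alloc_cached(void)
{
        struct page *p = calloc(1, sizeof(*p));

        if (p != NULL)
                atomic_init(&p->ref, 2);
        return p;
}

/* Release: only the thread that drops the final reference frees the
 * page, and by then the page must already be marked freeing, because
 * the tree's reference is only dropped after the page leaves the tree. */
static void page_put(struct page *p)
{
        if (atomic_fetch_sub(&p->ref, 1) == 1) {
                assert(p->freeing);
                free(p);
        }
}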

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: Gregoire Pichon <gregoire.pichon@bull.net>
Change-Id: I4d8dc04b9b4ef066b54a90be3cfbd0c09d96c320
Reviewed-on: http://review.whamcloud.com/5429
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
lustre/include/cl_object.h
lustre/llite/rw26.c
lustre/obdclass/cl_page.c

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 2b99492..b75209a 100644
@@ -716,8 +716,6 @@ struct cl_page {
          * modified only internally within cl_page.c. Protected by a VM lock.
          */
         const enum cl_page_state cp_state;
-       /** Protect to get and put page, see cl_page_put and cl_vmpage_page */
-       cfs_spinlock_t           cp_lock;
         /**
          * Linkage of pages within some group. Protected by
          * cl_page::cp_mutex. */
@@ -1088,6 +1086,16 @@ do {                                                                          \
         }                                                                     \
 } while (0)
 
+static inline int __page_in_use(const struct cl_page *page, int refc)
+{
+       if (page->cp_type == CPT_CACHEABLE)
+               ++refc;
+       LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
+       return (cfs_atomic_read(&page->cp_ref) > refc);
+}
+#define cl_page_in_use(pg)       __page_in_use(pg, 1)
+#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+
 /** @} cl_page */
 
 /** \addtogroup cl_lock cl_lock
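
A user-space restatement of the __page_in_use() arithmetic added above,
exercised on an idle cacheable page (a sketch with hypothetical names,
assuming C11 atomics): for CPT_CACHEABLE pages the radix tree's reference
is treated as baseline, so cl_page_in_use() (the caller holds a reference
of its own) means cp_ref > 2, while cl_page_in_use_noref() means
cp_ref > 1.

#include <assert.h>
#include <stdatomic.h>

enum pg_type { PG_CACHEABLE, PG_TRANSIENT };

struct pg {
        enum pg_type type;
        atomic_int   ref;
};

static int pg_in_use(const struct pg *p, int refc)
{
        if (p->type == PG_CACHEABLE)
                ++refc;                 /* the radix tree's own reference */
        return atomic_load(&p->ref) > refc;
}

int main(void)
{
        /* An idle cacheable page: one reference held by the caller,
         * one held by the radix tree. */
        struct pg p = { .type = PG_CACHEABLE };

        atomic_init(&p.ref, 2);
        assert(!pg_in_use(&p, 1));      /* cl_page_in_use(): not busy */
        assert(pg_in_use(&p, 0));       /* cl_page_in_use_noref(): referenced */
        return 0;
}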
diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c
index bf5ca8a..29129cf 100644
@@ -178,7 +178,7 @@ static int ll_releasepage(struct page *vmpage, RELEASEPAGE_ARG_TYPE gfp_mask)
         page = cl_vmpage_page(vmpage, obj);
         result = page == NULL;
         if (page != NULL) {
-                if (cfs_atomic_read(&page->cp_ref) == 1) {
+               if (!cl_page_in_use(page)) {
                         result = 1;
                         cl_page_delete(env, page);
                 }
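
Why the helper is the equivalent test here (an inference from the hunks
above, not spelled out in the patch):

/* ll_releasepage() holds one reference from cl_vmpage_page(), and the
 * radix tree holds another, so an otherwise-idle cacheable page now
 * sits at cp_ref == 2.  That is exactly the boundary tested by
 * !cl_page_in_use(page), replacing the old cp_ref == 1 check that
 * predated the tree's own reference. */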
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index 7818746..213e2bc 100644
@@ -97,8 +97,9 @@ static struct lu_kmem_descr cl_page_caches[] = {
 #endif /* !INVARIANT_CHECK */
 
 /**
- * Internal version of cl_page_top, it should be called with page referenced,
- * or cp_lock held.
+ * Internal version of cl_page_top; it should be called only when the page
+ * is known not to be freed, e.g. with the page referenced, the radix tree
+ * lock held, or the page owned.
  */
 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
 {
@@ -120,11 +121,8 @@ static struct cl_page *cl_page_top_trusted(struct cl_page *page)
  */
 static void cl_page_get_trust(struct cl_page *page)
 {
-        /*
-         * Checkless version for trusted users.
-         */
-        if (cfs_atomic_inc_return(&page->cp_ref) == 1)
-                cfs_atomic_inc(&cl_object_site(page->cp_obj)->cs_pages.cs_busy);
+       LASSERT(cfs_atomic_read(&page->cp_ref) > 0);
+       cfs_atomic_inc(&page->cp_ref);
 }
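
The reworked helper drops the old 0 -> 1 busy-statistics bookkeeping
because a page reachable by a trusted caller can no longer have a zero
count; a minimal restatement of the invariant (illustrative, C11 atomics):

#include <assert.h>
#include <stdatomic.h>

/* "Trusted get": legal only while something else (a reference, the
 * radix tree lock, or page ownership) keeps the page alive, so the
 * counter can never be observed at zero here. */
static void get_trust(atomic_int *ref)
{
        assert(atomic_load(ref) > 0);
        atomic_fetch_add(ref, 1);
}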
 
 /**
@@ -138,11 +136,6 @@ cl_page_at_trusted(const struct cl_page *page,
                    const struct lu_device_type *dtype)
 {
         const struct cl_page_slice *slice;
-
-#ifdef INVARIANT_CHECK
-        if (!cfs_atomic_read(&page->cp_ref))
-               LASSERT_SPIN_LOCKED(&page->cp_lock);
-#endif
         ENTRY;
 
         page = cl_page_top_trusted((struct cl_page *)page);
@@ -169,9 +162,8 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
         LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
 
         page = radix_tree_lookup(&hdr->coh_tree, index);
-        if (page != NULL) {
+       if (page != NULL)
                 cl_page_get_trust(page);
-        }
         return page;
 }
 EXPORT_SYMBOL(cl_page_lookup);
@@ -342,13 +334,14 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
         OBD_SLAB_ALLOC_PTR_GFP(page, cl_page_kmem, CFS_ALLOC_IO);
         if (page != NULL) {
                 cfs_atomic_set(&page->cp_ref, 1);
+               if (type == CPT_CACHEABLE) /* for radix tree */
+                       cfs_atomic_inc(&page->cp_ref);
                 page->cp_obj = o;
                 cl_object_get(o);
                 page->cp_obj_ref = lu_object_ref_add(&o->co_lu,
                                                      "cl_page", page);
                 page->cp_index = ind;
                 cl_page_state_set_trust(page, CPS_CACHED);
-               cfs_spin_lock_init(&page->cp_lock);
                 page->cp_type = type;
                 CFS_INIT_LIST_HEAD(&page->cp_layers);
                 CFS_INIT_LIST_HEAD(&page->cp_batch);
@@ -370,7 +363,6 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                         }
                 }
                 if (err == NULL) {
-                        cfs_atomic_inc(&site->cs_pages.cs_busy);
                         cfs_atomic_inc(&site->cs_pages.cs_total);
 
 #ifdef LUSTRE_PAGESTATE_TRACKING
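
The reference ledger for a freshly allocated CPT_CACHEABLE page, as the
two hunks above read (a sketch of my reading, not text from the patch):

/*
 * cp_ref == 2 right after cl_page_alloc():
 *
 *   1 reference returned to the caller instantiating the page;
 *   1 reference held on behalf of the radix tree the page is about
 *     to be inserted into.
 *
 * The tree's reference is what lets cl_page_lookup() hand out new
 * references under coh_page_guard alone, with no per-page cp_lock.
 */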
@@ -420,10 +412,8 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
         /* fast path. */
         if (type == CPT_CACHEABLE) {
-               /* cl_page::cp_lock is used to protect the page state and
-                * refcount, but need an external lock to protect the
-                * child/parent relationship, so vmpage lock must be held for
-                * this purpose. */
+               /* vmpage lock is used to protect the child/parent
+                * relationship */
                KLASSERT(PageLocked(vmpage));
                 /*
                  * cl_vmpage_page() can be called here without any locks as
@@ -496,7 +486,6 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
         cfs_spin_unlock(&hdr->coh_page_guard);
 
         if (unlikely(ghost != NULL)) {
-                cfs_atomic_dec(&site->cs_pages.cs_busy);
                 cl_page_delete0(env, ghost, 0);
                 cl_page_free(env, ghost);
         }
@@ -537,7 +526,7 @@ static inline int cl_page_invariant(const struct cl_page *pg)
         child  = pg->cp_child;
         owner  = pg->cp_owner;
 
-        return cfs_atomic_read(&pg->cp_ref) > 0 &&
+       return cl_page_in_use(pg) &&
                 ergo(parent != NULL, parent->cp_child == pg) &&
                 ergo(child != NULL, child->cp_parent == pg) &&
                 ergo(child != NULL, pg->cp_obj != child->cp_obj) &&
@@ -659,40 +648,23 @@ EXPORT_SYMBOL(cl_page_get);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_site *site = cl_object_site(page->cp_obj);
-
         PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
 
         ENTRY;
         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                        cfs_atomic_read(&page->cp_ref));
 
-       if (cfs_atomic_dec_and_lock(&page->cp_ref, &page->cp_lock)) {
-                cfs_atomic_dec(&site->cs_pages.cs_busy);
-                /* We're going to access the page w/o a reference, but it's
-                * ok because we have grabbed the lock cp_lock, which
-                 * means nobody is able to free this page behind us.
-                 */
-                if (page->cp_state == CPS_FREEING) {
-                        /* We drop the page reference and check the page state
-                         * inside the cp_lock. So that if it gets here,
-                         * it is the REALLY last reference to this page.
-                         */
-                       cfs_spin_unlock(&page->cp_lock);
-
-                        LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
-                        PASSERT(env, page, page->cp_owner == NULL);
-                        PASSERT(env, page, cfs_list_empty(&page->cp_batch));
-                        /*
-                         * Page is no longer reachable by other threads. Tear
-                         * it down.
-                         */
-                        cl_page_free(env, page);
-
-                        EXIT;
-                        return;
-                }
-                cfs_spin_unlock(&page->cp_lock);
+       if (cfs_atomic_dec_and_test(&page->cp_ref)) {
+               LASSERT(page->cp_state == CPS_FREEING);
+
+               LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
+               PASSERT(env, page, page->cp_owner == NULL);
+               PASSERT(env, page, cfs_list_empty(&page->cp_batch));
+               /*
+                * Page is no longer reachable by other threads. Tear
+                * it down.
+                */
+               cl_page_free(env, page);
         }
 
         EXIT;
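
The before/after shapes of the final-release path, condensed from the
hunk above (error handling and tracing omitted):

/* Before: the final decrement had to happen inside cp_lock so that a
 * concurrent cl_vmpage_page() could not re-take the page between the
 * count reaching zero and the free:
 *
 *      if (cfs_atomic_dec_and_lock(&page->cp_ref, &page->cp_lock)) {
 *              if (page->cp_state == CPS_FREEING) {
 *                      cfs_spin_unlock(&page->cp_lock);
 *                      cl_page_free(env, page);
 *                      return;
 *              }
 *              cfs_spin_unlock(&page->cp_lock);
 *      }
 *
 * After: the radix tree pins every cacheable page with its own
 * reference, so cp_ref can only reach zero once the page has left the
 * tree and is in CPS_FREEING; a lock-free decrement-and-test suffices:
 *
 *      if (cfs_atomic_dec_and_test(&page->cp_ref)) {
 *              LASSERT(page->cp_state == CPS_FREEING);
 *              cl_page_free(env, page);
 *      }
 */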
@@ -747,14 +719,12 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
        if (top == NULL)
                RETURN(NULL);
 
-       cfs_spin_lock(&top->cp_lock);
        for (page = top; page != NULL; page = page->cp_child) {
                 if (cl_object_same(page->cp_obj, obj)) {
                         cl_page_get_trust(page);
                         break;
                 }
         }
-       cfs_spin_unlock(&top->cp_lock);
         LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
         RETURN(page);
 }
@@ -1156,6 +1126,9 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
         cl_page_export(env, pg, 0);
         cl_page_state_set0(env, pg, CPS_FREEING);
 
+       CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
+                      (const struct lu_env *, const struct cl_page_slice *));
+
         if (tmp->cp_type == CPT_CACHEABLE) {
                 if (!radix)
                         /* !radix means that @pg is not yet in the radix tree,
@@ -1174,11 +1147,10 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
                         PASSERT(env, tmp, hdr->coh_pages > 0);
                         hdr->coh_pages--;
                         cfs_spin_unlock(&hdr->coh_page_guard);
+                       cl_page_put(env, tmp);
                 }
         }
 
-        CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
-                       (const struct lu_env *, const struct cl_page_slice *));
         EXIT;
 }
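
Putting the two cl_page_delete0() hunks together, the teardown of a
cacheable page now runs in this order (a summary of the diff, with the
unchanged steps paraphrased):

/*
 * cl_page_delete0(env, pg, radix):
 *
 *   1. cl_page_export(env, pg, 0);
 *   2. cl_page_state_set0(env, pg, CPS_FREEING);
 *   3. CL_PAGE_INVOID(..., cpo_delete, ...)  -- moved before the
 *      radix-tree removal, while the page is still reachable;
 *   4. for CPT_CACHEABLE pages found in the tree:
 *        radix_tree_delete(&hdr->coh_tree, ...);
 *        hdr->coh_pages--;
 *        cfs_spin_unlock(&hdr->coh_page_guard);
 *        cl_page_put(env, tmp);              -- drop the radix tree's
 *           reference; if it was the last one, this is where the page
 *           is actually freed.
 */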