LU-1666 obdclass: reduce lock contention on coh_page_guard
Author:     Gregoire Pichon <gregoire.pichon@bull.net>
AuthorDate: Tue, 12 Feb 2013 13:40:49 +0000 (14:40 +0100)
Commit:     Oleg Drokin <oleg.drokin@intel.com>
CommitDate: Thu, 21 Feb 2013 04:24:05 +0000 (23:24 -0500)
Define a per-page spinlock to get and put a cl_page, instead of
grabbing the per-object lock coh_page_guard. A sketch of the
locking pattern appears below.

Signed-off-by: Jinshan Xiong <jinshan.xiong@intel.com>
Signed-off-by: Gregoire Pichon <gregoire.pichon@bull.net>
Change-Id: I3c63f0e33f3d4d44fbb5de06bcd0670e76b872bd
Reviewed-on: http://review.whamcloud.com/5428
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
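
The core of the change is replacing one per-object spinlock with a
per-page spinlock combined with the atomic-dec-and-lock idiom. The
following is a minimal, self-contained C sketch of that idiom, not
Lustre code: my_page, my_page_alloc(), my_page_put() and dec_and_lock()
are hypothetical names, and POSIX spinlocks with C11 atomics stand in
for cfs_spinlock_t and cfs_atomic_t.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

enum my_state { MY_CACHED, MY_FREEING };

struct my_page {
        atomic_int         ref;    /* like cl_page::cp_ref   */
        pthread_spinlock_t lock;   /* like cl_page::cp_lock  */
        enum my_state      state;  /* like cl_page::cp_state */
};

struct my_page *my_page_alloc(void)
{
        struct my_page *pg = calloc(1, sizeof(*pg));

        if (pg != NULL) {
                atomic_init(&pg->ref, 1);
                pthread_spin_init(&pg->lock, PTHREAD_PROCESS_PRIVATE);
                pg->state = MY_CACHED;
        }
        return pg;
}

/* Decrement *cnt; iff it reaches zero, return 1 with *lock held.
 * Mirrors the semantics of cfs_atomic_dec_and_lock(). */
static int dec_and_lock(atomic_int *cnt, pthread_spinlock_t *lock)
{
        int old = atomic_load(cnt);

        /* Fast path: not the last reference, so a plain atomic
         * decrement suffices and the lock is never touched. */
        while (old > 1)
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return 0;

        /* Slow path: possibly the last reference; take the lock first
         * so nobody can free the page behind our back. */
        pthread_spin_lock(lock);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return 1;
        pthread_spin_unlock(lock);
        return 0;
}

void my_page_put(struct my_page *pg)
{
        /* With a per-page lock, final puts on different pages of the
         * same object no longer serialize on a single shared lock. */
        if (dec_and_lock(&pg->ref, &pg->lock)) {
                int freeing = (pg->state == MY_FREEING);

                pthread_spin_unlock(&pg->lock);
                if (freeing)
                        free(pg);   /* really the last reference */
                /* otherwise the page stays cached; it is freed later
                 * through the FREEING path */
        }
}

Taking the lock only when the count may hit zero keeps the common
get/put path lock-free; making that lock per-page means even the slow
paths of distinct pages stop contending, which is the point of the
patch.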
lustre/include/cl_object.h
lustre/obdclass/cl_page.c

diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 4f17e8a..2b99492 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -716,6 +716,8 @@ struct cl_page {
          * modified only internally within cl_page.c. Protected by a VM lock.
          */
         const enum cl_page_state cp_state;
+       /** Protects get and put of the page; see cl_page_put and cl_vmpage_page */
+       cfs_spinlock_t           cp_lock;
         /**
          * Linkage of pages within some group. Protected by
          * cl_page::cp_mutex. */
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index cc3458b..7818746 100644
--- a/lustre/obdclass/cl_page.c
+++ b/lustre/obdclass/cl_page.c
@@ -98,7 +98,7 @@ static struct lu_kmem_descr cl_page_caches[] = {
 
 /**
  * Internal version of cl_page_top, it should be called with page referenced,
- * or coh_page_guard held.
+ * or cp_lock held.
  */
 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
 {
@@ -140,10 +140,8 @@ cl_page_at_trusted(const struct cl_page *page,
         const struct cl_page_slice *slice;
 
 #ifdef INVARIANT_CHECK
-        struct cl_object_header *ch = cl_object_header(page->cp_obj);
-
         if (!cfs_atomic_read(&page->cp_ref))
-                LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
+               LASSERT_SPIN_LOCKED(&page->cp_lock);
 #endif
         ENTRY;
 
@@ -350,6 +348,7 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                                                      "cl_page", page);
                 page->cp_index = ind;
                 cl_page_state_set_trust(page, CPS_CACHED);
+               cfs_spin_lock_init(&page->cp_lock);
                 page->cp_type = type;
                 CFS_INIT_LIST_HEAD(&page->cp_layers);
                 CFS_INIT_LIST_HEAD(&page->cp_batch);
@@ -421,6 +420,11 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
         /* fast path. */
         if (type == CPT_CACHEABLE) {
+               /* cl_page::cp_lock is used to protect the page state and
+                * refcount, but an external lock is needed to protect the
+                * child/parent relationship, so the vmpage lock must be
+                * held for this purpose. */
+               KLASSERT(PageLocked(vmpage));
                 /*
                  * cl_vmpage_page() can be called here without any locks as
                  *
@@ -655,7 +659,6 @@ EXPORT_SYMBOL(cl_page_get);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_object_header *hdr;
         struct cl_site *site = cl_object_site(page->cp_obj);
 
         PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
@@ -664,19 +667,18 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                        cfs_atomic_read(&page->cp_ref));
 
-        hdr = cl_object_header(cl_object_top(page->cp_obj));
-        if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
+       if (cfs_atomic_dec_and_lock(&page->cp_ref, &page->cp_lock)) {
                 cfs_atomic_dec(&site->cs_pages.cs_busy);
                 /* We're going to access the page w/o a reference, but it's
-                 * ok because we have grabbed the lock coh_page_guard, which
+                * ok because we have grabbed the lock cp_lock, which
                  * means nobody is able to free this page behind us.
                  */
                 if (page->cp_state == CPS_FREEING) {
                         /* We drop the page reference and check the page state
-                         * inside the coh_page_guard. So that if it gets here,
+                        * under cp_lock, so if it gets here,
                          * it is the REALLY last reference to this page.
                          */
-                        cfs_spin_unlock(&hdr->coh_page_guard);
+                       cfs_spin_unlock(&page->cp_lock);
 
                         LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                         PASSERT(env, page, page->cp_owner == NULL);
@@ -690,7 +692,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
                         EXIT;
                         return;
                 }
-                cfs_spin_unlock(&hdr->coh_page_guard);
+                cfs_spin_unlock(&page->cp_lock);
         }
 
         EXIT;
@@ -725,8 +727,8 @@ EXPORT_SYMBOL(cl_page_vmpage);
  */
 struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
 {
-        struct cl_page *page;
-        struct cl_object_header *hdr;
+       struct cl_page *top;
+       struct cl_page *page;
 
         ENTRY;
         KLASSERT(PageLocked(vmpage));
@@ -741,16 +743,18 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
          * This loop assumes that ->private points to the top-most page. This
          * can be rectified easily.
          */
-        hdr = cl_object_header(cl_object_top(obj));
-        cfs_spin_lock(&hdr->coh_page_guard);
-        for (page = (void *)vmpage->private;
-             page != NULL; page = page->cp_child) {
+       top = (struct cl_page *)vmpage->private;
+       if (top == NULL)
+               RETURN(NULL);
+
+       cfs_spin_lock(&top->cp_lock);
+       for (page = top; page != NULL; page = page->cp_child) {
                 if (cl_object_same(page->cp_obj, obj)) {
                         cl_page_get_trust(page);
                         break;
                 }
         }
-        cfs_spin_unlock(&hdr->coh_page_guard);
+       cfs_spin_unlock(&top->cp_lock);
         LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
         RETURN(page);
 }
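
For the cl_vmpage_page() change above, here is a similarly hedged
sketch of the new lookup flow, again with hypothetical names
(my_vmpage_page, my_page) and POSIX primitives in place of the cfs_*
wrappers. The key assumption, taken from the comment the patch adds in
cl_page_find0(), is that the caller holds the VM page lock, which
keeps vmpage->private and the child chain stable.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct my_page {
        atomic_int          ref;    /* like cl_page::cp_ref   */
        pthread_spinlock_t  lock;   /* like cl_page::cp_lock  */
        const void         *obj;    /* like cl_page::cp_obj   */
        struct my_page     *child;  /* like cl_page::cp_child */
};

/* vm_private stands in for vmpage->private; the caller is assumed to
 * hold the VM page lock, which keeps it and the child chain stable. */
struct my_page *my_vmpage_page(void *vm_private, const void *obj)
{
        struct my_page *top = vm_private;
        struct my_page *pg;

        if (top == NULL)
                return NULL;

        /* Lock only the top page: its per-page lock now guards get and
         * put for every page in the chain, so no object-wide lock is
         * needed on this hot path. */
        pthread_spin_lock(&top->lock);
        for (pg = top; pg != NULL; pg = pg->child)
                if (pg->obj == obj) {
                        atomic_fetch_add(&pg->ref, 1);  /* cl_page_get_trust */
                        break;
                }
        pthread_spin_unlock(&top->lock);

        return pg;  /* NULL if no layer belongs to obj */
}

Locking only the top page works because cp_lock now serializes get and
put for the entire top-to-bottom chain, so the object-wide
coh_page_guard round trip disappears from this path.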