LU-1666 obdclass: reduce lock contention on coh_page_guard
lustre/obdclass/cl_page.c
index b2cecd8..ea160eb 100644
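
The change below drops the single per-object spinlock coh_page_guard, which every page operation on a given cl_object had to take, in favor of a per-page spinlock cp_lock, so reference drops and lookups on different pages of the same object no longer serialize on one lock. The following userspace sketch only models that before/after contention pattern with POSIX spinlocks; all toy_* names are hypothetical stand-ins for the Lustre structures, not the real API.

/* Toy model of the locking change: "guarded" is the old scheme (one
 * lock per object), "put" is the new one (one lock per page). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct toy_object {
        pthread_spinlock_t coh_guard;   /* old: one lock per object */
};

struct toy_page {
        struct toy_object  *obj;
        pthread_spinlock_t  cp_lock;    /* new: one lock per page */
        atomic_int          cp_ref;
};

/* old scheme: puts on *all* pages of one object serialize here */
static void toy_page_put_guarded(struct toy_page *pg)
{
        pthread_spin_lock(&pg->obj->coh_guard);
        if (atomic_fetch_sub(&pg->cp_ref, 1) == 1) {
                /* last reference: nobody else can reach the page now */
                pthread_spin_unlock(&pg->obj->coh_guard);
                free(pg);
                return;
        }
        pthread_spin_unlock(&pg->obj->coh_guard);
}

/* new scheme: only puts on the *same* page contend */
static void toy_page_put(struct toy_page *pg)
{
        pthread_spin_lock(&pg->cp_lock);
        if (atomic_fetch_sub(&pg->cp_ref, 1) == 1) {
                pthread_spin_unlock(&pg->cp_lock);
                free(pg);
                return;
        }
        pthread_spin_unlock(&pg->cp_lock);
}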
@@ -92,7 +92,7 @@ static struct lu_kmem_descr cl_page_caches[] = {
 
 /**
  * Internal version of cl_page_top, it should be called with page referenced,
- * or coh_page_guard held.
+ * or cp_lock held.
  */
 static struct cl_page *cl_page_top_trusted(struct cl_page *page)
 {
@@ -134,10 +134,8 @@ cl_page_at_trusted(const struct cl_page *page,
         const struct cl_page_slice *slice;
 
 #ifdef INVARIANT_CHECK
-        struct cl_object_header *ch = cl_object_header(page->cp_obj);
-
         if (!cfs_atomic_read(&page->cp_ref))
-                LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
+                LASSERT_SPIN_LOCKED(&page->cp_lock);
 #endif
         ENTRY;
 
@@ -344,6 +342,7 @@ static int cl_page_alloc(const struct lu_env *env, struct cl_object *o,
                                                      "cl_page", page);
                 page->cp_index = ind;
                 cl_page_state_set_trust(page, CPS_CACHED);
+                cfs_spin_lock_init(&page->cp_lock);
                 page->cp_type = type;
                 CFS_INIT_LIST_HEAD(&page->cp_layers);
                 CFS_INIT_LIST_HEAD(&page->cp_batch);
@@ -415,6 +414,11 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
                idx, PFID(&hdr->coh_lu.loh_fid), vmpage, vmpage->private, type);
         /* fast path. */
         if (type == CPT_CACHEABLE) {
+                /* cl_page::cp_lock is used to protect the page state and
+                 * refcount, but an external lock is needed to protect the
+                 * child/parent relationship, so the vmpage lock must be
+                 * held for this purpose. */
+                KLASSERT(PageLocked(vmpage));
                 /*
                  * cl_vmpage_page() can be called here without any locks as
                  *
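
The new comment and assertion pin down the division of labor: cp_lock covers only a page's own state and refcount, while the parent/child chain hanging off vmpage->private is stabilized by the VM page lock, which is why the fast path now asserts PageLocked(vmpage). A minimal sketch of that rule follows, with hypothetical toy_* names; the assert plays the role of KLASSERT(PageLocked(vmpage)).

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_page {
        int              cp_state;   /* guarded by the page's own cp_lock */
        struct toy_page *cp_child;   /* guarded by the vmpage lock */
};

struct toy_vmpage {
        bool             locked;     /* stands in for PageLocked() */
        struct toy_page *private;    /* top-most page, like vmpage->private */
};

/* Walking the parent/child chain needs the vmpage lock, not cp_lock:
 * cp_lock only stabilizes one page's state and refcount. */
static struct toy_page *toy_find(struct toy_vmpage *vmpage, int wanted)
{
        struct toy_page *p;

        assert(vmpage->locked);      /* analogue of KLASSERT(PageLocked()) */
        for (p = vmpage->private; p != NULL; p = p->cp_child)
                if (p->cp_state == wanted)
                        return p;
        return NULL;
}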
@@ -648,7 +652,6 @@ EXPORT_SYMBOL(cl_page_get);
  */
 void cl_page_put(const struct lu_env *env, struct cl_page *page)
 {
-        struct cl_object_header *hdr;
         struct cl_site *site = cl_object_site(page->cp_obj);
 
         PASSERT(env, page, cfs_atomic_read(&page->cp_ref) > !!page->cp_parent);
@@ -657,19 +660,18 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
         CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
                        cfs_atomic_read(&page->cp_ref));
 
-        hdr = cl_object_header(cl_object_top(page->cp_obj));
-        if (cfs_atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
+        if (cfs_atomic_dec_and_lock(&page->cp_ref, &page->cp_lock)) {
                 cfs_atomic_dec(&site->cs_pages.cs_busy);
                 /* We're going to access the page w/o a reference, but it's
-                 * ok because we have grabbed the lock coh_page_guard, which
+                 * ok because we have grabbed the lock cp_lock, which
                  * means nobody is able to free this page behind us.
                  */
                 if (page->cp_state == CPS_FREEING) {
                         /* We drop the page reference and check the page state
-                         * inside the coh_page_guard. So that if it gets here,
+                         * inside the cp_lock. So that if it gets here,
                          * it is the REALLY last reference to this page.
                          */
-                        cfs_spin_unlock(&hdr->coh_page_guard);
+                        cfs_spin_unlock(&page->cp_lock);
 
                         LASSERT(cfs_atomic_read(&page->cp_ref) == 0);
                         PASSERT(env, page, page->cp_owner == NULL);
@@ -683,7 +685,7 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
                         EXIT;
                         return;
                 }
-                cfs_spin_unlock(&hdr->coh_page_guard);
+                cfs_spin_unlock(&page->cp_lock);
         }
 
         EXIT;
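
cl_page_put() leans on cfs_atomic_dec_and_lock(), the libcfs wrapper for the kernel's atomic_dec_and_lock(): the refcount is dropped locklessly while it stays above zero, and the spinlock is taken, and held on return, only for the final 1 to 0 transition, so the CPS_FREEING check and last-reference teardown happen atomically with respect to anyone else taking cp_lock. Below is a userspace emulation of that idiom using C11 atomics and a POSIX spinlock; dec_and_lock() and toy_page are hypothetical stand-ins, not the kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct toy_page {
        atomic_int         cp_ref;
        pthread_spinlock_t cp_lock;
        bool               freeing;  /* stands in for CPS_FREEING */
};

/* emulate atomic_dec_and_lock(): returns true, with the lock held,
 * only when the counter dropped to zero */
static bool dec_and_lock(atomic_int *ref, pthread_spinlock_t *lock)
{
        int old = atomic_load(ref);

        /* fast path: refcount stays positive, no lock traffic at all */
        while (old > 1)
                if (atomic_compare_exchange_weak(ref, &old, old - 1))
                        return false;

        pthread_spin_lock(lock);
        if (atomic_fetch_sub(ref, 1) == 1)
                return true;            /* dropped to zero, lock held */
        pthread_spin_unlock(lock);
        return false;
}

static void toy_page_put(struct toy_page *pg)
{
        if (dec_and_lock(&pg->cp_ref, &pg->cp_lock)) {
                if (pg->freeing) {
                        /* really the last reference: safe to tear down */
                        pthread_spin_unlock(&pg->cp_lock);
                        pthread_spin_destroy(&pg->cp_lock);
                        free(pg);
                        return;
                }
                pthread_spin_unlock(&pg->cp_lock);
        }
}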
@@ -718,8 +720,8 @@ EXPORT_SYMBOL(cl_page_vmpage);
  */
 struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
 {
-        struct cl_page *page;
-        struct cl_object_header *hdr;
+        struct cl_page *top;
+        struct cl_page *page;
 
         ENTRY;
         KLASSERT(PageLocked(vmpage));
@@ -734,16 +736,18 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
          * This loop assumes that ->private points to the top-most page. This
          * can be rectified easily.
          */
-        hdr = cl_object_header(cl_object_top(obj));
-        cfs_spin_lock(&hdr->coh_page_guard);
-        for (page = (void *)vmpage->private;
-             page != NULL; page = page->cp_child) {
+        top = (struct cl_page *)vmpage->private;
+        if (top == NULL)
+                RETURN(NULL);
+
+        cfs_spin_lock(&top->cp_lock);
+        for (page = top; page != NULL; page = page->cp_child) {
                 if (cl_object_same(page->cp_obj, obj)) {
                         cl_page_get_trust(page);
                         break;
                 }
         }
-        cfs_spin_unlock(&hdr->coh_page_guard);
+        cfs_spin_unlock(&top->cp_lock);
         LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
         RETURN(page);
 }
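
With the object-wide guard gone, cl_vmpage_page() now serializes on the cp_lock of the top page reached through vmpage->private, after checking it for NULL, and the KLASSERT(PageLocked(vmpage)) earlier in the function keeps ->private itself stable during the walk. A sketch of the reworked lookup under the same assumption that the caller holds the vmpage lock; toy_* names are hypothetical and toy_page_get() stands in for cl_page_get_trust().

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct toy_page {
        pthread_spinlock_t  cp_lock;
        atomic_int          cp_ref;
        int                 cp_obj;     /* stands in for the cl_object */
        struct toy_page    *cp_child;
};

static void toy_page_get(struct toy_page *pg)
{
        atomic_fetch_add(&pg->cp_ref, 1);
}

/* caller must hold the vmpage lock so that `top` cannot change under us */
static struct toy_page *toy_vmpage_page(struct toy_page *top, int obj)
{
        struct toy_page *pg;

        if (top == NULL)                /* no page attached to the vmpage yet */
                return NULL;

        pthread_spin_lock(&top->cp_lock);
        for (pg = top; pg != NULL; pg = pg->cp_child)
                if (pg->cp_obj == obj) {
                        toy_page_get(pg);   /* grab a ref before unlocking */
                        break;
                }
        pthread_spin_unlock(&top->cp_lock);
        return pg;
}

Because the walk takes only the top page's lock, concurrent lookups on different vmpages of the same object, which previously all funneled through coh_page_guard, now proceed in parallel.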