b=19906
diff --git a/lustre/obdclass/cl_page.c b/lustre/obdclass/cl_page.c
index bbca002..55887f9 100644
@@ -179,7 +179,7 @@ struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
 EXPORT_SYMBOL(cl_page_lookup);
 
 /**
- * Returns a list of pages by a given [start, end] of @obj.
+ * Returns a list of pages by a given [start, end] of \a obj.
  *
  * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
  * crucial in the face of [offset, EOF] locks.
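For reference, the batched lookup that this comment refers to typically looks like the sketch below. It is illustrative only (the batch size and the coh_tree field name are assumptions, not part of this patch), but it shows why one gang lookup per batch beats probing every index of an [offset, EOF] extent one at a time.

#define CLP_GANG_NR 16

/* Illustrative sketch only: visit the pages of hdr in [start, end] in
 * batches of CLP_GANG_NR, holding coh_page_guard across each batch. */
static void gang_scan_sketch(struct cl_object_header *hdr,
                             pgoff_t start, pgoff_t end)
{
        struct cl_page *pvec[CLP_GANG_NR];
        unsigned int nr;
        unsigned int i;
        pgoff_t idx = start;

        do {
                spin_lock(&hdr->coh_page_guard);
                nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
                                            idx, CLP_GANG_NR);
                for (i = 0; i < nr; i++) {
                        if (pvec[i]->cp_index > end) {
                                nr = 0;         /* past the range, stop */
                                break;
                        }
                        idx = pvec[i]->cp_index + 1;
                        /* process pvec[i] here, or take a reference and
                         * defer the work until after the unlock */
                }
                spin_unlock(&hdr->coh_page_guard);
        } while (nr == CLP_GANG_NR);
}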
@@ -628,10 +628,22 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
 
         ENTRY;
         CL_PAGE_HEADER(D_TRACE, env, page, "%i\n", atomic_read(&page->cp_ref));
-        hdr = cl_object_header(page->cp_obj);
-        if (atomic_dec_and_test(&page->cp_ref)) {
+
+        hdr = cl_object_header(cl_object_top(page->cp_obj));
+        if (atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
                 atomic_dec(&site->cs_pages.cs_busy);
+                /* We're going to access the page w/o a reference, but it's
+                 * ok because we have grabbed the lock coh_page_guard, which
+                 * means nobody is able to free this page behind us.
+                 */
                 if (page->cp_state == CPS_FREEING) {
+                        /* The reference is dropped and the page state is
+                         * checked while coh_page_guard is held, so if we get
+                         * here this really is the last reference to the page.
+                         */
+                        spin_unlock(&hdr->coh_page_guard);
+
+                        LASSERT(atomic_read(&page->cp_ref) == 0);
                         PASSERT(env, page, page->cp_owner == NULL);
                         PASSERT(env, page, list_empty(&page->cp_batch));
                         /*
@@ -639,8 +651,13 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
                          * it down.
                          */
                         cl_page_free(env, page);
+
+                        EXIT;
+                        return;
                 }
+                spin_unlock(&hdr->coh_page_guard);
         }
+
         EXIT;
 }
 EXPORT_SYMBOL(cl_page_put);
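The core of the cl_page_put() change above is the decrement-and-lock idiom: the last reference has to be dropped while coh_page_guard is held, so that no concurrent lookup can find and free the page in the window between the decrement and the teardown. Below is a minimal user-space sketch of that idiom; the names dec_and_lock, ref and guard are made up for illustration, and in the kernel this is what atomic_dec_and_lock() provides.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Illustrative user-space analogue of atomic_dec_and_lock(): drop one
 * reference and, only when it was the last one, return true with the
 * guard mutex held so the caller can safely tear the object down. */
static bool dec_and_lock(atomic_int *ref, pthread_mutex_t *guard)
{
        int old = atomic_load(ref);

        /* Fast path: while more than one reference remains, a plain
         * decrement needs no lock at all. */
        while (old > 1) {
                if (atomic_compare_exchange_weak(ref, &old, old - 1))
                        return false;
        }

        /* Slow path: take the guard first, then decrement; hitting zero
         * while holding the guard means nobody can free or resurrect
         * the object behind our back. */
        pthread_mutex_lock(guard);
        if (atomic_fetch_sub(ref, 1) == 1)
                return true;            /* last reference, guard still held */
        pthread_mutex_unlock(guard);
        return false;
}

With this in place, cl_page_put() above tears the page down only when it finds cp_state == CPS_FREEING while still holding coh_page_guard, so no lookup path can hand out a new reference behind it.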
@@ -674,6 +691,7 @@ EXPORT_SYMBOL(cl_page_vmpage);
 struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
 {
         struct cl_page *page;
+        struct cl_object_header *hdr;
 
         ENTRY;
         KLASSERT(PageLocked(vmpage));
@@ -688,6 +706,8 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
          * This loop assumes that ->private points to the top-most page. This
          * can be rectified easily.
          */
+        hdr = cl_object_header(cl_object_top(obj));
+        spin_lock(&hdr->coh_page_guard);
         for (page = (void *)vmpage->private;
              page != NULL; page = page->cp_child) {
                 if (cl_object_same(page->cp_obj, obj)) {
@@ -695,6 +715,7 @@ struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
                         break;
                 }
         }
+        spin_unlock(&hdr->coh_page_guard);
         LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
         RETURN(page);
 }
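The cl_vmpage_page() change is the complementary half of the same scheme: the walk of the vmpage->private / cp_child chain now runs under coh_page_guard, so the reference is taken before the lock is dropped and the looked-up page cannot be freed in between. A small user-space sketch of that lookup-and-get pattern follows; struct obj, find_and_get() and its fields are illustrative only.

#include <pthread.h>
#include <stdatomic.h>

/* Illustrative counterpart to dec_and_lock() above: the lookup walks a
 * chain and takes its reference while the guard is held, so the result
 * cannot be freed between the lookup and the caller receiving it. */
struct obj {
        atomic_int   ref;
        struct obj  *next;
        int          key;
};

static struct obj *find_and_get(struct obj *head, int key,
                                pthread_mutex_t *guard)
{
        struct obj *o;

        pthread_mutex_lock(guard);
        for (o = head; o != NULL; o = o->next) {
                if (o->key == key) {
                        atomic_fetch_add(&o->ref, 1);
                        break;
                }
        }
        pthread_mutex_unlock(guard);
        return o;       /* NULL if not found, otherwise referenced */
}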
@@ -1192,7 +1213,7 @@ EXPORT_SYMBOL(cl_page_unmap);
  *
  * Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
  * layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the @uptodate argument.
+ * by the \a uptodate argument.
  *
  * \see cl_page_operations::cpo_export()
  */
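For context, the uptodate handling described by this comment lives in the layer that owns the VM page (the vvp/ccc code, not this file). A hedged sketch of what such a cpo_export() method might look like is below; example_page_export() is a made-up name, and the assumption is only that the slice's page can be mapped back to its VM page via cl_page_vmpage().

/* Illustrative cpo_export() method: the layer that owns the VM page
 * translates the uptodate argument into the kernel page flag. */
static void example_page_export(const struct lu_env *env,
                                const struct cl_page_slice *slice,
                                int uptodate)
{
        cfs_page_t *vmpage = cl_page_vmpage(env, slice->cpl_page);

        if (uptodate)
                SetPageUptodate(vmpage);
        else
                ClearPageUptodate(vmpage);
}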