EXPORT_SYMBOL(cl_page_lookup);
/**
- * Returns a list of pages by a given [start, end] of @obj.
+ * Returns a list of pages by a given [start, end] of \a obj.
*
* Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
* crucial in the face of [offset, EOF] locks.
ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%i\n", atomic_read(&page->cp_ref));
- hdr = cl_object_header(page->cp_obj);
- if (atomic_dec_and_test(&page->cp_ref)) {
+
+ hdr = cl_object_header(cl_object_top(page->cp_obj));
+ if (atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
atomic_dec(&site->cs_pages.cs_busy);
+ /* We're going to access the page without holding a reference,
+ * but that is safe because we hold coh_page_guard, so nobody
+ * can free this page out from under us.
+ */
if (page->cp_state == CPS_FREEING) {
+ /* The reference is dropped and the page state is checked
+ * while holding coh_page_guard, so if we get here this is
+ * guaranteed to be the very last reference to the page.
+ */
+ spin_unlock(&hdr->coh_page_guard);
+
+ LASSERT(atomic_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
PASSERT(env, page, list_empty(&page->cp_batch));
/*
* it down.
*/
cl_page_free(env, page);
+
+ EXIT;
+ return;
}
+ spin_unlock(&hdr->coh_page_guard);
}
+
EXIT;
}
EXPORT_SYMBOL(cl_page_put);
struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
{
struct cl_page *page;
+ struct cl_object_header *hdr;
ENTRY;
KLASSERT(PageLocked(vmpage));
* This loop assumes that ->private points to the top-most page. This
* can be rectified easily.
*/
+ hdr = cl_object_header(cl_object_top(obj));
+ spin_lock(&hdr->coh_page_guard);
for (page = (void *)vmpage->private;
page != NULL; page = page->cp_child) {
if (cl_object_same(page->cp_obj, obj)) {
break;
}
}
+ spin_unlock(&hdr->coh_page_guard);
LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
RETURN(page);
}
*
* Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
* layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the @uptodate argument.
+ * by the \a uptodate argument.
*
* \see cl_page_operations::cpo_export()
*/
void cl_page_completion(const struct lu_env *env,
struct cl_page *pg, enum cl_req_type crt, int ioret)
{
+ struct cl_sync_io *anchor = pg->cp_sync_io;
+
PASSERT(env, pg, crt < CRT_NR);
/* cl_page::cp_req already cleared by the caller (osc_completion()) */
PASSERT(env, pg, pg->cp_req == NULL);
CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
(const struct lu_env *,
const struct cl_page_slice *, int), ioret);
- if (pg->cp_sync_io) {
- cl_sync_io_note(pg->cp_sync_io, ioret);
+ if (anchor) {
+ LASSERT(pg->cp_sync_io == anchor);
pg->cp_sync_io = NULL;
+ cl_sync_io_note(anchor, ioret);
}
/* Don't assert the page writeback bit here because the lustre file