EXPORT_SYMBOL(cl_page_lookup);
/**
- * Returns a list of pages by a given [start, end] of @obj.
+ * Returns a list of pages by a given [start, end] of \a obj.
*
* Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
* crucial in the face of [offset, EOF] locks.
* for osc, in case of ...
*/
PASSERT(env, page, slice != NULL);
+
page = slice->cpl_page;
/*
* Can safely call cl_page_get_trust() under
*
* \see cl_object_find(), cl_lock_find()
*/
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
+static struct cl_page *cl_page_find0(const struct lu_env *env,
+ struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type,
+ struct cl_page *parent)
{
struct cl_page *page;
struct cl_page *ghost = NULL;
* consistent even when VM locking is somehow busted,
* which is very useful during diagnosing and debugging.
*/
+ page = ERR_PTR(err);
if (err == -EEXIST) {
/*
* XXX in case of a lookup for CPT_TRANSIENT page,
spin_lock(&hdr->coh_page_guard);
page = ERR_PTR(-EBUSY);
}
- } else
- page = ERR_PTR(err);
- } else
+ }
+ } else {
+ if (parent) {
+ LASSERT(page->cp_parent == NULL);
+ page->cp_parent = parent;
+ parent->cp_child = page;
+ }
hdr->coh_pages++;
+ }
spin_unlock(&hdr->coh_page_guard);
if (unlikely(ghost != NULL)) {
}
RETURN(page);
}
+
+/**
+ * Looks up (or creates) a top-level cl_page for \a vmpage at index \a idx
+ * in object \a o.
+ *
+ * Thin public wrapper: delegates to cl_page_find0() with a NULL parent,
+ * i.e. the resulting page is a top page, not a sub-page of another one.
+ *
+ * \see cl_page_find_sub()
+ */
+struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type)
+{
+ return cl_page_find0(env, o, idx, vmpage, type, NULL);
+}
EXPORT_SYMBOL(cl_page_find);
+
+/**
+ * Looks up (or creates) a sub-page of \a parent for \a vmpage at index
+ * \a idx in object \a o.
+ *
+ * Delegates to cl_page_find0(), inheriting the page type from
+ * \a parent->cp_type; cl_page_find0() links the new page into the
+ * parent/child chain (cp_parent/cp_child).
+ *
+ * NOTE(review): \a parent is dereferenced without a NULL check — callers
+ * must guarantee a valid parent; confirm against call sites.
+ *
+ * \see cl_page_find()
+ */
+struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ struct cl_page *parent)
+{
+ return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
+}
+EXPORT_SYMBOL(cl_page_find_sub);
+
static inline int cl_page_invariant(const struct cl_page *pg)
{
struct cl_object_header *header;
ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%i\n", atomic_read(&page->cp_ref));
- hdr = cl_object_header(page->cp_obj);
- if (atomic_dec_and_test(&page->cp_ref)) {
+
+ hdr = cl_object_header(cl_object_top(page->cp_obj));
+ if (atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
atomic_dec(&site->cs_pages.cs_busy);
+ /* We're going to access the page w/o a reference, but it's
+ * ok because we have grabbed the lock coh_page_guard, which
+ * means nobody is able to free this page behind us.
+ */
if (page->cp_state == CPS_FREEING) {
+ /* We drop the page reference and check the page state
+ * inside the coh_page_guard. So that if it gets here,
+ * it is the REALLY last reference to this page.
+ */
+ spin_unlock(&hdr->coh_page_guard);
+
+ LASSERT(atomic_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
PASSERT(env, page, list_empty(&page->cp_batch));
/*
* it down.
*/
cl_page_free(env, page);
+
+ EXIT;
+ return;
}
+ spin_unlock(&hdr->coh_page_guard);
}
+
EXIT;
}
EXPORT_SYMBOL(cl_page_put);
struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
{
struct cl_page *page;
+ struct cl_object_header *hdr;
ENTRY;
KLASSERT(PageLocked(vmpage));
* This loop assumes that ->private points to the top-most page. This
* can be rectified easily.
*/
+ hdr = cl_object_header(cl_object_top(obj));
+ spin_lock(&hdr->coh_page_guard);
for (page = (void *)vmpage->private;
page != NULL; page = page->cp_child) {
if (cl_object_same(page->cp_obj, obj)) {
break;
}
}
+ spin_unlock(&hdr->coh_page_guard);
LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
RETURN(page);
}
*
* Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
* layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the @uptodate argument.
+ * by the \a uptodate argument.
*
* \see cl_page_operations::cpo_export()
*/
void cl_page_completion(const struct lu_env *env,
struct cl_page *pg, enum cl_req_type crt, int ioret)
{
+ struct cl_sync_io *anchor = pg->cp_sync_io;
+
PASSERT(env, pg, crt < CRT_NR);
/* cl_page::cp_req already cleared by the caller (osc_completion()) */
PASSERT(env, pg, pg->cp_req == NULL);
CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
(const struct lu_env *,
const struct cl_page_slice *, int), ioret);
- if (pg->cp_sync_io) {
- cl_sync_io_note(pg->cp_sync_io, ioret);
+ if (anchor) {
+ LASSERT(pg->cp_sync_io == anchor);
pg->cp_sync_io = NULL;
+ cl_sync_io_note(anchor, ioret);
}
/* Don't assert the page writeback bit here because the lustre file