EXPORT_SYMBOL(cl_page_lookup);
/**
- * Returns a list of pages by a given [start, end] of @obj.
+ * Collects pages of \a obj in the given [start, end] index range into \a queue.
*
* Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
* crucial in the face of [offset, EOF] locks.
*/
void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io, pgoff_t start, pgoff_t end,
- struct cl_page_list *queue)
+ struct cl_page_list *queue, int nonblock)
{
struct cl_object_header *hdr;
struct cl_page *page;
unsigned int nr;
unsigned int i;
unsigned int j;
+ int (*page_own)(const struct lu_env *env,
+ struct cl_io *io,
+ struct cl_page *pg);
ENTRY;
+ page_own = nonblock ? cl_page_own_try : cl_page_own;
+
idx = start;
hdr = cl_object_header(obj);
pvec = cl_env_info(env)->clt_pvec;
* for osc, in case of ...
*/
PASSERT(env, page, slice != NULL);
+
page = slice->cpl_page;
/*
* Can safely call cl_page_get_trust() under
spin_unlock(&hdr->coh_page_guard);
for (i = 0; i < j; ++i) {
page = pvec[i];
- if (cl_page_own(env, io, page) == 0)
+ if (page_own(env, io, page) == 0)
cl_page_list_add(queue, page);
lu_ref_del(&page->cp_reference,
"page_list", cfs_current());
*
* \see cl_object_find(), cl_lock_find()
*/
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
+static struct cl_page *cl_page_find0(const struct lu_env *env,
+ struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type,
+ struct cl_page *parent)
{
struct cl_page *page;
struct cl_page *ghost = NULL;
* consistent even when VM locking is somehow busted,
* which is very useful during diagnosing and debugging.
*/
+ page = ERR_PTR(err);
if (err == -EEXIST) {
/*
* XXX in case of a lookup for CPT_TRANSIENT page,
spin_lock(&hdr->coh_page_guard);
page = ERR_PTR(-EBUSY);
}
- } else
- page = ERR_PTR(err);
- } else
+ }
+ } else {
+ if (parent) {
+ LASSERT(page->cp_parent == NULL);
+ page->cp_parent = parent;
+ parent->cp_child = page;
+ }
hdr->coh_pages++;
+ }
spin_unlock(&hdr->coh_page_guard);
if (unlikely(ghost != NULL)) {
}
RETURN(page);
}
+
+struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type)
+{
+ return cl_page_find0(env, o, idx, vmpage, type, NULL);
+}
EXPORT_SYMBOL(cl_page_find);
+
+struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ struct cl_page *parent)
+{
+ return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
+}
+EXPORT_SYMBOL(cl_page_find_sub);
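/*
 * A minimal caller sketch for the new sub-page interface (illustrative;
 * "sub_obj", "top" and the helper name are assumptions, only
 * cl_page_find_sub() itself comes from the code above): a layered object
 * instantiates a sub-page underneath an already existing top-level page.
 */
static int example_init_sub_page(const struct lu_env *env,
				 struct cl_object *sub_obj,
				 struct cl_page *top,
				 pgoff_t idx, struct page *vmpage)
{
	struct cl_page *sub;

	/* The sub-page inherits top->cp_type and is linked to the parent
	 * through cp_parent/cp_child inside cl_page_find0(). */
	sub = cl_page_find_sub(env, sub_obj, idx, vmpage, top);
	if (IS_ERR(sub))
		return PTR_ERR(sub);
	/* ... the sub-page is now reachable as top->cp_child ... */
	return 0;
}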
+
static inline int cl_page_invariant(const struct cl_page *pg)
{
struct cl_object_header *header;
ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%i\n", atomic_read(&page->cp_ref));
- hdr = cl_object_header(page->cp_obj);
- if (atomic_dec_and_test(&page->cp_ref)) {
+
+ hdr = cl_object_header(cl_object_top(page->cp_obj));
+ if (atomic_dec_and_lock(&page->cp_ref, &hdr->coh_page_guard)) {
atomic_dec(&site->cs_pages.cs_busy);
+ /* We are about to access the page without holding a reference,
+ * which is safe: we hold coh_page_guard, so nobody can free the
+ * page behind us.
+ */
if (page->cp_state == CPS_FREEING) {
+ /* The reference was dropped and the page state checked while
+ * holding coh_page_guard, so if we get here this really was the
+ * last reference to the page.
+ */
+ spin_unlock(&hdr->coh_page_guard);
+
+ LASSERT(atomic_read(&page->cp_ref) == 0);
PASSERT(env, page, page->cp_owner == NULL);
PASSERT(env, page, list_empty(&page->cp_batch));
/*
* it down.
*/
cl_page_free(env, page);
+
+ EXIT;
+ return;
}
+ spin_unlock(&hdr->coh_page_guard);
}
+
EXIT;
}
EXPORT_SYMBOL(cl_page_put);
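/*
 * The put path above relies on the kernel's atomic_dec_and_lock() idiom:
 * the reference count can only reach zero with coh_page_guard held, so the
 * final-put path may inspect the page under the lock.  A generic sketch of
 * the same pattern (the "foo" structure and helpers are hypothetical, not
 * Lustre code):
 */
struct foo {
	atomic_t	 f_ref;
	spinlock_t	*f_guard;	/* protects the lookup structure */
};

static void foo_put(struct foo *foo)
{
	/* Fast path: the count stays positive and no lock is taken. */
	if (!atomic_dec_and_lock(&foo->f_ref, foo->f_guard))
		return;
	/* Slow path: this was the last reference and f_guard is held,
	 * so a concurrent lookup cannot resurrect the object. */
	/* ... unlink foo from the lookup structure ... */
	spin_unlock(foo->f_guard);
	/* ... free foo ... */
}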
struct cl_page *cl_vmpage_page(cfs_page_t *vmpage, struct cl_object *obj)
{
struct cl_page *page;
+ struct cl_object_header *hdr;
ENTRY;
KLASSERT(PageLocked(vmpage));
* This loop assumes that ->private points to the top-most page. This
* can be rectified easily.
*/
+ hdr = cl_object_header(cl_object_top(obj));
+ spin_lock(&hdr->coh_page_guard);
for (page = (void *)vmpage->private;
page != NULL; page = page->cp_child) {
if (cl_object_same(page->cp_obj, obj)) {
break;
}
}
+ spin_unlock(&hdr->coh_page_guard);
LASSERT(ergo(page, cl_is_page(page) && page->cp_type == CPT_CACHEABLE));
RETURN(page);
}
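/*
 * Illustrative lookup sketch (the helper name and calling context are
 * assumptions; only cl_vmpage_page() comes from the code above): the VM
 * page must already be locked, and with this change the parent/child walk
 * is serialized by coh_page_guard of the top object, so it cannot race
 * with a concurrent final cl_page_put().
 */
static struct cl_page *example_vmpage_to_clpage(cfs_page_t *vmpage,
						struct cl_object *obj)
{
	/* Returns NULL when no layer of the page stack matches obj. */
	return cl_vmpage_page(vmpage, obj);
}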
EXPORT_SYMBOL(cl_page_is_owned);
/**
- * Owns a page by IO.
+ * Try to own a page by IO.
*
* Waits until page is in cl_page_state::CPS_CACHED state, and then switch it
* into cl_page_state::CPS_OWNED state.
*
* \retval -ve failure, e.g., page was destroyed (and landed in
* cl_page_state::CPS_FREEING instead of cl_page_state::CPS_CACHED).
+ * or, in the nonblocking case, the page was owned by another thread or was in IO.
*
* \see cl_page_disown()
* \see cl_page_operations::cpo_own()
+ * \see cl_page_own_try()
+ * \see cl_page_own()
*/
-int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
+static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg, int nonblock)
{
int result;
if (pg->cp_state == CPS_FREEING) {
result = -EAGAIN;
} else {
- cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_own));
- PASSERT(env, pg, pg->cp_owner == NULL);
- PASSERT(env, pg, pg->cp_req == NULL);
- pg->cp_owner = io;
- pg->cp_task = current;
- cl_page_owner_set(pg);
- if (pg->cp_state != CPS_FREEING) {
- cl_page_state_set(env, pg, CPS_OWNED);
- result = 0;
- } else {
- cl_page_disown0(env, io, pg);
- result = -EAGAIN;
+ result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(cpo_own),
+ (const struct lu_env *,
+ const struct cl_page_slice *,
+ struct cl_io *, int),
+ io, nonblock);
+ if (result == 0) {
+ PASSERT(env, pg, pg->cp_owner == NULL);
+ PASSERT(env, pg, pg->cp_req == NULL);
+ pg->cp_owner = io;
+ pg->cp_task = current;
+ cl_page_owner_set(pg);
+ if (pg->cp_state != CPS_FREEING) {
+ cl_page_state_set(env, pg, CPS_OWNED);
+ } else {
+ cl_page_disown0(env, io, pg);
+ result = -EAGAIN;
+ }
}
}
PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
RETURN(result);
}
+
+/**
+ * Own a page; the call may block waiting for the page to become available.
+ *
+ * \see cl_page_own0()
+ */
+int cl_page_own(const struct lu_env *env, struct cl_io *io, struct cl_page *pg)
+{
+ return cl_page_own0(env, io, pg, 0);
+}
EXPORT_SYMBOL(cl_page_own);
/**
+ * Nonblocking version of cl_page_own().
+ *
+ * \see cl_page_own0()
+ */
+int cl_page_own_try(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg)
+{
+ return cl_page_own0(env, io, pg, 1);
+}
+EXPORT_SYMBOL(cl_page_own_try);
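/*
 * Illustrative use of the two ownership flavours (a sketch; "pages",
 * "npages" and "queue" are assumed caller state, and each page is assumed
 * to be referenced by the caller): a scan that skips busy pages instead of
 * blocking on them, the same policy cl_page_gang_lookup() applies when its
 * nonblock argument is set.
 */
static void example_collect_owned(const struct lu_env *env, struct cl_io *io,
				  struct cl_page **pages, int npages,
				  struct cl_page_list *queue)
{
	int i;

	for (i = 0; i < npages; i++) {
		/* cl_page_own_try() returns 0 only if the page could be
		 * owned immediately; otherwise the page is left alone. */
		if (cl_page_own_try(env, io, pages[i]) == 0)
			cl_page_list_add(queue, pages[i]);
	}
}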
+
+/**
* Assume page ownership.
*
* Called when page is already locked by the hosting VM.
*
* Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
* layer responsible for VM interaction has to mark/clear page as up-to-date
- * by the @uptodate argument.
+ * by the \a uptodate argument.
*
* \see cl_page_operations::cpo_export()
*/
void cl_page_completion(const struct lu_env *env,
struct cl_page *pg, enum cl_req_type crt, int ioret)
{
+ struct cl_sync_io *anchor = pg->cp_sync_io;
+
PASSERT(env, pg, crt < CRT_NR);
/* cl_page::cp_req already cleared by the caller (osc_completion()) */
PASSERT(env, pg, pg->cp_req == NULL);
CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(io[crt].cpo_completion),
(const struct lu_env *,
const struct cl_page_slice *, int), ioret);
+ if (anchor) {
+ LASSERT(pg->cp_sync_io == anchor);
+ pg->cp_sync_io = NULL;
+ cl_sync_io_note(anchor, ioret);
+ }
/* Don't assert the page writeback bit here because the Lustre file
* may be used as a backend for swap space. In this case, the page writeback
}
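/*
 * A sketch of the synchronous-IO hand-off used above (illustrative; anchor
 * initialization and waiting are assumed to happen elsewhere, only the
 * cp_sync_io field and cl_sync_io_note() appear in this code): the submitter
 * attaches the anchor to every page before starting the transfer, and
 * cl_page_completion() detaches it and signals it exactly once per page.
 */
static void example_attach_anchor(struct cl_page *pg, struct cl_sync_io *anchor)
{
	/* Must be set before the page is handed to the transfer engine;
	 * cl_page_completion() will clear cp_sync_io and call
	 * cl_sync_io_note(anchor, ioret) when the page IO finishes. */
	pg->cp_sync_io = anchor;
}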
cl_page_list_init(plist);
- cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, plist);
+ cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, plist, 0);
/*
* Since we're purging the pages of an object, we don't care about
* the possible outcomes of the following functions.