const struct lu_device_type *dtype)
{
const struct cl_page_slice *slice;
+
+#ifdef INVARIANT_CHECK
struct cl_object_header *ch = cl_object_header(page->cp_obj);
+ /* Debug-build-only invariant: a page whose refcount has dropped to
+  * zero may only be examined while the owning object's page guard
+  * spinlock is held (replaces the old LINVRNT/spin_is_locked form,
+  * which is unavailable on non-SMP/non-debug kernels). */
+ if (!atomic_read(&page->cp_ref))
+ LASSERT_SPIN_LOCKED(&ch->coh_page_guard);
+#endif
ENTRY;
- LINVRNT(ergo(!atomic_read(&page->cp_ref),
- spin_is_locked(&ch->coh_page_guard)));
page = cl_page_top_trusted((struct cl_page *)page);
do {
{
struct cl_page *page;
- LASSERT(spin_is_locked(&hdr->coh_page_guard));
+ /* The radix tree may only be queried with coh_page_guard held;
+  * assert it here (LASSERT_SPIN_LOCKED also works on kernels where
+  * spin_is_locked() is not usable). */
+ LASSERT_SPIN_LOCKED(&hdr->coh_page_guard);
page = radix_tree_lookup(&hdr->coh_tree, index);
if (page != NULL) {
(const struct lu_env *,
const struct cl_page_slice *, int), ioret);
- KLASSERT(!PageWriteback(cl_page_vmpage(env, pg)));
+ /* Don't assert that the page's writeback bit is clear here: a Lustre
+ * file may serve as backing store for swap space, in which case the
+ * writeback bit is set by the VM and obviously must not be cleared by
+ * us. Fortunately, such pages are always TRANSIENT, so restrict the
+ * assertion to CPT_CACHEABLE pages. */
+ KLASSERT(ergo(pg->cp_type == CPT_CACHEABLE,
+ !PageWriteback(cl_page_vmpage(env, pg))));
EXIT;
}
EXPORT_SYMBOL(cl_page_completion);