#define cl_page_list_for_each_safe(page, temp, list) \
list_for_each_entry_safe((page), (temp), &(list)->pl_pages, cp_batch)
-void cl_page_list_init (struct cl_page_list *plist);
-void cl_page_list_add (struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_move (struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page);
+void cl_page_list_init(struct cl_page_list *plist);
+/* get_ref: when true, take an extra reference on @page for the list */
+void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page,
+ bool get_ref);
+void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
- struct cl_page *page);
-void cl_page_list_splice (struct cl_page_list *list,
- struct cl_page_list *head);
-void cl_page_list_del (const struct lu_env *env,
- struct cl_page_list *plist, struct cl_page *page);
-void cl_page_list_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
-void cl_page_list_assume (const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
+ struct cl_page *page);
+void cl_page_list_splice(struct cl_page_list *list,
+ struct cl_page_list *head);
+void cl_page_list_del(const struct lu_env *env,
+ struct cl_page_list *plist, struct cl_page *page);
+void cl_page_list_disown(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
void cl_page_list_discard(const struct lu_env *env,
- struct cl_io *io, struct cl_page_list *plist);
-void cl_page_list_fini (const struct lu_env *env, struct cl_page_list *plist);
-
-void cl_2queue_init (struct cl_2queue *queue);
-void cl_2queue_add (struct cl_2queue *queue, struct cl_page *page);
-void cl_2queue_disown (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_assume (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_discard (const struct lu_env *env,
- struct cl_io *io, struct cl_2queue *queue);
-void cl_2queue_fini (const struct lu_env *env, struct cl_2queue *queue);
+ struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
+
+void cl_2queue_init(struct cl_2queue *queue);
+/* get_ref is forwarded to cl_page_list_add() for the incoming (qin) list */
+void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page,
+ bool get_ref);
+void cl_2queue_disown(const struct lu_env *env, struct cl_io *io,
+ struct cl_2queue *queue);
+void cl_2queue_assume(const struct lu_env *env, struct cl_io *io,
+ struct cl_2queue *queue);
+void cl_2queue_discard(const struct lu_env *env, struct cl_io *io,
+ struct cl_2queue *queue);
+void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue);
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page);
/** @} cl_page_list */
anchor = &vvp_env_info(env)->vti_anchor;
cl_sync_io_init(anchor, 1);
clpage->cp_sync_io = anchor;
- cl_2queue_add(queue, clpage);
+ /* true: the queue takes its own reference on clpage */
+ cl_2queue_add(queue, clpage, true);
rc = cl_io_submit_rw(env, io, CRT_WRITE, queue);
if (rc)
GOTO(queuefini1, rc);
vpg->vpg_defer_uptodate = 1;
vpg->vpg_ra_used = 0;
}
- cl_page_list_add(queue, page);
+ cl_page_list_add(queue, page, true);
} else {
/* skip completed pages */
cl_page_unassume(env, io, page);
cl_sync_io_init(anchor, 1);
page->cp_sync_io = anchor;
- cl_2queue_add(queue, page);
+ cl_2queue_add(queue, page, true);
}
io_start_index = cl_index(io->ci_obj, io->u.ci_rw.crw_pos);
*/
page->cp_inode = inode;
}
- cl_2queue_add(queue, page);
+ /* We keep the refcount from cl_page_find, so we don't need
+ * another one here
+ */
+ cl_2queue_add(queue, page, false);
/*
* Set page clip to tell transfer formation engine
* that page has to be sent even if it is beyond KMS.
cl_page_clip(env, page, 0, size);
++io_pages;
- /* drop the reference count for cl_page_find */
- cl_page_put(env, page);
offset += page_size;
size -= page_size;
}
lcc->lcc_page = NULL; /* page will be queued */
/* Add it into write queue */
- cl_page_list_add(plist, page);
+ /* true: plist takes its own reference on the page */
+ cl_page_list_add(plist, page, true);
if (plist->pl_nr == 1) /* first page */
vio->u.readwrite.vui_from = from;
else
cl_page_assume(env, io, page);
cl_page_list_init(plist);
- cl_page_list_add(plist, page);
+ cl_page_list_add(plist, page, true);
/* size fixup */
if (last_index == vvp_index(vpg))
if (result >= 0) {
io->ci_noquota = 1;
cl_page_own(env, io, page);
- cl_page_list_add(plist, page);
+ cl_page_list_add(plist, page, true);
lu_ref_add(&page->cp_reference,
"cl_io", io);
result = cl_io_commit_async(env, io,
vpg->vpg_page = vmpage;
get_page(vmpage);
- if (page->cp_type == CPT_CACHEABLE) {
+ /* transient pages (e.g. DIO) get the lightweight transient ops and do
+ * not take an extra cl_page reference
+ */
+ if (page->cp_type == CPT_TRANSIENT) {
+ /* DIO pages are referenced by userspace, we don't need to take
+ * a reference on them. (contrast with get_page() call above)
+ */
+ cl_page_slice_add(page, &vpg->vpg_cl, obj,
+ &vvp_transient_page_ops);
+ } else {
/* in cache, decref in vvp_page_delete */
atomic_inc(&page->cp_ref);
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
cl_page_slice_add(page, &vpg->vpg_cl, obj,
&vvp_page_ops);
- } else {
- cl_page_slice_add(page, &vpg->vpg_cl, obj,
- &vvp_transient_page_ops);
}
+
return 0;
}
/**
 * Adds a page to a page list.
 */
-void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
+void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page,
+ bool get_ref)
{
ENTRY;
/* it would be better to check that page is owned by "current" io, but
list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
- cl_page_get(page);
+ /* some callers (e.g. after cl_page_find) transfer an existing
+ * reference to the list instead of taking a new one here
+ */
+ if (get_ref)
+ cl_page_get(page);
EXIT;
}
EXPORT_SYMBOL(cl_page_list_add);
/**
 * Add a page to the incoming page list of 2-queue.
 */
-void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
+void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page, bool get_ref)
{
- ENTRY;
- cl_page_list_add(&queue->c2_qin, page);
- EXIT;
+ /* get_ref is passed straight through to the qin list */
+ cl_page_list_add(&queue->c2_qin, page, get_ref);
}
EXPORT_SYMBOL(cl_2queue_add);
*/
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
- ENTRY;
- cl_2queue_init(queue);
- cl_2queue_add(queue, page);
- EXIT;
+ ENTRY;
+ cl_2queue_init(queue);
+ /* true: the freshly initialized queue takes its own page reference */
+ cl_2queue_add(queue, page, true);
+ EXIT;
}
EXPORT_SYMBOL(cl_2queue_init_page);
struct page *vmpage = pvec->pages[i];
struct cl_page *page = (struct cl_page *)vmpage->private;
- cl_page_list_add(&queue->c2_qout, page);
+ /* true: the qout list takes its own reference on the page */
+ cl_page_list_add(&queue->c2_qout, page, true);
}
}
break;
}
- cl_2queue_add(queue, clp);
+ cl_2queue_add(queue, clp, true);
/*
* drop the reference count for cl_page_find, so that the page