}
/**
- * True iff \a page is within \a io range.
- */
-static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
-{
- int result = 1;
- loff_t start;
- loff_t end;
- pgoff_t idx;
-
- idx = page->cp_index;
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- /*
- * check that [start, end) and [pos, pos + count) extents
- * overlap.
- */
- if (!cl_io_is_append(io)) {
- const struct cl_io_rw_common *crw = &(io->u.ci_rw);
- start = cl_offset(page->cp_obj, idx);
- end = cl_offset(page->cp_obj, idx + 1);
- result = crw->crw_pos < end &&
- start < crw->crw_pos + crw->crw_count;
- }
- break;
- case CIT_FAULT:
- result = io->u.ci_fault.ft_index == idx;
- break;
- default:
- LBUG();
- }
- return result;
-}
-
-/**
 * Called by read io when a page has to be read from the server.
*
* \see cl_io_operations::cio_read_page()
LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
LINVRNT(cl_page_is_owned(page, io));
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_page_in_io(page, io));
LINVRNT(cl_io_invariant(io));
ENTRY;
break;
}
}
- if (result == 0)
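+ /* Submit the read RPC only when the incoming queue still has pages. */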
+ if (result == 0 && queue->c2_qin.pl_nr > 0)
result = cl_io_submit_rw(env, io, CRT_READ, queue);
/*
* Unlock unsent pages in case of error.
cl_page_list_for_each(page, queue) {
int rc;
- LINVRNT(cl_page_in_io(page, io));
rc = cl_page_cancel(env, page);
result = result ?: rc;
}
LASSERT(page->cp_owner != NULL);
LINVRNT(plist->pl_owner == current);
- lockdep_off();
- mutex_lock(&page->cp_mutex);
- lockdep_on();
LASSERT(cfs_list_empty(&page->cp_batch));
cfs_list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
struct cl_page_list *plist, struct cl_page *page)
{
LASSERT(plist->pl_nr > 0);
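+ /* Deleting a page from the list requires its vmpage to be locked. */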
+ LASSERT(cl_page_is_vmlocked(env, page));
LINVRNT(plist->pl_owner == current);
ENTRY;
cfs_list_del_init(&page->cp_batch);
- lockdep_off();
- mutex_unlock(&page->cp_mutex);
- lockdep_on();
--plist->pl_nr;
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
LASSERT(plist->pl_nr > 0);
cfs_list_del_init(&page->cp_batch);
- lockdep_off();
- mutex_unlock(&page->cp_mutex);
- lockdep_on();
--plist->pl_nr;
/*
* cl_page_disown0 rather than usual cl_page_disown() is used,
{
struct cl_page *page;
struct cl_page *temp;
- pgoff_t index = 0;
int result;
LINVRNT(plist->pl_owner == current);
ENTRY;
result = 0;
cl_page_list_for_each_safe(page, temp, plist) {
- LASSERT(index <= page->cp_index);
- index = page->cp_index;
if (cl_page_own(env, io, page) == 0)
result = result ?: page->cp_error;
else
/**
* Returns top-level io.
*
- * \see cl_object_top(), cl_page_top().
+ * \see cl_object_top()
*/
struct cl_io *cl_io_top(struct cl_io *io)
{
static int cl_req_init(const struct lu_env *env, struct cl_req *req,
struct cl_page *page)
{
- struct cl_device *dev;
- struct cl_page_slice *slice;
- int result;
+ struct cl_device *dev;
+ struct cl_page_slice *slice;
+ int result;
- ENTRY;
- result = 0;
- page = cl_page_top(page);
- do {
- cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
- if (dev->cd_ops->cdo_req_init != NULL) {
- result = dev->cd_ops->cdo_req_init(env,
- dev, req);
- if (result != 0)
- break;
- }
- }
- page = page->cp_child;
- } while (page != NULL && result == 0);
- RETURN(result);
+ ENTRY;
+ result = 0;
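+ /* Let every layer of the page initialize its part of the request. */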
+ cfs_list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
+ if (dev->cd_ops->cdo_req_init != NULL) {
+ result = dev->cd_ops->cdo_req_init(env, dev, req);
+ if (result != 0)
+ break;
+ }
+ }
+ RETURN(result);
}
/**
void cl_req_page_add(const struct lu_env *env,
struct cl_req *req, struct cl_page *page)
{
- struct cl_object *obj;
- struct cl_req_obj *rqo;
- int i;
+ struct cl_object *obj;
+ struct cl_req_obj *rqo;
+ int i;
- ENTRY;
- page = cl_page_top(page);
+ ENTRY;
LASSERT(cfs_list_empty(&page->cp_flight));
LASSERT(page->cp_req == NULL);
cl_object_get(obj);
lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
"cl_req", req);
- break;
- }
- }
- LASSERT(i < req->crq_nrobjs);
- EXIT;
+ break;
+ }
+ }
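+ /* The loop must not run past the request's preallocated object slots. */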
+ LASSERT(i < req->crq_nrobjs);
+ EXIT;
}
EXPORT_SYMBOL(cl_req_page_add);
*/
void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
- struct cl_req *req = page->cp_req;
+ struct cl_req *req = page->cp_req;
- ENTRY;
- page = cl_page_top(page);
+ ENTRY;
- LASSERT(!cfs_list_empty(&page->cp_flight));
- LASSERT(req->crq_nrpages > 0);
+ LASSERT(!cfs_list_empty(&page->cp_flight));
+ LASSERT(req->crq_nrpages > 0);
- cfs_list_del_init(&page->cp_flight);
- --req->crq_nrpages;
- page->cp_req = NULL;
- EXIT;
+ cfs_list_del_init(&page->cp_flight);
+ --req->crq_nrpages;
+ page->cp_req = NULL;
+ EXIT;
}
EXPORT_SYMBOL(cl_req_page_done);
{
ENTRY;
init_waitqueue_head(&anchor->csi_waitq);
- cfs_atomic_set(&anchor->csi_sync_nr, nrpages);
- cfs_atomic_set(&anchor->csi_barrier, nrpages > 0);
+ atomic_set(&anchor->csi_sync_nr, nrpages);
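+ /* The barrier stays raised until the last cl_sync_io_note() lowers it. */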
+ atomic_set(&anchor->csi_barrier, nrpages > 0);
anchor->csi_sync_rc = 0;
EXIT;
}
LASSERT(timeout >= 0);
rc = l_wait_event(anchor->csi_waitq,
- cfs_atomic_read(&anchor->csi_sync_nr) == 0,
+ atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
if (rc < 0) {
CERROR("SYNC IO failed with error: %d, try to cancel "
"%d remaining pages\n",
- rc, cfs_atomic_read(&anchor->csi_sync_nr));
+ rc, atomic_read(&anchor->csi_sync_nr));
(void)cl_io_cancel(env, io, queue);
lwi = (struct l_wait_info) { 0 };
(void)l_wait_event(anchor->csi_waitq,
- cfs_atomic_read(&anchor->csi_sync_nr) == 0,
+ atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
} else {
rc = anchor->csi_sync_rc;
}
- LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) == 0);
+ LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
cl_page_list_assume(env, io, queue);
/* wait until cl_sync_io_note() has done wakeup */
- while (unlikely(cfs_atomic_read(&anchor->csi_barrier) != 0)) {
+ while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
#ifdef __KERNEL__
cpu_relax();
#endif
* ->{prepare,commit}_write(). Completion is used to signal the end of
* IO.
*/
- LASSERT(cfs_atomic_read(&anchor->csi_sync_nr) > 0);
- if (cfs_atomic_dec_and_test(&anchor->csi_sync_nr)) {
+ LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
+ if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
wake_up_all(&anchor->csi_waitq);
/* it's safe to nuke or reuse anchor now */
- cfs_atomic_set(&anchor->csi_barrier, 0);
+ atomic_set(&anchor->csi_barrier, 0);
}
EXIT;
}