struct cl_2queue *queue = &info->clt_queue;
struct cl_lock_descr *descr = &lock->cll_descr;
long page_count;
+ int nonblock = 1, resched;
int result;
LINVRNT(cl_lock_invariant(env, lock));
io->ci_obj = cl_object_top(descr->cld_obj);
result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result == 0) {
- int nonblock = 1;
+ if (result != 0)
+ GOTO(out, result);
-restart:
+ do {
cl_2queue_init(queue);
cl_page_gang_lookup(env, descr->cld_obj, io, descr->cld_start,
- descr->cld_end, &queue->c2_qin, nonblock);
+ descr->cld_end, &queue->c2_qin, nonblock,
+ &resched);
page_count = queue->c2_qin.pl_nr;
if (page_count > 0) {
result = cl_page_list_unmap(env, io, &queue->c2_qin);
}
cl_2queue_fini(env, queue);
- if (nonblock) {
- nonblock = 0;
- goto restart;
- }
- }
+ if (resched)
+ cfs_cond_resched();
+ } while (resched || nonblock--);
+out:
cl_io_fini(env, io);
RETURN(result);
}
/**
* Returns a list of pages by a given [start, end] of \a obj.
*
+ * \param resched If not NULL, the function gives up before hogging the CPU
+ * for too long and sets *resched = 1; in that case the caller should
+ * implement retry logic.
+ *
* Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
* crucial in the face of [offset, EOF] locks.
*/
void cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io, pgoff_t start, pgoff_t end,
- struct cl_page_list *queue, int nonblock)
+ struct cl_page_list *queue, int nonblock,
+ int *resched)
{
struct cl_object_header *hdr;
struct cl_page *page;
struct cl_page *pg);
ENTRY;
+ if (resched != NULL)
+ *resched = 0;
page_own = nonblock ? cl_page_own_try : cl_page_own;
idx = start;
cfs_spin_lock(&hdr->coh_page_guard);
if (nr < CLT_PVEC_SIZE)
break;
+ if (resched != NULL && cfs_need_resched()) {
+ *resched = 1;
+ break;
+ }
}
cfs_spin_unlock(&hdr->coh_page_guard);
EXIT;
struct cl_object *obj = cl_object_top(clobj);
struct cl_io *io;
struct cl_page_list *plist;
+ int resched;
int result;
ENTRY;
RETURN(io->ci_result);
}
- cl_page_list_init(plist);
- cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, plist, 0);
- /*
- * Since we're purging the pages of an object, we don't care
- * the possible outcomes of the following functions.
- */
- cl_page_list_unmap(env, io, plist);
- cl_page_list_discard(env, io, plist);
- cl_page_list_disown(env, io, plist);
- cl_page_list_fini(env, plist);
+ do {
+ cl_page_list_init(plist);
+ cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF, plist, 0,
+ &resched);
+ /*
+ * Since we're purging the pages of an object, we don't care about
+ * the possible outcomes of the following functions.
+ */
+ cl_page_list_unmap(env, io, plist);
+ cl_page_list_discard(env, io, plist);
+ cl_page_list_disown(env, io, plist);
+ cl_page_list_fini(env, plist);
+
+ if (resched)
+ cfs_cond_resched();
+ } while (resched);
cl_io_fini(env, io);
RETURN(result);
io->ci_obj = cl_object_top(obj);
cl_io_init(env, io, CIT_MISC, io->ci_obj);
cl_page_gang_lookup(env, obj, io,
- descr->cld_start, descr->cld_end, plist, 0);
+ descr->cld_start, descr->cld_end, plist, 0,
+ NULL);
cl_lock_page_list_fixup(env, io, lock, plist);
if (plist->pl_nr > 0) {
CL_LOCK_DEBUG(D_ERROR, env, lock, "still has pages\n");