- cl_page_list_init(plist);
- cl_page_list_for_each_safe(page, temp, queue) {
- pgoff_t idx = page->cp_index;
- struct cl_lock *found;
- struct cl_lock_descr *descr;
-
- /* The algorithm counts on the index-ascending page index. */
- LASSERT(ergo(&temp->cp_batch != &queue->pl_pages,
- page->cp_index < temp->cp_index));
-
- found = cl_lock_at_page(env, lock->cll_descr.cld_obj,
- page, lock, 0, 0);
- if (found == NULL)
- continue;
-
- descr = &found->cll_descr;
- list_for_each_entry_safe_from(page, temp, &queue->pl_pages,
- cp_batch) {
- idx = page->cp_index;
- if (descr->cld_start > idx || descr->cld_end < idx)
- break;
- cl_page_list_move(plist, queue, page);
+/**
+ * Check whether @page is covered by another lock; if it is not, discard it.
+ */
+static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, void *cbdata)
+{
+ struct cl_thread_info *info = cl_env_info(env);
+ struct cl_lock *lock = cbdata;
+ pgoff_t index = pgoff_at_lock(page, lock);
+
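+	/* Pages with index < clt_fn_index are already known to be covered
+	 * by a lock found on an earlier iteration and can be skipped. */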
+ if (index >= info->clt_fn_index) {
+ struct cl_lock *tmp;
+
+		/* refresh the first non-overlapping index */
+ tmp = cl_lock_at_page(env, lock->cll_descr.cld_obj, page, lock,
+ 1, 0);
+ if (tmp != NULL) {
+			/* Cache the first non-overlapping index so that all
+			 * pages within [index, clt_fn_index) can be skipped.
+			 * This is safe because if the tmp lock is cancelled,
+			 * it will discard those pages itself. */
+ info->clt_fn_index = tmp->cll_descr.cld_end + 1;
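+			/* if the lock extends to EOF, cld_end + 1 would wrap,
+			 * so cache CL_PAGE_EOF instead */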
+ if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
+ info->clt_fn_index = CL_PAGE_EOF;
+ cl_lock_put(env, tmp);
+ } else if (cl_page_own(env, io, page) == 0) {
+ /* discard the page */
+ cl_page_unmap(env, io, page);
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
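+			/* cl_page_own() failed: the page is already being
+			 * freed, so nothing needs to be discarded here */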
+ LASSERT(page->cp_state == CPS_FREEING);