/*
 * NOTE(review): this function body is UNMERGED DIFF RESIDUE -- patch lines
 * prefixed with "-" (removed) and "+" (added) are embedded directly in the
 * source, so this block is not compilable C as-is.  The patch must be
 * resolved (keep the "+" lines, drop the "-" lines, strip the prefixes)
 * before this file will build.  Additional damage beyond the diff markers
 * is flagged inline below.
 *
 * Intended purpose (inferred from the visible calls -- confirm against the
 * upstream Lustre llite/rw.c history): when a DLM extent lock is being
 * cancelled, walk the page-cache indices covered by the lock's extent on
 * this stripe of the striped file and evict each page
 * (find_get_page -> lock_page -> ll_truncate_complete_page), unless another
 * granted PR/PW lock still covers that page (ldlm_lock_match check below).
 */
void ll_pgcache_remove_extent(struct inode *inode, struct lov_stripe_md *lsm,
struct ldlm_lock *lock, __u32 stripe)
{
- struct ldlm_extent *extent = &lock->l_policy_data.l_extent;
ldlm_policy_data_t tmpex;
unsigned long start, end, count, skip, i, j;
struct page *page;
struct lustre_handle lockh;
ENTRY;
/* New-side code takes a private copy of the lock's extent; tmpex is
 * reused below as the per-page extent passed to ldlm_lock_match(). */
- CDEBUG(D_INODE, "obdo %lu inode %p ["LPU64"->"LPU64"] size: %llu\n",
- inode->i_ino, inode, extent->start, extent->end, inode->i_size);
+ memcpy(&tmpex, &lock->l_policy_data.l_extent, sizeof(tmpex));
+ CDEBUG(D_INODE|D_PAGE, "inode %lu(%p) ["LPU64"->"LPU64"] size: %llu\n",
+ inode->i_ino, inode, tmpex.l_extent.start, tmpex.l_extent.end,
+ inode->i_size);
/* our locks are page granular thanks to osc_enqueue, we invalidate the
* whole page. */
- LASSERT((extent->start & ~PAGE_CACHE_MASK) == 0);
- LASSERT(((extent->end+1) & ~PAGE_CACHE_MASK) == 0);
+ LASSERT((tmpex.l_extent.start & ~PAGE_CACHE_MASK) == 0);
+ LASSERT(((tmpex.l_extent.end + 1) & ~PAGE_CACHE_MASK) == 0);
/* Map the byte extent to page indices.  count stays ~0 and skip stays 0
 * for single-stripe files, i.e. every index is visited. */
- start = extent->start >> PAGE_CACHE_SHIFT;
count = ~0;
skip = 0;
- end = (extent->end >> PAGE_CACHE_SHIFT) + 1;
- if ((end << PAGE_CACHE_SHIFT) < extent->end)
- end = ~0;
+ start = tmpex.l_extent.start >> PAGE_CACHE_SHIFT;
+ end = tmpex.l_extent.end >> PAGE_CACHE_SHIFT;
/* Striped file: presumably only every lsm_stripe_size worth of pages
 * ('count' pages) belongs to this stripe, after which 'skip' pages on
 * the other stripes are jumped over -- TODO confirm against the LOV
 * striping layout documentation. */
if (lsm->lsm_stripe_count > 1) {
count = lsm->lsm_stripe_size >> PAGE_CACHE_SHIFT;
skip = (lsm->lsm_stripe_count - 1) * count;
- start += (start/count * skip) + (stripe * count);
+ start += start/count * skip + stripe * count;
if (end != ~0)
- end += (end/count * skip) + (stripe * count);
+ end += end/count * skip + stripe * count;
}
/* Overflow guard on the new side: if the striping arithmetic wrapped
 * 'end' below the unadjusted page index, saturate it. */
+ if (end < tmpex.l_extent.end >> PAGE_CACHE_SHIFT)
+ end = ~0;
/* Clamp the walk to the pages that exist under the current i_size. */
i = (inode->i_size + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
if (i < end)
end = i;
/* NOTE(review): 'discard' is used here and at the PageDirty() test
 * below but is never declared or assigned anywhere in this view --
 * presumably derived from the lock's DISCARD_DATA flag or an extra
 * parameter lost in the bad merge; must be restored for this to
 * compile. */
- CDEBUG(D_INODE, "walking page indices start: %lu j: %lu count: %lu "
- "skip: %lu end: %lu%s\n", start, start % count, count, skip, end,
- discard ? " (DISCARDING)" : "");
+ CDEBUG(D_INODE|D_PAGE, "walking page indices start: %lu j: %lu "
+ "count: %lu skip: %lu end: %lu%s\n", start, start % count,
+ count, skip, end, discard ? " (DISCARDING)" : "");
/* this is the simplistic implementation of page eviction at
* cancelation. It is careful to get races with other page
* lockers handled correctly. fixes from bug 20 will make it
* more efficient by associating locks with pages and with
* batching writeback under the lock explicitly. */
- for (i = start, j = start % count ; ; j++, i++) {
- if (j == count) {
- i += skip;
- j = 0;
- }
- if (i >= end)
- break;
+ for (i = start, j = start % count ; i <= end;
+ tmpex.l_extent.start += PAGE_CACHE_SIZE, j++, i++) {
+ LASSERTF(tmpex.l_extent.start< lock->l_policy_data.l_extent.end,
+ LPU64" >= "LPU64" start %lu i %lu end %lu\n",
+ tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
+ start, i, end);
/* NOTE(review): the next statement is TRUNCATED -- the 'if'
 * condition ends in '&&' with its remainder and body missing
 * from this hunk, and the matching ll_pgcache_unlock() (or
 * equivalent release of the mapping lock taken just above) is
 * nowhere in view.  Lines were lost here in the bad merge. */
ll_pgcache_lock(inode->i_mapping);
if (list_empty(&inode->i_mapping->dirty_pages) &&
page = find_get_page(inode->i_mapping, i);
if (page == NULL)
- continue;
- LL_CDEBUG_PAGE(page, "locking page\n");
+ goto next_index;
+ LL_CDEBUG_PAGE(D_PAGE, page, "locking page\n");
lock_page(page);
/* page->mapping to check with racing against teardown */
if (page->mapping && PageDirty(page) && !discard) {
/* Dirty page and not discarding: move it to the mapping's
 * locked_pages list.  NOTE(review): lock_page() is called
 * again here while the page is already locked a few lines
 * above -- either a deadlock or more lost lines (e.g. an
 * intervening unlock/writeback); verify against upstream. */
ClearPageDirty(page);
- LL_CDEBUG_PAGE(page, "found dirty\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "found dirty\n");
ll_pgcache_lock(inode->i_mapping);
list_del(&page->list);
list_add(&page->list, &inode->i_mapping->locked_pages);
lock_page(page);
}
/* tmpex now describes exactly this page's byte range for the
 * overlap query below.  On the new side the start is advanced
 * in the for-loop header instead of being recomputed here. */
- tmpex.l_extent.start = (__u64)page->index << PAGE_CACHE_SHIFT;
tmpex.l_extent.end = tmpex.l_extent.start + PAGE_CACHE_SIZE - 1;
/* check to see if another DLM lock covers this page */
/* NOTE(review): 'rc2' is never declared in this view -- another
 * casualty of the bad merge; needs an 'int rc2;' (and the match
 * call below appears to be missing arguments compared to other
 * ldlm_lock_match() call sites in this file -- confirm). */
rc2 = ldlm_lock_match(lock->l_resource->lr_namespace,
&tmpex, LCK_PR | LCK_PW, &lockh);
if (rc2 == 0 && page->mapping != NULL) {
// checking again to account for writeback's lock_page()
- LL_CDEBUG_PAGE(page, "truncating\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "truncating\n");
ll_truncate_complete_page(page);
}
unlock_page(page);
page_cache_release(page);
/* New-side replacement for the old top-of-loop stripe skip:
 * after 'count' pages on this stripe, jump over the other
 * stripes' pages. */
+ next_index:
+ if (j == count) {
+ i += skip;
+ j = 0;
+ }
+
}
EXIT;
}
LCK_PR, &flags, ll_extent_lock_callback,
ldlm_completion_ast, ll_glimpse_callback, inode,
sizeof(*lvb), lustre_swab_ost_lvb, &lockh);
- if (rc > 0)
+ if (rc > 0) {
+ CERROR("obd_enqueue returned rc %d, returning -EIO\n", rc);
RETURN(-EIO);
+ }
lvb->lvb_size = lov_merge_size(lli->lli_smd, 0);
//inode->i_mtime = lov_merge_mtime(lli->lli_smd, inode->i_mtime);
CDEBUG(D_INFO, "trying to match res "LPU64"\n", res_id.name[0]);
+ /* FIXME use LDLM_FL_TEST_LOCK instead */
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING;
if (ldlm_lock_match(obddev->obd_namespace, flags, &res_id, LDLM_PLAIN,
NULL, LCK_PR, &lockh)) {
if (TryLockPage(page))
RETURN(-EAGAIN);
- LL_CDEBUG_PAGE(page, "made ready\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "made ready\n");
page_cache_get(page);
/* if we left PageDirty we might get another writepage call
oig_release(oig);
GOTO(out, rc);
}
- LL_CDEBUG_PAGE(page, "write queued\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "write queued\n");
//llap_write_pending(inode, llap);
} else {
lprocfs_counter_incr(ll_i2sbi(inode)->ll_stats,
return;
}
- LL_CDEBUG_PAGE(page, "being evicted\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");
exp = ll_i2obdexp(inode);
if (exp == NULL) {
&page_extent, LCK_PR | LCK_PW, &flags, inode,
&match_lockh);
if (matches < 0)
- LL_CDEBUG_PAGE(page, "lock match failed\n");
+ LL_CDEBUG_PAGE(D_ERROR, page, "lock match failed: rc %d\n",
+ matches);
RETURN(matches);
}
NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
PAGE_SIZE, 0, ASYNC_COUNT_STABLE);
if (rc) {
- LL_CDEBUG_PAGE(page, "read queueing failed\n");
+ LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
page_cache_release(page);
}
RETURN(rc);
rc = ll_issue_page_read(exp, llap, oig, 1);
if (rc == 0)
- LL_CDEBUG_PAGE(page, "started read-ahead\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "started read-ahead\n");
if (rc) {
next_page:
- LL_CDEBUG_PAGE(page, "skipping read-ahead\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "skipping read-ahead\n");
unlock_page(page);
}
ll_readahead(&fd->fd_ras, exp, page->mapping, oig);
obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL,
oig);
- LL_CDEBUG_PAGE(page, "marking uptodate from defer\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "marking uptodate from defer\n");
SetPageUptodate(page);
unlock_page(page);
GOTO(out_oig, rc = 0);
if (rc)
GOTO(out, rc);
- LL_CDEBUG_PAGE(page, "queued readpage\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "queued readpage\n");
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_READAHEAD))
ll_readahead(&fd->fd_ras, exp, page->mapping, oig);
if (IS_ERR(llap))
RETURN(PTR_ERR(llap));
- LL_CDEBUG_PAGE(page, "setting ready|urgent\n");
+ LL_CDEBUG_PAGE(D_PAGE, page, "setting ready|urgent\n");
rc = obd_set_async_flags(exp, ll_i2info(page->mapping->host)->lli_smd,
NULL, llap->llap_cookie,