- tmpex = lock->l_policy_data;
- CDEBUG(D_INODE|D_PAGE, "inode %lu(%p) ["LPU64"->"LPU64"] size: %llu\n",
- inode->i_ino, inode, tmpex.l_extent.start, tmpex.l_extent.end,
- i_size_read(inode));
-
- /* Our locks are page granular thanks to osc_enqueue, so we
- * invalidate whole pages. */
- if ((tmpex.l_extent.start & ~CFS_PAGE_MASK) != 0 ||
- ((tmpex.l_extent.end + 1) & ~CFS_PAGE_MASK) != 0)
- LDLM_ERROR(lock, "lock not aligned on PAGE_SIZE %lu",
- CFS_PAGE_SIZE);
- LASSERT((tmpex.l_extent.start & ~CFS_PAGE_MASK) == 0);
- LASSERT(((tmpex.l_extent.end + 1) & ~CFS_PAGE_MASK) == 0);
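- /* Illustrative aside, not part of the original patch: assuming
- * 4 KiB pages and CFS_PAGE_MASK mirroring the kernel's PAGE_MASK,
- * ~CFS_PAGE_MASK is 0xfff, so the checks above demand that
- * l_extent.start be a multiple of 4096 and l_extent.end be the
- * last byte of a page.  An extent [0x1000, 0x2fff] passes
- * (0x2fff + 1 == 0x3000 is page aligned); [0x1000, 0x2ffe] would
- * trip the second LASSERT. */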
-
- count = ~0;
- skip = 0;
- start = tmpex.l_extent.start >> CFS_PAGE_SHIFT;
- end = tmpex.l_extent.end >> CFS_PAGE_SHIFT;
- if (lsm->lsm_stripe_count > 1) {
- count = lsm->lsm_stripe_size >> CFS_PAGE_SHIFT;
- skip = (lsm->lsm_stripe_count - 1) * count;
- start += start/count * skip + stripe * count;
- if (end != ~0)
- end += end/count * skip + stripe * count;
- }
- /* the "end +=" addition above may wrap; fall back to "no bound" */
- if (end < tmpex.l_extent.end >> CFS_PAGE_SHIFT)
- end = ~0;
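- /* Worked example with assumed values (1 MiB stripes, 4 KiB pages,
- * lsm_stripe_count == 3): count = 256 pages per stripe chunk and
- * skip = 2 * 256 = 512.  A stripe-local page index of 300 on
- * stripe 1 sits in chunk 300/256 = 1, so its file-wide index is
- * 300 + 1*512 + 1*256 = 1068, i.e. page 44 of file chunk 4, which
- * maps back to stripe 1 (4 % 3 == 1). */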
-
- i = i_size_read(inode) ? (__u64)(i_size_read(inode) - 1) >>
- CFS_PAGE_SHIFT : 0;
- if (i < end)
- end = i;
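- /* Illustrative numbers, not from the original patch: with
- * i_size_read() == 10000 and 4 KiB pages, the last valid page
- * index is (10000 - 1) >> 12 == 2, so an unbounded end of ~0 is
- * clamped to 2 and the walk stops at EOF. */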
-
- CDEBUG(D_INODE|D_PAGE, "walking page indices start: %lu j: %lu "
- "count: %lu skip: %lu end: %lu%s\n", start, start % count,
- count, skip, end, discard ? " (DISCARDING)" : "");
-
- /* Walk through the VMAs on the inode and tear down mmap()ed pages
- * that intersect with the lock.  This stops immediately if there
- * are no mmap()ed regions of the file.  It is not efficient at all
- * and should be short-lived: we will associate mmap()ed pages with
- * the lock and then be able to find them directly. */
- for (i = start; i <= end; i += (j + skip)) {
- j = min(count - (i % count), end - i + 1);
- LASSERT(j > 0);
- LASSERT(mapping);
- if (ll_teardown_mmaps(mapping,
- (__u64)i << CFS_PAGE_SHIFT,
- ((__u64)(i+j) << CFS_PAGE_SHIFT) - 1))
- break;
- }
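- /* Sketch of one iteration under the same assumed layout (count
- * == 256, skip == 512): at i == 1024 with end == 2047, j =
- * min(256 - 0, 1024) = 256, so one ll_teardown_mmaps() call
- * covers pages [1024, 1279]; i then advances by j + skip == 768
- * to 1792, the first page of this stripe's next chunk. */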
-
- /* This is the simplistic implementation of page eviction at
- * cancellation.  It is careful to handle races with other page
- * lockers correctly.  Fixes from bug 20 will make it more
- * efficient by associating locks with pages and by batching
- * writeback under the lock explicitly. */
- for (i = start, j = start % count; i <= end;
- j++, i++, tmpex.l_extent.start += CFS_PAGE_SIZE) {
- if (j == count) {
- CDEBUG(D_PAGE, "skip index %lu to %lu\n", i, i + skip);
- i += skip;
- j = 0;
- if (i > end)
- break;
- }
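- /* Under the same assumed geometry: once j reaches count == 256
- * this stripe's chunk is exhausted, so "i += skip" hops over the
- * other two stripes' 512 pages and the walk resumes on a page
- * this lock can actually cover. */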
- LASSERTF(tmpex.l_extent.start < lock->l_policy_data.l_extent.end,
- LPU64" >= "LPU64" start %lu i %lu end %lu\n",
- tmpex.l_extent.start, lock->l_policy_data.l_extent.end,
- start, i, end);