-/* These are our hacks to keep our directio/bh IO coherent with ext3's
- * page cache use. Most notably ext3 reads file data into the page
- * cache when it is zeroing the tail of partial-block truncates and
- * leaves it there, sometimes generating io from it at later truncates.
- * This removes the partial page and its buffers from the page cache,
- * so it should only ever cause a wait in rare cases, as otherwise we
- * always do full-page IO to the OST.
- *
- * The call to truncate_complete_page() will call journal_invalidatepage()
- * to free the buffers and drop the page from cache. The buffers should
- * not be dirty, because we already called fdatasync/fdatawait on them.
- */
-/* Write back and wait on all cached dirty pages of @inode's mapping.
- *
- * @inode:  inode whose page-cache data is flushed to disk
- * @locked: non-zero if the caller already holds i_mutex; when zero,
- *          i_mutex is taken here and released before returning
- *
- * Returns 0 on success, or the negative errno from
- * filemap_fdatawrite()/filemap_fdatawait(). */
-static int filter_sync_inode_data(struct inode *inode, int locked)
-{
- int rc = 0;
-
- /* This is nearly do_fsync(), without the waiting on the inode */
- /* XXX: in 2.6.16 (at least) we don't need to hold i_mutex over
- * filemap_fdatawrite() and filemap_fdatawait(), so we may no longer
- * need this lock here at all. */
- if (!locked)
- LOCK_INODE_MUTEX(inode);
- /* skip the flush entirely when the mapping holds no pages */
- if (inode->i_mapping->nrpages) {
-#ifdef PF_SYNCWRITE
- /* older kernels use PF_SYNCWRITE to tell the elevator this
- * task is doing synchronous writeback; restore it below */
- current->flags |= PF_SYNCWRITE;
-#endif
- rc = filemap_fdatawrite(inode->i_mapping);
- if (rc == 0)
- rc = filemap_fdatawait(inode->i_mapping);
-#ifdef PF_SYNCWRITE
- current->flags &= ~PF_SYNCWRITE;
-#endif
- }
- if (!locked)
- UNLOCK_INODE_MUTEX(inode);
-
- return rc;
-}
-/* Clear pages from the mapping before we do direct IO to that offset.
- * Now that the only source of such pages in the truncate path flushes
- * these pages to disk and then discards them, this is an error condition.
- * If read cache is added back this will happen again. This could be
- * disabled until that time if we never see the below error. */
-/* Flush @inode's dirty data, then evict from the page cache any page
- * that overlaps the pages about to be written by direct IO.
- *
- * @inode: inode being written to by direct IO
- * @iobuf: describes the direct-IO pages (dr_pages/dr_npages); only the
- *         page indices are consulted here
- *
- * Returns 0 on success, or the negative errno from
- * filter_sync_inode_data() (via the project RETURN() macro). */
-static int filter_clear_page_cache(struct inode *inode,
- struct filter_iobuf *iobuf)
-{
- struct page *page;
- int i, rc;
-
- /* flush first so any cached page we find below is clean; takes
- * i_mutex itself (locked == 0) */
- rc = filter_sync_inode_data(inode, 0);
- if (rc != 0)
- RETURN(rc);
-
- /* be careful to call this after fsync_inode_data_buffers has waited
- * for IO to complete before we evict it from the cache */
- for (i = 0; i < iobuf->dr_npages; i++) {
- /* look up (and lock) any cached page at the same index as the
- * i-th direct-IO page; NULL means nothing cached there */
- page = find_lock_page(inode->i_mapping,
- iobuf->dr_pages[i]->index);
- if (page == NULL)
- continue;
- /* mapping == NULL means the page was already truncated out
- * from under us; nothing to evict in that case */
- if (page->mapping != NULL) {
- CERROR("page %lu (%d/%d) in page cache during write!\n",
- page->index, i, iobuf->dr_npages);
- wait_on_page_writeback(page);
- ll_truncate_complete_page(page);
- }
-
- unlock_page(page);
- page_cache_release(page);
- }
-
- return 0;
-}
-
-/* Flush and evict the partial page (if any) at @inode's current size,
- * so a truncate that ends mid-page leaves no stale cached tail behind.
- *
- * @inode: inode being truncated; caller already holds i_mutex (we pass
- *         locked == 1 to filter_sync_inode_data())
- *
- * Returns 0 on success (including the nothing-to-do cases), or the
- * negative errno from filter_sync_inode_data(). */
-int filter_clear_truncated_page(struct inode *inode)
-{
- struct page *page;
- int rc;
-
- /* Truncate on page boundary, so nothing to flush? */
- if (!(i_size_read(inode) & ~CFS_PAGE_MASK))
- return 0;
-
- rc = filter_sync_inode_data(inode, 1);
- if (rc != 0)
- RETURN(rc);
-
- /* be careful to call this after fsync_inode_data_buffers has waited
- * for IO to complete before we evict it from the cache */
- /* the page index containing the current (partial-page) EOF */
- page = find_lock_page(inode->i_mapping,
- i_size_read(inode) >> CFS_PAGE_SHIFT);
- if (page) {
- /* mapping == NULL means it was truncated away already */
- if (page->mapping != NULL) {
- wait_on_page_writeback(page);
- ll_truncate_complete_page(page);
- }
- unlock_page(page);
- page_cache_release(page);
- }
-
- return 0;
-}
-