#include <linux/lustre_fsfilt.h>
#include "filter_internal.h"
-#warning "implement writeback mode -bzzz"
-
/* 512byte block min */
#define MAX_BLOCKS_PER_PAGE (PAGE_SIZE / 512)
struct dio_request {
struct dio_request *dreq = bio->bi_private;
unsigned long flags;
+ if (bio->bi_size) {
+ CWARN("gets called against non-complete bio 0x%p: %d/%d/%d\n",
+ bio, bio->bi_size, done, error);
+ return 1;
+ }
+
+ if (dreq == NULL) {
+ CERROR("***** bio->bi_private is NULL! This should never "
+ "happen. Normally, I would crash here, but instead I "
+ "will dump the bio contents to the console. Please "
+ "report this to CFS, along with any interesting messages "
+ "leading up to this point (like SCSI errors, perhaps). "
+ "Because bi_private is NULL, I can't wake up the thread "
+ "that initiated this I/O -- so you will probably have to "
+ "reboot this node.");
+ CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
+ "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
+ "bi_private: %p\n", bio->bi_next, bio->bi_flags,
+ bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
+ bio->bi_end_io, atomic_read(&bio->bi_cnt),
+ bio->bi_private);
+ return 0;
+ }
+
spin_lock_irqsave(&dreq->dr_lock, flags);
bio->bi_private = dreq->dr_bios;
dreq->dr_bios = bio;
rc = dreq->dr_error;
RETURN(rc);
}
-
-static void filter_clear_page_cache(struct inode *inode,
- struct dio_request *iobuf)
+
+/* These are our hacks to keep our directio/bh IO coherent with ext3's
+ * page cache use. Most notably ext3 reads file data into the page
+ * cache when it is zeroing the tail of partial-block truncates and
+ * leaves it there, sometimes generating io from it at later truncates.
+ * This removes the partial page and its buffers from the page cache,
+ * so it should only ever cause a wait in rare cases, as otherwise we
+ * always do full-page IO to the OST.
+ *
+ * The call to truncate_complete_page() will call journal_invalidatepage()
+ * to free the buffers and drop the page from cache. The buffers should
+ * not be dirty, because we already called fdatasync/fdatawait on them.
+ *
+ * Returns 0 on success or the first error hit while flushing/waiting on
+ * the mapping, so the caller can abort the direct IO before submission.
+ */
+static int filter_clear_page_cache(struct inode *inode,
+ struct dio_request *iobuf)
{
struct page *page;
- int i;
-
- for (i = 0; i < iobuf->dr_npages ; i++) {
+ int i, rc, rc2;
+
+ /* This is nearly generic_osync_inode, without the waiting on the inode
+ rc = generic_osync_inode(inode, inode->i_mapping,
+ OSYNC_DATA|OSYNC_METADATA);
+ */
+ /* start writeback of dirty pages, flush the mapping's metadata
+ * buffers, then wait for the data writeback to finish; rc keeps the
+ * first error seen across the three calls */
+ rc = filemap_fdatawrite(inode->i_mapping);
+ rc2 = sync_mapping_buffers(inode->i_mapping);
+ if (rc == 0)
+ rc = rc2;
+ rc2 = filemap_fdatawait(inode->i_mapping);
+ if (rc == 0)
+ rc = rc2;
+ if (rc != 0)
+ RETURN(rc);
+
+ /* be careful to call this after fsync_inode_data_buffers has waited
+ * for IO to complete before we evict it from the cache */
+ for (i = 0; i < iobuf->dr_npages; i++) {
page = find_lock_page(inode->i_mapping,
- iobuf->dr_pages[i]->index);
+ iobuf->dr_pages[i]->index);
if (page == NULL)
- continue;
+ continue;
+ /* NOTE(review): a NULL mapping presumably means the page was
+ * already truncated out of the cache while we waited for the
+ * page lock -- nothing left to evict; confirm against callers */
if (page->mapping != NULL) {
- block_invalidatepage(page, 0);
- wait_on_page_writeback(page);
- ll_truncate_complete_page(page);
+ wait_on_page_writeback(page);
+ ll_truncate_complete_page(page);
}
+
unlock_page(page);
page_cache_release(page);
}
+ return 0;
}
-
/* Must be called with i_sem taken for writes; this will drop it */
int filter_direct_io(int rw, struct dentry *dchild, void *iobuf,
struct obd_export *exp, struct iattr *attr,
rc2 = filter_finish_transno(exp, oti, 0);
if (rc2 != 0)
CERROR("can't close transaction: %d\n", rc);
+ rc = (rc == 0) ? rc2 : rc;
+
+ rc2 = fsfilt_commit_async(obd,inode,oti->oti_handle,wait_handle);
+ rc = (rc == 0) ? rc2 : rc;
- if (rc == 0)
- rc = rc2;
if (rc != 0)
RETURN(rc);
}
- /* This is nearly osync_inode, without the waiting
- rc = generic_osync_inode(inode, inode->i_mapping,
- OSYNC_DATA|OSYNC_METADATA); */
- rc = filemap_fdatawrite(inode->i_mapping);
- rc2 = sync_mapping_buffers(inode->i_mapping);
- if (rc == 0)
- rc = rc2;
- rc2 = filemap_fdatawait(inode->i_mapping);
- if (rc == 0)
- rc = rc2;
-
+ rc = filter_clear_page_cache(inode, dreq);
if (rc != 0)
RETURN(rc);
- /* be careful to call this after fsync_inode_data_buffers has waited
- * for IO to complete before we evict it from the cache */
- filter_clear_page_cache(inode, dreq);
-
RETURN(filter_do_bio(obd, inode, dreq, rw));
}
unsigned long now = jiffies;
int i, err, cleanup_phase = 0;
struct obd_device *obd = exp->exp_obd;
+ void *wait_handle = NULL;
int total_size = 0;
+ loff_t old_size;
ENTRY;
LASSERT(oti != NULL);
cleanup_phase = 2;
down(&inode->i_sem);
+ old_size = inode->i_size;
oti->oti_handle = fsfilt_brw_start(obd, objcount, &fso, niocount, res,
oti);
if (IS_ERR(oti->oti_handle)) {
iattr_from_obdo(&iattr,oa,OBD_MD_FLATIME|OBD_MD_FLMTIME|OBD_MD_FLCTIME);
/* filter_direct_io drops i_sem */
rc = filter_direct_io(OBD_BRW_WRITE, res->dentry, dreq, exp, &iattr,
- oti, NULL);
+ oti, &wait_handle);
+
+#if 0
+ if (inode->i_size != old_size) {
+ struct llog_cookie *cookie = obdo_logcookie(oa);
+ struct lustre_id *id = obdo_id(oa);
+ filter_log_sz_change(obd, id, oa->o_easize, cookie, inode);
+ }
+#endif
+
if (rc == 0)
obdo_from_inode(oa, inode, FILTER_VALID_FLAGS);
fsfilt_check_slow(now, obd_timeout, "direct_io");
- err = fsfilt_commit(obd, obd->u.filter.fo_sb, inode, oti->oti_handle,
- obd_sync_filter);
- if (err)
+ err = fsfilt_commit_wait(obd, inode, wait_handle);
+ if (rc == 0)
rc = err;
-
- if (obd_sync_filter && !err)
- LASSERT(oti->oti_transno <= obd->obd_last_committed);
-
+
fsfilt_check_slow(now, obd_timeout, "commitrw commit");
cleanup: