* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
cfs_waitq_signal(&iobuf->dr_wait);
}
+#ifdef HAVE_BIO_ENDIO_2ARG
+#define DIO_RETURN(a) return
+static void dio_complete_routine(struct bio *bio, int error)
+#else
+#define DIO_RETURN(a) return(a)
static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
+#endif
{
struct filter_iobuf *iobuf = bio->bi_private;
struct bio_vec *bvl;
int i;
- if (bio->bi_rw == WRITE &&
- unlikely(test_and_clear_bit(BIO_RDONLY, &bio->bi_flags))) {
- struct block_device *bdev = bio->bi_bdev;
-
- CWARN("Write to readonly device %s (%#x) bi_flags: %lx, "
- "bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_cnt: %d, "
- "bi_private: %p, done: %u, error: %d\n",
- bdev->bd_disk ? bdev->bd_disk->disk_name : "",
- bdev->bd_dev, bio->bi_flags, bio->bi_vcnt, bio->bi_idx,
- bio->bi_size, atomic_read(&bio->bi_cnt), bio->bi_private,
- done, error);
- }
-
/* CAVEAT EMPTOR: possibly in IRQ context
* DO NOT record procfs stats here!!! */
- if (bio->bi_size) /* Not complete */
- return 1;
+#ifdef HAVE_BIO_ENDIO_2ARG
+ /* The "bi_size" check was needed for kernels < 2.6.24 in order to
+ * handle the case where a SCSI request error caused this callback
+ * to be called before all of the biovecs had been processed.
+ * Without this check the server thread will hang. In newer kernels
+ * the bio_end_io routine is never called for partial completions,
+ * so this check is no longer needed. */
+#else
+ if (bio->bi_size) /* Not complete */
+ DIO_RETURN(1);
+#endif
if (unlikely(iobuf == NULL)) {
CERROR("***** bio->bi_private is NULL! This should never "
bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
bio->bi_private);
- return 0;
+ DIO_RETURN(0);
}
/* the check is outside of the cycle for performance reason -bzzz */
* deadlocking the OST. The bios are now released as soon as complete
* so the pool cannot be exhausted while IOs are competing. bug 10076 */
bio_put(bio);
- return 0;
+ DIO_RETURN(0);
}
static int can_be_merged(struct bio *bio, sector_t sector)
continue; /* added this frag OK */
if (bio != NULL) {
- request_queue_t *q =
+ struct request_queue *q =
bdev_get_queue(bio->bi_bdev);
/* Dang! I have to fragment this I/O */
CDEBUG(D_INODE, "bio++ sz %d vcnt %d(%d) "
- "sectors %d(%d) psg %d(%d) hsg %d(%d)\n",
+ "sectors %d(%d) psg %d(%d) hsg %d(%d) "
+ "sector %llu next %llu\n",
bio->bi_size,
bio->bi_vcnt, bio->bi_max_vecs,
- bio->bi_size >> 9, q->max_sectors,
+ bio->bi_size >> 9, queue_max_sectors(q),
bio_phys_segments(q, bio),
- q->max_phys_segments,
+ queue_max_phys_segments(q),
bio_hw_segments(q, bio),
- q->max_hw_segments);
+ queue_max_hw_segments(q),
+ (unsigned long long)bio->bi_sector,
+ (unsigned long long)sector);
record_start_io(iobuf, rw, bio->bi_size, exp);
rc = fsfilt_send_bio(rw, obd, inode, bio);
rc = rc2;
}
- rc2 = fsfilt_commit_async(obd,inode,oti->oti_handle,
- wait_handle);
+ if (wait_handle)
+ rc2 = fsfilt_commit_async(obd,inode,oti->oti_handle,
+ wait_handle);
+ else
+ rc2 = fsfilt_commit(obd, inode, oti->oti_handle, 0);
if (rc == 0)
rc = rc2;
if (rc != 0)
int i, err, cleanup_phase = 0;
struct obd_device *obd = exp->exp_obd;
struct filter_obd *fo = &obd->u.filter;
- void *wait_handle;
+ void *wait_handle = NULL;
int total_size = 0;
unsigned int qcids[MAXQUOTAS] = { oa->o_uid, oa->o_gid };
int rec_pending[MAXQUOTAS] = { 0, 0 }, quota_pages = 0;
+ int sync_journal_commit = obd->u.filter.fo_syncjournal;
ENTRY;
LASSERT(oti != NULL);
LASSERT(PageLocked(lnb->page));
LASSERT(!PageWriteback(lnb->page));
- /* preceding filemap_write_and_wait() should have clean pages */
- if (fo->fo_writethrough_cache)
- clear_page_dirty_for_io(lnb->page);
+ /* since write & truncate are serialized by the i_alloc_sem,
+ * even partial truncate should not leave dirty pages in
+ * the page cache */
LASSERT(!PageDirty(lnb->page));
SetPageUptodate(lnb->page);
(flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
OBD_BRW_FROM_GRANT)
iobuf->dr_ignore_quota = 1;
+
+ if (!(lnb->flags & OBD_BRW_ASYNC)) {
+ sync_journal_commit = 1;
+ }
}
/* we try to get enough quota to write here, and let ldiskfs
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
cleanup_phase = 2;
- DQUOT_INIT(inode);
+ ll_vfs_dq_init(inode);
+ fsfilt_check_slow(obd, now, "quota init");
LOCK_INODE_MUTEX(inode);
fsfilt_check_slow(obd, now, "i_mutex");
/* filter_direct_io drops i_mutex */
rc = filter_direct_io(OBD_BRW_WRITE, res->dentry, iobuf, exp, &iattr,
- oti, &wait_handle);
- if (rc == 0)
- obdo_from_inode(oa, inode,
- FILTER_VALID_FLAGS |OBD_MD_FLUID |OBD_MD_FLGID);
- else
- obdo_from_inode(oa, inode, OBD_MD_FLUID | OBD_MD_FLGID);
+ oti, sync_journal_commit ? &wait_handle : NULL);
+
+ obdo_from_inode(oa, inode, NULL, (rc == 0 ? FILTER_VALID_FLAGS : 0) |
+ OBD_MD_FLUID | OBD_MD_FLGID);
lquota_getflag(filter_quota_interface_ref, obd, oa);
fsfilt_check_slow(obd, now, "direct_io");
- err = fsfilt_commit_wait(obd, inode, wait_handle);
+ if (wait_handle)
+ err = fsfilt_commit_wait(obd, inode, wait_handle);
+ else
+ err = 0;
+
if (err) {
CERROR("Failure to commit OST transaction (%d)?\n", err);
- rc = err;
+ if (rc == 0)
+ rc = err;
}
- if (obd->obd_replayable && !rc)
+ if (obd->obd_replayable && !rc && wait_handle)
LASSERTF(oti->oti_transno <= obd->obd_last_committed,
"oti_transno "LPU64" last_committed "LPU64"\n",
oti->oti_transno, obd->obd_last_committed);