LASSERTF(iobuf->dr_elapsed_valid == 0,
"iobuf %p, reqs %d, rw %d, line %d\n", iobuf,
- cfs_atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
+ atomic_read(&iobuf->dr_numreqs), iobuf->dr_rw,
iobuf->dr_init_at);
LASSERT(pages <= PTLRPC_MAX_BRW_PAGES);
init_waitqueue_head(&iobuf->dr_wait);
- cfs_atomic_set(&iobuf->dr_numreqs, 0);
+ atomic_set(&iobuf->dr_numreqs, 0);
iobuf->dr_npages = 0;
iobuf->dr_error = 0;
iobuf->dr_dev = d;
#define __REQ_WRITE BIO_RW
#endif
-#ifdef HAVE_BIO_ENDIO_2ARG
-#define DIO_RETURN(a)
static void dio_complete_routine(struct bio *bio, int error)
-#else
-#define DIO_RETURN(a) return(a)
-static int dio_complete_routine(struct bio *bio, unsigned int done, int error)
-#endif
{
struct osd_iobuf *iobuf = bio->bi_private;
	struct bio_vec *bvl;
	int i;
/* CAVEAT EMPTOR: possibly in IRQ context
* DO NOT record procfs stats here!!! */
- if (unlikely(iobuf == NULL)) {
- CERROR("***** bio->bi_private is NULL! This should never "
- "happen. Normally, I would crash here, but instead I "
- "will dump the bio contents to the console. Please "
- "report this to <http://jira.whamcloud.com/> , along "
- "with any interesting messages leading up to this point "
- "(like SCSI errors, perhaps). Because bi_private is "
- "NULL, I can't wake up the thread that initiated this "
- "IO - you will probably have to reboot this node.\n");
- CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
- "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
- "bi_private: %p\n", bio->bi_next, bio->bi_flags,
- bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
- bio->bi_end_io, cfs_atomic_read(&bio->bi_cnt),
- bio->bi_private);
- DIO_RETURN(0);
- }
+ if (unlikely(iobuf == NULL)) {
+ CERROR("***** bio->bi_private is NULL! This should never "
+ "happen. Normally, I would crash here, but instead I "
+ "will dump the bio contents to the console. Please "
+ "report this to <http://jira.whamcloud.com/> , along "
+ "with any interesting messages leading up to this point "
+ "(like SCSI errors, perhaps). Because bi_private is "
+ "NULL, I can't wake up the thread that initiated this "
+ "IO - you will probably have to reboot this node.\n");
+ CERROR("bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d, "
+ "bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, "
+ "bi_private: %p\n", bio->bi_next, bio->bi_flags,
+ bio->bi_rw, bio->bi_vcnt, bio->bi_idx, bio->bi_size,
+ bio->bi_end_io, atomic_read(&bio->bi_cnt),
+ bio->bi_private);
+ return;
+ }
	/* the check is outside of the cycle for performance reasons -bzzz */
	if (!test_bit(__REQ_WRITE, &bio->bi_rw)) {
		bio_for_each_segment(bvl, bio, i) {
			if (likely(error == 0))
				SetPageUptodate(bvl->bv_page);
			LASSERT(PageLocked(bvl->bv_page));
		}
- cfs_atomic_dec(&iobuf->dr_dev->od_r_in_flight);
+ atomic_dec(&iobuf->dr_dev->od_r_in_flight);
} else {
- cfs_atomic_dec(&iobuf->dr_dev->od_w_in_flight);
+ atomic_dec(&iobuf->dr_dev->od_w_in_flight);
}
- /* any real error is good enough -bzzz */
- if (error != 0 && iobuf->dr_error == 0)
- iobuf->dr_error = error;
+ /* any real error is good enough -bzzz */
+ if (error != 0 && iobuf->dr_error == 0)
+ iobuf->dr_error = error;
	/*
	 * set dr_elapsed before dr_numreqs turns to 0, otherwise a late
	 * update here can race with the woken waiter reusing the iobuf,
	 * corrupting data in that processing and tripping the
	 * dr_elapsed_valid assertion on a subsequent call into the OSD.
	 */
- if (cfs_atomic_read(&iobuf->dr_numreqs) == 1) {
+ if (atomic_read(&iobuf->dr_numreqs) == 1) {
iobuf->dr_elapsed = jiffies - iobuf->dr_start_time;
iobuf->dr_elapsed_valid = 1;
}
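+	/* the completion seeing dr_numreqs == 1 is the one about to
+	 * perform the final decrement below, so dr_elapsed is recorded
+	 * exactly once, before the waiter can be woken */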
- if (cfs_atomic_dec_and_test(&iobuf->dr_numreqs))
+ if (atomic_dec_and_test(&iobuf->dr_numreqs))
wake_up(&iobuf->dr_wait);
- /* Completed bios used to be chained off iobuf->dr_bios and freed in
- * filter_clear_dreq(). It was then possible to exhaust the biovec-256
- * mempool when serious on-disk fragmentation was encountered,
- * deadlocking the OST. The bios are now released as soon as complete
- * so the pool cannot be exhausted while IOs are competing. bug 10076 */
- bio_put(bio);
- DIO_RETURN(0);
+ /* Completed bios used to be chained off iobuf->dr_bios and freed in
+ * filter_clear_dreq(). It was then possible to exhaust the biovec-256
+ * mempool when serious on-disk fragmentation was encountered,
+ * deadlocking the OST. The bios are now released as soon as complete
+ * so the pool cannot be exhausted while IOs are competing. bug 10076 */
+ bio_put(bio);
}
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
- struct osd_device *osd = iobuf->dr_dev;
- struct obd_histogram *h = osd->od_brw_stats.hist;
-
- iobuf->dr_frags++;
- cfs_atomic_inc(&iobuf->dr_numreqs);
-
- if (iobuf->dr_rw == 0) {
- cfs_atomic_inc(&osd->od_r_in_flight);
- lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
- cfs_atomic_read(&osd->od_r_in_flight));
- lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
- } else if (iobuf->dr_rw == 1) {
- cfs_atomic_inc(&osd->od_w_in_flight);
- lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
- cfs_atomic_read(&osd->od_w_in_flight));
- lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
- } else {
- LBUG();
- }
+ struct osd_device *osd = iobuf->dr_dev;
+ struct obd_histogram *h = osd->od_brw_stats.hist;
+
+ iobuf->dr_frags++;
+ atomic_inc(&iobuf->dr_numreqs);
+
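+	/* dr_rw was recorded at iobuf setup: 0 means read, 1 means
+	 * write; anything else trips the LBUG() below */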
+ if (iobuf->dr_rw == 0) {
+ atomic_inc(&osd->od_r_in_flight);
+ lprocfs_oh_tally(&h[BRW_R_RPC_HIST],
+ atomic_read(&osd->od_r_in_flight));
+ lprocfs_oh_tally_log2(&h[BRW_R_DISK_IOSIZE], size);
+ } else if (iobuf->dr_rw == 1) {
+ atomic_inc(&osd->od_w_in_flight);
+ lprocfs_oh_tally(&h[BRW_W_RPC_HIST],
+ atomic_read(&osd->od_w_in_flight));
+ lprocfs_oh_tally_log2(&h[BRW_W_DISK_IOSIZE], size);
+ } else {
+ LBUG();
+ }
}
static void osd_submit_bio(int rw, struct bio *bio)
rc = 0;
}
- out:
- /* in order to achieve better IO throughput, we don't wait for writes
- * completion here. instead we proceed with transaction commit in
- * parallel and wait for IO completion once transaction is stopped
- * see osd_trans_stop() for more details -bzzz */
- if (iobuf->dr_rw == 0) {
+out:
+	/* in order to achieve better IO throughput, we don't wait for
+	 * write completion here. instead we proceed with the transaction
+	 * commit in parallel and wait for IO completion once the
+	 * transaction is stopped - see osd_trans_stop() for details -bzzz */
+ if (iobuf->dr_rw == 0) {
wait_event(iobuf->dr_wait,
- cfs_atomic_read(&iobuf->dr_numreqs) == 0);
- }
+ atomic_read(&iobuf->dr_numreqs) == 0);
+ }
- if (rc == 0)
- rc = iobuf->dr_error;
- RETURN(rc);
+ if (rc == 0)
+ rc = iobuf->dr_error;
+ RETURN(rc);
}
static int osd_map_remote_to_local(loff_t offset, ssize_t len, int *nrpages,
return rc;
}
+static inline int osd_extents_enabled(struct super_block *sb,
+ struct inode *inode)
+{
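+	/* an existing inode states it definitively via its EXTENTS
+	 * flag; with no inode yet, assume the mount-wide default */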
+ if (inode != NULL) {
+ if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL)
+ return 1;
+ } else if (test_opt(sb, EXTENTS)) {
+ return 1;
+ }
+ return 0;
+}
+
+static inline int osd_calc_bkmap_credits(struct super_block *sb,
+ struct inode *inode,
+ const loff_t size,
+ const loff_t pos,
+ const int blocks)
+{
+ int credits, bits, bs, i;
+
+ bits = sb->s_blocksize_bits;
+ bs = 1 << bits;
+
+	/* legacy blockmap: 3 levels * 3 (bitmap, gd, itself);
+	 * we do not expect blockmaps on large files,
+	 * so let's shrink it to 2 levels (4GB files) */
+
+ /* this is default reservation: 2 levels */
+ credits = (blocks + 2) * 3;
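+	/* e.g. writing 8 blocks reserves (8 + 2) * 3 = 30 credits:
+	 * each data block and up to 2 indirect blocks may each touch
+	 * a bitmap, a group descriptor and the block itself */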
+
+ /* actual offset is unknown, hard to optimize */
+ if (pos == -1)
+ return credits;
+
+	/* now check a few specific cases to optimize */
+ if (pos + size <= LDISKFS_NDIR_BLOCKS * bs) {
+ /* no indirects */
+ credits = blocks;
+ /* allocate if not allocated */
+ if (inode == NULL) {
+ credits += blocks * 2;
+ return credits;
+ }
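+		/* e.g. overwriting 4 direct blocks that are all allocated
+		 * costs just 4 credits; every unallocated slot found below
+		 * adds 2 more (bitmap + group descriptor) */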
+ for (i = (pos >> bits); i < (pos >> bits) + blocks; i++) {
+ LASSERT(i < LDISKFS_NDIR_BLOCKS);
+ if (LDISKFS_I(inode)->i_data[i] == 0)
+ credits += 2;
+ }
+	} else if (pos + size <= (LDISKFS_NDIR_BLOCKS + 1024) * bs) {
+		/* single indirect */
+		credits = blocks * 3;
+		/* the indirect block itself may still need to be
+		 * allocated: bitmap, gd and the block make 3 more */
+		if (inode == NULL ||
+		    LDISKFS_I(inode)->i_data[LDISKFS_IND_BLOCK] == 0)
+			credits += 3;
+		else
+			/* the existing indirect block will be modified */
+			credits += 1;
+	}
+
+ return credits;
+}
+
static ssize_t osd_declare_write(const struct lu_env *env, struct dt_object *dt,
- const loff_t size, loff_t pos,
- struct thandle *handle)
+ const struct lu_buf *buf, loff_t _pos,
+ struct thandle *handle)
{
- struct osd_thandle *oh;
- int credits;
- struct inode *inode;
- int rc;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct super_block *sb = osd_sb(osd_obj2dev(obj));
+ struct osd_thandle *oh;
+ int rc = 0, est = 0, credits, blocks, allocated = 0;
+ int bits, bs;
+ int depth, size;
+ loff_t pos;
ENTRY;
+ LASSERT(buf != NULL);
LASSERT(handle != NULL);
oh = container_of0(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
- credits = osd_dto_credits_noquota[DTO_WRITE_BLOCK];
+ size = buf->lb_len;
+ bits = sb->s_blocksize_bits;
+ bs = 1 << bits;
- osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
+ if (_pos == -1) {
+		/* if this is an append, then we
+		 * should expect a cross-block record */
+ pos = 0;
+ } else {
+ pos = _pos;
+ }
- inode = osd_dt_obj(dt)->oo_inode;
+ /* blocks to modify */
+ blocks = ((pos + size + bs - 1) >> bits) - (pos >> bits);
+ LASSERT(blocks > 0);
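+	/* e.g. with bs = 4096: a 6000-byte write at pos 2048 covers
+	 * ((2048 + 6000 + 4095) >> 12) - (2048 >> 12) = 2 blocks */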
+
+ if (inode != NULL && _pos != -1) {
+ /* object size in blocks */
+ est = (i_size_read(inode) + bs - 1) >> bits;
+ allocated = inode->i_blocks >> (bits - 9);
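+		/* i_blocks counts 512-byte sectors, so shift by
+		 * (bits - 9) to convert to filesystem blocks */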
+ if (pos + size <= i_size_read(inode) && est <= allocated) {
+ /* looks like an overwrite, no need to modify tree */
+ credits = blocks;
+ /* no need to modify i_size */
+ goto out;
+ }
+ }
+
+ if (osd_extents_enabled(sb, inode)) {
+		/*
+		 * many concurrent threads may grow the tree by the time
+		 * our transaction starts, so consider 2 the minimum depth.
+		 * for every level we may need to allocate a new block and
+		 * take some entries from the old one: 3 blocks to allocate
+		 * (bitmap, gd, itself) plus the old block = 4 per level.
+		 */
+ depth = inode != NULL ? ext_depth(inode) : 0;
+ depth = max(depth, 1) + 1;
+ credits = depth;
+		/* if not append, then a split may need to modify
+		 * existing blocks, moving entries into the new ones */
+		if (_pos != -1)
+			credits += depth;
+ /* blocks to store data: bitmap,gd,itself */
+ credits += blocks * 3;
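+		/* e.g. a depth-1 tree (clamped to depth = 2 here), 8 blocks,
+		 * non-append write: 2 + 2 + 8 * 3 = 28 credits */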
+ } else {
+ credits = osd_calc_bkmap_credits(sb, inode, size, _pos, blocks);
+ }
+	/* one more credit to update the inode itself; if the inode is
+	 * created within this transaction, the creation method has
+	 * already counted that credit */
+ if (inode != NULL)
+ credits++;
- /* we may declare write to non-exist llog */
- if (inode == NULL)
- RETURN(0);
+out:
+ osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
/* dt_declare_write() is usually called for system objects, such
* as llog or last_rcvd files. We needn't enforce quota on those
* objects, so always set the lqi_space as 0. */
- rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid, 0, oh,
- true, true, NULL, false);
+ if (inode != NULL)
+ rc = osd_declare_inode_qid(env, inode->i_uid, inode->i_gid,
+ 0, oh, true, true, NULL, false);
RETURN(rc);
}
}
static int osd_punch(const struct lu_env *env, struct dt_object *dt,
- __u64 start, __u64 end, struct thandle *th,
- struct lustre_capa *capa)
+ __u64 start, __u64 end, struct thandle *th,
+ struct lustre_capa *capa)
{
- struct osd_thandle *oh;
- struct osd_object *obj = osd_dt_obj(dt);
- struct inode *inode = obj->oo_inode;
- handle_t *h;
- tid_t tid;
- loff_t oldsize;
+ struct osd_thandle *oh;
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ handle_t *h;
+ tid_t tid;
int rc = 0, rc2 = 0;
- ENTRY;
+ ENTRY;
- LASSERT(end == OBD_OBJECT_EOF);
- LASSERT(dt_object_exists(dt));
- LASSERT(osd_invariant(obj));
+ LASSERT(end == OBD_OBJECT_EOF);
+ LASSERT(dt_object_exists(dt));
+ LASSERT(osd_invariant(obj));
LASSERT(inode != NULL);
ll_vfs_dq_init(inode);
- LASSERT(th);
- oh = container_of(th, struct osd_thandle, ot_super);
- LASSERT(oh->ot_handle->h_transaction != NULL);
+ LASSERT(th);
+ oh = container_of(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle->h_transaction != NULL);
osd_trans_exec_op(env, th, OSD_OT_PUNCH);
- tid = oh->ot_handle->h_transaction->t_tid;
+ tid = oh->ot_handle->h_transaction->t_tid;
- oldsize=inode->i_size;
i_size_write(inode, start);
- truncate_pagecache(inode, oldsize, start);
- if (inode->i_op->truncate)
+ ll_truncate_pagecache(inode, start);
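+	/* newer kernels have dropped inode_operations->truncate; when
+	 * the method is unavailable (no HAVE_INODEOPS_TRUNCATE), call
+	 * ldiskfs_truncate() directly */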
+#ifdef HAVE_INODEOPS_TRUNCATE
+ if (inode->i_op->truncate) {
inode->i_op->truncate(inode);
+ } else
+#endif
+ ldiskfs_truncate(inode);
- /*
- * For a partial-page truncate, flush the page to disk immediately to
- * avoid data corruption during direct disk write. b=17397
- */
+ /*
+ * For a partial-page truncate, flush the page to disk immediately to
+ * avoid data corruption during direct disk write. b=17397
+ */
if ((start & ~CFS_PAGE_MASK) != 0)
rc = filemap_fdatawrite_range(inode->i_mapping, start, start+1);