*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/osd/osd_io.c
*
/* prerequisite for linux/xattr.h */
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/swap.h>
#include <linux/pagevec.h>
/* ext_depth() */
#include <ldiskfs/ldiskfs_extents.h>
+#include <ldiskfs/ldiskfs.h>
static inline bool osd_use_page_cache(struct osd_device *d)
{
iobuf->dr_elapsed_valid = 0;
LASSERT(iobuf->dr_dev == d);
LASSERT(iobuf->dr_frags > 0);
- lprocfs_oh_tally(&d->od_brw_stats.hist[BRW_R_DIO_FRAGS+rw],
+ lprocfs_oh_tally(&d->od_brw_stats.bs_hist[BRW_R_DIO_FRAGS + rw],
iobuf->dr_frags);
- lprocfs_oh_tally_log2(&d->od_brw_stats.hist[BRW_R_IO_TIME+rw],
+ lprocfs_oh_tally_log2(&d->od_brw_stats.bs_hist[BRW_R_IO_TIME + rw],
ktime_to_ms(iobuf->dr_elapsed));
}
}
#ifdef HAVE_BIO_ENDIO_USES_ONE_ARG
static void dio_complete_routine(struct bio *bio)
{
- int error = bio->bi_status;
+ int error = blk_status_to_errno(bio->bi_status);
#else
static void dio_complete_routine(struct bio *bio, int error)
{
*/
if (unlikely(iobuf == NULL)) {
- CERROR("***** bio->bi_private is NULL! This should never happen. Normally, I would crash here, but instead I will dump the bio contents to the console. Please report this to <https://jira.whamcloud.com/> , along with any interesting messages leading up to this point (like SCSI errors, perhaps). Because bi_private is NULL, I can't wake up the thread that initiated this IO - you will probably have to reboot this node.\n");
+ CERROR("***** bio->bi_private is NULL! Dump the bio contents to the console. Please report this to <https://jira.whamcloud.com/>, and probably have to reboot this node.\n");
CERROR("bi_next: %p, bi_flags: %lx, " __stringify(bi_opf)
": %x, bi_vcnt: %d, bi_idx: %d, bi->size: %d, bi_end_io: %p, bi_cnt: %d, bi_private: %p\n",
bio->bi_next, (unsigned long)bio->bi_flags,
static void record_start_io(struct osd_iobuf *iobuf, int size)
{
- struct osd_device *osd = iobuf->dr_dev;
- struct obd_histogram *h = osd->od_brw_stats.hist;
+ struct osd_device *osd = iobuf->dr_dev;
+ struct obd_histogram *h = osd->od_brw_stats.bs_hist;
iobuf->dr_frags++;
atomic_inc(&iobuf->dr_numreqs);
{
struct blk_integrity *bi = bdev_get_integrity(bdev);
struct bio_integrity_payload *bip = bio->bi_integrity;
- struct niobuf_local *lnb;
+ struct niobuf_local *lnb = NULL;
unsigned short sector_size = blk_integrity_interval(bi);
void *bio_prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
struct bio_vec *bv;
sector_t sector = bio_start_sector(bio);
- unsigned int sectors, total;
+ unsigned int i, sectors, total;
DECLARE_BVEC_ITER_ALL(iter_all);
__u16 *expected_guard;
int rc;
total = 0;
bio_for_each_segment_all(bv, bio, iter_all) {
- lnb = iobuf->dr_lnbs[index];
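+ /*
+ * bio segments may no longer map 1:1 to iobuf pages once IO can
+ * start mid-buffer, so find the lnb owning this bv_page, resuming
+ * the scan from the previous match.
+ */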
+ for (i = index; i < iobuf->dr_npages; i++) {
+ if (iobuf->dr_pages[i] == bv->bv_page) {
+ lnb = iobuf->dr_lnbs[i];
+ break;
+ }
+ }
+ if (!lnb)
+ continue;
expected_guard = lnb->lnb_guards;
sectors = bv->bv_len / sector_size;
if (lnb->lnb_guard_rpc) {
total += sectors * bi->tuple_size;
LASSERT(total <= bip_size(bio->bi_integrity));
index++;
+ lnb = NULL;
}
return 0;
}
RETURN(0);
}
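+/*
+ * Mark pages covered by this IO as OBD_BRW_DONE so a restarted
+ * transaction does not resubmit them. pg_start rounds up and pg_end
+ * rounds down, so pages only partially covered by [start_blocks,
+ * start_blocks + count) are intentionally left unmarked.
+ */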
+static void osd_mark_page_io_done(struct osd_iobuf *iobuf,
+ struct inode *inode,
+ sector_t start_blocks,
+ sector_t count)
+{
+ struct niobuf_local *lnb;
+ int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
+ pgoff_t pg_start, pg_end;
+
+ pg_start = start_blocks / blocks_per_page;
+ if (start_blocks % blocks_per_page)
+ pg_start++;
+ if (count >= blocks_per_page)
+ pg_end = (start_blocks + count -
+ blocks_per_page) / blocks_per_page;
+ else
+ return; /* nothing to mark */
+ for ( ; pg_start <= pg_end; pg_start++) {
+ lnb = iobuf->dr_lnbs[pg_start];
+ lnb->lnb_flags |= OBD_BRW_DONE;
+ }
+}
+
static int osd_do_bio(struct osd_device *osd, struct inode *inode,
- struct osd_iobuf *iobuf)
+ struct osd_iobuf *iobuf, sector_t start_blocks,
+ sector_t count)
{
int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
struct page **pages = iobuf->dr_pages;
int npages = iobuf->dr_npages;
sector_t *blocks = iobuf->dr_blocks;
- int total_blocks = npages * blocks_per_page;
struct super_block *sb = inode->i_sb;
int sector_bits = sb->s_blocksize_bits - 9;
unsigned int blocksize = sb->s_blocksize;
unsigned int page_offset;
sector_t sector;
int nblocks;
- int block_idx;
- int page_idx;
+ int block_idx, block_idx_end;
+ int page_idx, page_idx_start;
int i;
int rc = 0;
bool fault_inject;
bool integrity_enabled;
struct blk_plug plug;
+ int blocks_left_page;
+
ENTRY;
fault_inject = OBD_FAIL_CHECK(OBD_FAIL_OST_INTEGRITY_FAULT);
osd_brw_stats_update(osd, iobuf);
iobuf->dr_start_time = ktime_get();
+ if (!count)
+ count = npages * blocks_per_page;
+ block_idx_end = start_blocks + count;
+
blk_start_plug(&plug);
- for (page_idx = 0, block_idx = 0;
- page_idx < npages;
- page_idx++, block_idx += blocks_per_page) {
- page = pages[page_idx];
- LASSERT(block_idx + blocks_per_page <= total_blocks);
- for (i = 0, page_offset = 0;
- i < blocks_per_page;
+ page_idx_start = start_blocks / blocks_per_page;
+ for (page_idx = page_idx_start, block_idx = start_blocks;
+ block_idx < block_idx_end; page_idx++,
+ block_idx += blocks_left_page) {
+ /* For cases where the filesystem's blocksize is not the
+ * same as PAGE_SIZE (e.g. ARM with PAGE_SIZE=64KB and
+ * blocksize=4KB), there will be multiple blocks to
+ * read/write per page. Also, the start and end block may
+ * not be aligned to the start and end of the page, so the
+ * first page may skip some blocks at the start ("i != 0",
+ * "blocks_left_page" is reduced), and the last page may
+ * skip some blocks at the end (limited by "count").
+ */
+ page = pages[page_idx];
+ LASSERT(page_idx < iobuf->dr_npages);
+
+ i = block_idx % blocks_per_page;
+ blocks_left_page = blocks_per_page - i;
+ if (block_idx + blocks_left_page > block_idx_end)
+ blocks_left_page = block_idx_end - block_idx;
+ page_offset = i * blocksize;
+ for (i = 0; i < blocks_left_page;
i += nblocks, page_offset += blocksize * nblocks) {
nblocks = 1;
if (blocks[block_idx + i] == 0) { /* hole */
LASSERTF(iobuf->dr_rw == 0,
- "page_idx %u, block_idx %u, i %u\n",
- page_idx, block_idx, i);
+ "page_idx %u, block_idx %u, i %u,"
+ "start_blocks: %llu, count: %llu, npages: %d\n",
+ page_idx, block_idx, i,
+ (unsigned long long)start_blocks,
+ (unsigned long long)count, npages);
memset(kmap(page) + page_offset, 0, blocksize);
kunmap(page);
continue;
sector = (sector_t)blocks[block_idx + i] << sector_bits;
/* Additional contiguous file blocks? */
- while (i + nblocks < blocks_per_page &&
+ while (i + nblocks < blocks_left_page &&
(sector + (nblocks << sector_bits)) ==
((sector_t)blocks[block_idx + i + nblocks] <<
sector_bits))
bio_start_page_idx = page_idx;
/* allocate new bio */
bio = bio_alloc(GFP_NOIO, min(BIO_MAX_PAGES,
- (npages - page_idx) *
- blocks_per_page));
+ (block_idx_end - block_idx +
+ blocks_left_page - 1)));
if (bio == NULL) {
- CERROR("Can't allocate bio %u*%u = %u pages\n",
- (npages - page_idx), blocks_per_page,
- (npages - page_idx) * blocks_per_page);
+ CERROR("Can't allocate bio %u pages\n",
+ block_idx_end - block_idx +
+ blocks_left_page - 1);
rc = -ENOMEM;
goto out;
}
OBD_FREE_PTR(bio_private);
}
+ /* mark pages as done on the write path only */
+ if (rc == 0 && iobuf->dr_rw)
+ osd_mark_page_io_done(iobuf, inode,
+ start_blocks, count);
+
RETURN(rc);
}
GOTO(cleanup, rc = -ENOMEM);
lnb->lnb_locked = 1;
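+ /* mark_page_accessed() needs <linux/swap.h>, added above; it keeps
+ * frequently used cache pages on the active LRU list */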
+ if (cache)
+ mark_page_accessed(lnb->lnb_page);
}
#if 0
osd_bufs_put(env, dt, lnb - i, i);
return rc;
}
+/* Borrow @ext4_chunk_trans_blocks */
+static int osd_chunk_trans_blocks(struct inode *inode, int nrblocks)
+{
+ ldiskfs_group_t groups;
+ int gdpblocks;
+ int idxblocks;
+ int depth;
+ int ret;
+
+ depth = ext_depth(inode);
+ idxblocks = depth * 2;
+
+ /*
+ * Now let's see how many group bitmap and group descriptor
+ * blocks we need to account for.
+ */
+ groups = idxblocks + 1;
+ gdpblocks = groups;
+ if (groups > LDISKFS_SB(inode->i_sb)->s_groups_count)
+ groups = LDISKFS_SB(inode->i_sb)->s_groups_count;
+ if (gdpblocks > LDISKFS_SB(inode->i_sb)->s_gdb_count)
+ gdpblocks = LDISKFS_SB(inode->i_sb)->s_gdb_count;
+
+ /* bitmaps and block group descriptor blocks */
+ ret = idxblocks + groups + gdpblocks;
+
+ /* Blocks for super block, inode, quota and xattr blocks */
+ ret += LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
+
+ return ret;
+}
+
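+/*
+ * Ensure the running handle has at least @needed credits, extending
+ * or restarting the transaction as necessary; a successful restart
+ * is transparent to the caller and returns 0.
+ */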
+#ifdef HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS
+static int osd_extend_restart_trans(handle_t *handle, int needed,
+ struct inode *inode)
+{
+ int rc;
+
+ rc = ldiskfs_journal_ensure_credits(handle, needed,
+ ldiskfs_trans_default_revoke_credits(inode->i_sb));
+ /* this means journal has been restarted */
+ if (rc > 0)
+ rc = 0;
+
+ return rc;
+}
+#else
+static int osd_extend_restart_trans(handle_t *handle, int needed,
+ struct inode *inode)
+{
+ int rc;
+
+ if (ldiskfs_handle_has_enough_credits(handle, needed))
+ return 0;
+ rc = ldiskfs_journal_extend(handle,
+ needed - handle->h_buffer_credits);
+ if (rc <= 0)
+ return rc;
+
+ return ldiskfs_journal_restart(handle, needed);
+}
+#endif /* HAVE_LDISKFS_JOURNAL_ENSURE_CREDITS */
+
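+/*
+ * Flush the blocks mapped so far: grow i_size/i_disksize under i_lock
+ * if the write extended the file, then submit bios for the
+ * [start_blocks, start_blocks + count) range.
+ */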
+static int osd_ldiskfs_map_write(struct inode *inode, struct osd_iobuf *iobuf,
+ struct osd_device *osd, sector_t start_blocks,
+ sector_t count, loff_t *disk_size,
+ __u64 user_size)
+{
+ /* if file has grown, take user_size into account */
+ if (user_size && *disk_size > user_size)
+ *disk_size = user_size;
+
+ spin_lock(&inode->i_lock);
+ if (*disk_size > i_size_read(inode)) {
+ i_size_write(inode, *disk_size);
+ LDISKFS_I(inode)->i_disksize = *disk_size;
+ spin_unlock(&inode->i_lock);
+ osd_dirty_inode(inode, I_DIRTY_DATASYNC);
+ } else {
+ spin_unlock(&inode->i_lock);
+ }
+
+ /*
+ * We don't do stats here as in the read path, because write
+ * is async: the accounting is done in osd_bufs_put().
+ */
+ return osd_do_bio(osd, inode, iobuf, start_blocks, count);
+}
-static int osd_ldiskfs_map_inode_pages(struct inode *inode, struct page **page,
- int pages, sector_t *blocks,
- int create)
+static unsigned int osd_extent_bytes(const struct osd_device *o)
+{
+ unsigned int *extent_bytes_ptr =
+ raw_cpu_ptr(o->od_extent_bytes_percpu);
+
+ if (likely(*extent_bytes_ptr))
+ return *extent_bytes_ptr;
+
+ /* initialize on first access or CPU hotplug */
+ if (!ldiskfs_has_feature_extents(osd_sb(o)))
+ *extent_bytes_ptr = 1 << osd_sb(o)->s_blocksize_bits;
+ else
+ *extent_bytes_ptr = OSD_DEFAULT_EXTENT_BYTES;
+
+ return *extent_bytes_ptr;
+}
+
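+/*
+ * The average decays with factor 1/EXTENT_BYTES_DECAY:
+ * new_avg = (old_avg * 63 + min(new_bytes, OSD_DEFAULT_EXTENT_BYTES)) / 64,
+ * rounded up. For example, assuming OSD_DEFAULT_EXTENT_BYTES is 1MB:
+ * starting from a 1MB average, a single 4KB allocation only lowers
+ * the average to ~1008KB, so one fragmented allocation barely moves
+ * the estimate.
+ */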
+#define EXTENT_BYTES_DECAY 64
+static void osd_decay_extent_bytes(struct osd_device *osd,
+ unsigned int new_bytes)
+{
+ unsigned int old_bytes;
+
+ if (!ldiskfs_has_feature_extents(osd_sb(osd)))
+ return;
+
+ old_bytes = osd_extent_bytes(osd);
+ *raw_cpu_ptr(osd->od_extent_bytes_percpu) =
+ (old_bytes * (EXTENT_BYTES_DECAY - 1) +
+ min(new_bytes, OSD_DEFAULT_EXTENT_BYTES) +
+ EXTENT_BYTES_DECAY - 1) / EXTENT_BYTES_DECAY;
+}
+
+static int osd_ldiskfs_map_inode_pages(struct inode *inode,
+ struct osd_iobuf *iobuf,
+ struct osd_device *osd,
+ int create, __u64 user_size,
+ int check_credits,
+ struct thandle *thandle)
{
int blocks_per_page = PAGE_SIZE >> inode->i_blkbits;
- int rc = 0, i = 0;
+ int rc = 0, i = 0, mapped_index = 0;
struct page *fp = NULL;
int clen = 0;
pgoff_t max_page_index;
handle_t *handle = NULL;
+ sector_t start_blocks = 0, count = 0;
+ loff_t disk_size = 0;
+ struct page **page = iobuf->dr_pages;
+ int pages = iobuf->dr_npages;
+ sector_t *blocks = iobuf->dr_blocks;
+ struct niobuf_local *lnb1, *lnb2;
+ loff_t size1, size2;
max_page_index = inode->i_sb->s_maxbytes >> PAGE_SHIFT;
rc = osd_attach_jinode(inode);
if (rc)
return rc;
+ disk_size = i_size_read(inode);
+ /* if disk_size is already bigger than specified user_size,
+ * ignore user_size
+ */
+ if (disk_size > user_size)
+ user_size = 0;
}
/* pages are sorted already, so we just have to find
 * contiguous chunks and process them properly
 */
while (i < pages) {
- long blen, total = 0;
+ long blen, total = 0, previous_total = 0;
struct ldiskfs_map_blocks map = { 0 };
if (fp == NULL) { /* start new extent */
map.m_lblk = fp->index * blocks_per_page;
map.m_len = blen = clen * blocks_per_page;
cont_map:
+ /**
+ * We might restart the transaction for block allocations. To
+ * preserve data-ordered mode semantics, the IO submission, disk
+ * size update and block allocations must all stay within the
+ * same transaction for consistency.
+ */
+ if (handle && check_credits) {
+ struct osd_thandle *oh;
+
+ LASSERT(thandle != NULL);
+ oh = container_of(thandle, struct osd_thandle,
+ ot_super);
+ /*
+ * only issue IO if a transaction restart is needed, since
+ * updating the disk size requires the inode lock, which we
+ * want to avoid as much as possible.
+ */
+ if (oh->oh_declared_ext <= 0) {
+ rc = osd_ldiskfs_map_write(inode,
+ iobuf, osd, start_blocks,
+ count, &disk_size, user_size);
+ if (rc)
+ GOTO(cleanup, rc);
+ thandle->th_restart_tran = 1;
+ GOTO(cleanup, rc = -EAGAIN);
+ }
+
+ if (OBD_FAIL_CHECK(OBD_FAIL_OST_RESTART_IO))
+ oh->oh_declared_ext = 0;
+ else
+ oh->oh_declared_ext--;
+ }
rc = ldiskfs_map_blocks(handle, inode, &map, create);
if (rc >= 0) {
int c = 0;
total++;
break;
}
+ if ((map.m_flags & LDISKFS_MAP_UNWRITTEN) &&
+ !create) {
+ /* don't try to read allocated but
+ * unwritten blocks; instead fill the
+ * pages with zeros in osd_do_bio() */
+ *(blocks + total) = 0;
+ continue;
+ }
*(blocks + total) = map.m_pblk + c;
/* unmap any possible underlying
* metadata from the block device
}
rc = 0;
}
+
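+ /*
+ * Track the on-disk size implied by the blocks mapped so far:
+ * size1 covers from the page-aligned start of the first lnb
+ * through the newly mapped blocks, clamped by size2, the end of
+ * the last mapped lnb, so a partially filled last page does not
+ * inflate the size.
+ */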
+ if (rc == 0 && create) {
+ count += (total - previous_total);
+ mapped_index = (count + blocks_per_page -
+ 1) / blocks_per_page - 1;
+ lnb1 = iobuf->dr_lnbs[i - clen];
+ lnb2 = iobuf->dr_lnbs[mapped_index];
+ size1 = lnb1->lnb_file_offset -
+ (lnb1->lnb_file_offset % PAGE_SIZE) +
+ (total << inode->i_blkbits);
+ size2 = lnb2->lnb_file_offset + lnb2->lnb_len;
+
+ if (size1 > size2)
+ size1 = size2;
+ if (size1 > disk_size)
+ disk_size = size1;
+ }
+
if (rc == 0 && total < blen) {
+ /*
+ * decay the average extent size if we could not
+ * allocate the extent in a single attempt.
+ */
+ osd_decay_extent_bytes(osd,
+ (total - previous_total) << inode->i_blkbits);
map.m_lblk = fp->index * blocks_per_page + total;
map.m_len = blen - total;
+ previous_total = total;
goto cont_map;
}
if (rc != 0)
GOTO(cleanup, rc);
-
+ /*
+ * update the decaying average when a single attempt
+ * allocated a good large extent.
+ */
+ if (total - previous_total >=
+ osd_extent_bytes(osd) >> inode->i_blkbits)
+ osd_decay_extent_bytes(osd,
+ (total - previous_total) << inode->i_blkbits);
/* look for next extent */
fp = NULL;
blocks += blocks_per_page * clen;
}
cleanup:
+ if (rc == 0 && create &&
+ start_blocks < pages * blocks_per_page) {
+ rc = osd_ldiskfs_map_write(inode, iobuf, osd, start_blocks,
+ count, &disk_size, user_size);
+ LASSERT(start_blocks + count == pages * blocks_per_page);
+ }
return rc;
}
lprocfs_counter_add(osd->od_stats, LPROC_OSD_GET_PAGE, timediff);
if (iobuf->dr_npages) {
- rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
- iobuf->dr_npages,
- iobuf->dr_blocks, 0);
+ rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
+ 0, 0, NULL);
if (likely(rc == 0)) {
- rc = osd_do_bio(osd, inode, iobuf);
+ rc = osd_do_bio(osd, inode, iobuf, 0, 0);
/* do IO stats for preparation reads */
osd_fini_iobuf(osd, iobuf);
}
struct osd_fextent {
sector_t start;
sector_t end;
+ __u32 flags;
unsigned int mapped:1;
};
sector_t start;
struct fiemap_extent_info fei = { 0 };
struct fiemap_extent fe = { 0 };
- mm_segment_t saved_fs;
int rc;
if (block >= cached_extent->start && block < cached_extent->end)
fei.fi_extents_max = 1;
fei.fi_extents_start = &fe;
- saved_fs = get_fs();
- set_fs(KERNEL_DS);
rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
- set_fs(saved_fs);
if (rc != 0)
return 0;
start = fe.fe_logical >> inode->i_blkbits;
+ cached_extent->flags = fe.fe_flags;
+ if (fei.fi_extents_mapped == 0) {
+ /* A special case: no extent was found at this offset or beyond,
+ * so treat it as a hole to EOF. This is safe to cache, since other
+ * threads cannot allocate/punch blocks this thread is working
+ * on (LDLM). */
+ cached_extent->start = block;
+ cached_extent->end = i_size_read(inode) >> inode->i_blkbits;
+ cached_extent->mapped = 0;
+ return 0;
+ }
if (start > block) {
cached_extent->start = block;
return cached_extent->mapped;
}
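+/*
+ * Cap the number of extents assumed per write when reserving journal
+ * credits; see the credit reservation comment in
+ * osd_declare_write_commit() below.
+ */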
+#define MAX_EXTENTS_PER_WRITE 100
static int osd_declare_write_commit(const struct lu_env *env,
struct dt_object *dt,
struct niobuf_local *lnb, int npages,
const struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
struct inode *inode = osd_dt_obj(dt)->oo_inode;
struct osd_thandle *oh;
- int extents = 0;
- int depth;
+ int extents = 0, new_meta = 0;
+ int depth, new_blocks = 0;
int i;
- int newblocks = 0;
+ int dirty_groups = 0;
int rc = 0;
int credits = 0;
long long quota_space = 0;
struct osd_fextent mapped = { 0 }, extent = { 0 };
enum osd_quota_local_flags local_flags = 0;
enum osd_qid_declare_flags declare_flags = OSD_QID_BLK;
+ unsigned int extent_bytes;
ENTRY;
LASSERT(handle != NULL);
oh = container_of(handle, struct osd_thandle, ot_super);
LASSERT(oh->ot_handle == NULL);
+ /*
+ * We track a decaying average extent size per filesystem.
+ * Most of the time it will be 1MB; as the filesystem becomes
+ * heavily fragmented it can shrink to 4KB at worst.
+ */
+ extent_bytes = osd_extent_bytes(osd);
+ LASSERT(extent_bytes >= osd_sb(osd)->s_blocksize);
+
/* calculate number of extents (probably better to pass nb) */
for (i = 0; i < npages; i++) {
/* ignore quota for the whole request if any page is from
* client cache or written by root.
*
- * XXX once we drop the 1.8 client support, the checking
- * for whether page is from cache can be simplified as:
- * !(lnb[i].flags & OBD_BRW_SYNC)
- *
* XXX we could handle this on per-lnb basis as done by
* grant.
*/
if ((lnb[i].lnb_flags & OBD_BRW_NOQUOTA) ||
- (lnb[i].lnb_flags & (OBD_BRW_FROM_GRANT | OBD_BRW_SYNC)) ==
- OBD_BRW_FROM_GRANT)
+ (lnb[i].lnb_flags & OBD_BRW_SYS_RESOURCE) ||
+ !(lnb[i].lnb_flags & OBD_BRW_SYNC))
declare_flags |= OSD_QID_FORCE;
- if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &mapped)) {
+ /*
+ * Convert unwritten extent might need split extents, could
+ * not skip it.
+ */
+ if (osd_is_mapped(dt, lnb[i].lnb_file_offset, &mapped) &&
+ !(mapped.flags & FIEMAP_EXTENT_UNWRITTEN)) {
+ lnb[i].lnb_flags |= OBD_BRW_MAPPED;
+ continue;
+ }
+
+ if (lnb[i].lnb_flags & OBD_BRW_DONE) {
lnb[i].lnb_flags |= OBD_BRW_MAPPED;
continue;
}
/* count only unmapped changes */
- newblocks++;
+ new_blocks++;
if (lnb[i].lnb_file_offset != extent.end || extent.end == 0) {
- extents++;
+ if (extent.end != 0)
+ extents += (extent.end - extent.start +
+ extent_bytes - 1) / extent_bytes;
+ extent.start = lnb[i].lnb_file_offset;
extent.end = lnb[i].lnb_file_offset + lnb[i].lnb_len;
} else {
extent.end += lnb[i].lnb_len;
quota_space += PAGE_SIZE;
}
+ credits++; /* inode */
/*
- * each extent can go into new leaf causing a split
- * 5 is max tree depth: inode + 4 index blocks
- * with blockmaps, depth is 3 at most
+ * overwrite case: no need to modify the tree or
+ * allocate blocks.
+ */
+ if (!extent.end)
+ goto out_declare;
+
+ extents += (extent.end - extent.start +
+ extent_bytes - 1) / extent_bytes;
+ /**
+ * As space usage grows, mballoc can no longer scan block groups
+ * for the best-aligned free extents, so the decayed extent size
+ * may drop to a very small value and make us reserve far too many
+ * credits. Be more optimistic in the credit reservation: even on
+ * a nearly full filesystem it is extremely unlikely that the
+ * worst case would ever be hit.
+ */
+ if (extents > MAX_EXTENTS_PER_WRITE)
+ extents = MAX_EXTENTS_PER_WRITE;
+
+ /**
+ * If we add a single extent, then in the worst case each tree
+ * level index/leaf needs to be changed if the tree splits.
+ * Inserting more extents could split the whole tree more than
+ * once, but this is really rare.
+ */
if (LDISKFS_I(inode)->i_flags & LDISKFS_EXTENTS_FL) {
/*
* many concurrent threads may grow tree by the time
- * our transaction starts. so, consider 2 is a min depth
+ * our transaction starts, so consider 2 the minimum depth.
*/
depth = ext_depth(inode);
- depth = max(depth, 1) + 1;
- newblocks += depth;
- credits++; /* inode */
- credits += depth * 2 * extents;
+ depth = min(max(depth, 1) + 1, LDISKFS_MAX_EXTENT_DEPTH);
+ if (extents <= 1) {
+ credits += depth * 2 * extents;
+ new_meta = depth;
+ } else {
+ credits += depth * 3 * extents;
+ new_meta = depth * 2 * extents;
+ }
} else {
- depth = 3;
- newblocks += depth;
- credits++; /* inode */
- credits += depth * extents;
+ /*
+ * With N contiguous data blocks, we need at most
+ * N/LDISKFS_ADDR_PER_BLOCK(inode->i_sb) + 1 indirect blocks,
+ * 2 dindirect blocks, and 1 tindirect block
+ */
+ new_meta = DIV_ROUND_UP(new_blocks,
+ LDISKFS_ADDR_PER_BLOCK(inode->i_sb)) + 4;
+ credits += new_meta;
}
+ dirty_groups += (extents + new_meta);
+
+ oh->oh_declared_ext = extents;
/* quota space for metadata blocks */
- quota_space += depth * extents * LDISKFS_BLOCK_SIZE(osd_sb(osd));
+ quota_space += new_meta * LDISKFS_BLOCK_SIZE(osd_sb(osd));
/* quota space should be reported in 1K blocks */
quota_space = toqb(quota_space);
/* each new block can go in different group (bitmap + gd) */
/* we can't dirty more bitmap blocks than exist */
- if (newblocks > LDISKFS_SB(osd_sb(osd))->s_groups_count)
+ if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_groups_count)
credits += LDISKFS_SB(osd_sb(osd))->s_groups_count;
else
- credits += newblocks;
+ credits += dirty_groups;
/* we can't dirty more gd blocks than exist */
- if (newblocks > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
+ if (dirty_groups > LDISKFS_SB(osd_sb(osd))->s_gdb_count)
credits += LDISKFS_SB(osd_sb(osd))->s_gdb_count;
else
- credits += newblocks;
+ credits += dirty_groups;
+ CDEBUG(D_INODE,
+ "%s: inode #%lu extent_bytes %u extents %d credits %d\n",
+ osd_ino2name(inode), inode->i_ino, extent_bytes, extents,
+ credits);
+
+out_declare:
osd_trans_declare_op(env, oh, OSD_OT_WRITE, credits);
/* make sure the over quota flags were not set */
struct osd_iobuf *iobuf = &oti->oti_iobuf;
struct inode *inode = osd_dt_obj(dt)->oo_inode;
struct osd_device *osd = osd_obj2dev(osd_dt_obj(dt));
- loff_t disk_size;
- int rc = 0, i;
+ int rc = 0, i, check_credits = 0;
LASSERT(inode);
if (unlikely(rc != 0))
RETURN(rc);
- disk_size = i_size_read(inode);
- /* if disk_size is already bigger than specified user_size,
- * ignore user_size
- */
- if (disk_size > user_size)
- user_size = 0;
dquot_initialize(inode);
for (i = 0; i < npages; i++) {
continue;
}
+ if (lnb[i].lnb_flags & OBD_BRW_DONE)
+ continue;
+
+ if (!(lnb[i].lnb_flags & OBD_BRW_MAPPED))
+ check_credits = 1;
+
LASSERT(PageLocked(lnb[i].lnb_page));
LASSERT(!PageWriteback(lnb[i].lnb_page));
- if (lnb[i].lnb_file_offset + lnb[i].lnb_len > disk_size)
- disk_size = lnb[i].lnb_file_offset + lnb[i].lnb_len;
-
/*
* Since write and truncate are serialized by oo_sem, even
* partial-page truncate should not leave dirty pages in the
osd_iobuf_add_page(iobuf, &lnb[i]);
}
- /* if file has grown, take user_size into account */
- if (user_size && disk_size > user_size)
- disk_size = user_size;
-
osd_trans_exec_op(env, thandle, OSD_OT_WRITE);
if (OBD_FAIL_CHECK(OBD_FAIL_OST_MAPBLK_ENOSPC)) {
rc = -ENOSPC;
} else if (iobuf->dr_npages > 0) {
- rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
- iobuf->dr_npages,
- iobuf->dr_blocks, 1);
+ rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd,
+ 1, user_size,
+ check_credits,
+ thandle);
} else {
/* no pages to write, no transno is needed */
thandle->th_local = 1;
}
- if (likely(rc == 0)) {
- spin_lock(&inode->i_lock);
- if (disk_size > i_size_read(inode)) {
- i_size_write(inode, disk_size);
- LDISKFS_I(inode)->i_disksize = disk_size;
- spin_unlock(&inode->i_lock);
- osd_dirty_inode(inode, I_DIRTY_DATASYNC);
- } else {
- spin_unlock(&inode->i_lock);
- }
-
- rc = osd_do_bio(osd, inode, iobuf);
- /* we don't do stats here as in read path because
- * write is async: we'll do this in osd_put_bufs()
- */
- } else {
+ if (rc != 0 && !thandle->th_restart_tran)
osd_fini_iobuf(osd, iobuf);
- }
osd_trans_exec_check(env, thandle, OSD_OT_WRITE);
- if (unlikely(rc != 0)) {
+ if (unlikely(rc != 0 && !thandle->th_restart_tran)) {
/* if write fails, we should drop pages from the cache */
for (i = 0; i < npages; i++) {
if (lnb[i].lnb_page == NULL)
cache_hits + cache_misses);
if (iobuf->dr_npages) {
- rc = osd_ldiskfs_map_inode_pages(inode, iobuf->dr_pages,
- iobuf->dr_npages,
- iobuf->dr_blocks, 0);
- rc = osd_do_bio(osd, inode, iobuf);
+ rc = osd_ldiskfs_map_inode_pages(inode, iobuf, osd, 0,
+ 0, 0, NULL);
+ if (!rc)
+ rc = osd_do_bio(osd, inode, iobuf, 0, 0);
/* IO stats will be done in osd_bufs_put() */
* level.
*/
depth = inode != NULL ? ext_depth(inode) : 0;
- depth = max(depth, 1) + 1;
+ depth = min(max(depth, 1) + 3, LDISKFS_MAX_EXTENT_DEPTH);
credits = depth;
/* if not append, then split may need to modify
* existing blocks moving entries into the new ones
++bufsize;
}
- dirty_inode = test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
+ /* only the first setter of the flag needs to dirty the inode */
+ dirty_inode = !test_and_set_bit(LDISKFS_INODE_JOURNAL_DATA,
&ei->i_flags);
/* sparse checking is racy, but sparse is very rare case, leave as is */
if (unlikely(IS_ERR_OR_NULL(bh) && !sync))
CWARN(
"%s: adding bh without locking off %llu (block %lu, size %d, offs %llu)\n",
- inode->i_sb->s_id,
+ osd_ino2name(inode),
offset, block, bufsize, *offs);
if (IS_ERR_OR_NULL(bh)) {
CERROR(
"%s: error reading offset %llu (block %lu, size %d, offs %llu), credits %d/%d: rc = %d\n",
- inode->i_sb->s_id, offset, block, bufsize, *offs,
- credits, handle->h_buffer_credits, err);
+ osd_ino2name(inode), offset, block, bufsize,
+ *offs, credits, handle->h_buffer_credits, err);
break;
}
ENTRY;
/*
- * Only mode == 0 (which is standard prealloc) is supported now.
+ * mode == 0 (standard prealloc) and FALLOC_FL_PUNCH_HOLE are supported.
 * The rest of the mode options are not supported yet.
*/
- if (mode & ~FALLOC_FL_KEEP_SIZE)
+ if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+ RETURN(-EOPNOTSUPP);
+
+ /* disable fallocate completely */
+ if (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks < 0)
RETURN(-EOPNOTSUPP);
LASSERT(th);
LASSERT(inode);
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ rc = osd_declare_inode_qid(env, i_uid_read(inode),
+ i_gid_read(inode),
+ i_projid_read(inode), 0, oh,
+ osd_dt_obj(dt), NULL, OSD_QID_BLK);
+ if (rc == 0)
+ rc = osd_trunc_lock(osd_dt_obj(dt), oh, false);
+ RETURN(rc);
+ }
+
/* quota space for metadata blocks
* approximate metadata estimate should be good enough.
*/
RETURN(rc);
}
-/* Borrow @ext4_chunk_trans_blocks */
-static int osd_chunk_trans_blocks(struct inode *inode, int nrblocks)
-{
- ldiskfs_group_t groups;
- int gdpblocks;
- int idxblocks;
- int depth;
- int ret;
-
- depth = ext_depth(inode);
- idxblocks = depth * 2;
-
- /*
- * Now let's see how many group bitmaps and group descriptors need
- * to account.
- */
- groups = idxblocks + 1;
- gdpblocks = groups;
- if (groups > LDISKFS_SB(inode->i_sb)->s_groups_count)
- groups = LDISKFS_SB(inode->i_sb)->s_groups_count;
- if (gdpblocks > LDISKFS_SB(inode->i_sb)->s_gdb_count)
- gdpblocks = LDISKFS_SB(inode->i_sb)->s_gdb_count;
-
- /* bitmaps and block group descriptor blocks */
- ret = idxblocks + groups + gdpblocks;
-
- /* Blocks for super block, inode, quota and xattr blocks */
- ret += LDISKFS_META_TRANS_BLOCKS(inode->i_sb);
-
- return ret;
-}
-
-static int osd_extend_restart_trans(handle_t *handle, int needed)
-{
- int rc;
-
- if (ldiskfs_handle_has_enough_credits(handle, needed))
- return 0;
-
- rc = ldiskfs_journal_extend(handle, needed - handle->h_buffer_credits);
- if (rc <= 0)
- return rc;
-
- rc = ldiskfs_journal_restart(handle, needed);
-
- return rc;
-}
-
-static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
- __u64 start, __u64 end, int mode, struct thandle *th)
+static int osd_fallocate_preallocate(const struct lu_env *env,
+ struct dt_object *dt,
+ __u64 start, __u64 end, int mode,
+ struct thandle *th)
{
struct osd_thandle *oh = container_of(th, struct osd_thandle, ot_super);
handle_t *handle = ldiskfs_journal_current_handle();
ENTRY;
- /*
- * Only mode == 0 (which is standard prealloc) is supported now.
- * Rest of mode options is not supported yet.
- */
- if (mode & ~FALLOC_FL_KEEP_SIZE)
- RETURN(-EOPNOTSUPP);
-
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
LASSERT(inode != NULL);
boff = start >> inode->i_blkbits;
blen = (ALIGN(end, 1 << inode->i_blkbits) >> inode->i_blkbits) - boff;
- flags = LDISKFS_GET_BLOCKS_CREATE;
+ /* Create and mark new extents as either zero or unwritten */
+ flags = (osd_dev(dt->do_lu.lo_dev)->od_fallocate_zero_blocks ||
+ !ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)) ?
+ LDISKFS_GET_BLOCKS_CREATE_ZERO :
+ LDISKFS_GET_BLOCKS_CREATE_UNWRIT_EXT;
+#ifndef HAVE_LDISKFS_GET_BLOCKS_KEEP_SIZE
if (mode & FALLOC_FL_KEEP_SIZE)
flags |= LDISKFS_GET_BLOCKS_KEEP_SIZE;
-
+#endif
inode_lock(inode);
- /*
- * We only support preallocation for extent-based file only.
- */
- if (!(ldiskfs_test_inode_flag(inode, LDISKFS_INODE_EXTENTS)))
- GOTO(out, rc = -EOPNOTSUPP);
-
if (!(mode & FALLOC_FL_KEEP_SIZE) && (end > i_size_read(inode) ||
end > LDISKFS_I(inode)->i_disksize)) {
new_size = end;
map.m_lblk = boff;
map.m_len = blen;
- /*
- * Don't normalize the request if it can fit in one extent so
- * that it doesn't get unnecessarily split into multiple
- * extents.
+ /* Don't normalize the request if it can fit in one extent so
+ * that it doesn't get unnecessarily split into multiple extents.
*/
if (blen <= EXT_UNWRITTEN_MAX_LEN)
flags |= LDISKFS_GET_BLOCKS_NO_NORMALIZE;
}
/* TODO: quota check */
- rc = osd_extend_restart_trans(handle, credits);
+ rc = osd_extend_restart_trans(handle, credits, inode);
if (rc)
break;
epos = end;
if (ldiskfs_update_inode_size(inode, epos) & 0x1)
inode->i_mtime = inode->i_ctime;
+#ifndef HAVE_LDISKFS_GET_BLOCKS_KEEP_SIZE
} else {
if (epos > inode->i_size)
ldiskfs_set_inode_flag(inode,
LDISKFS_INODE_EOFBLOCKS);
+#endif
}
ldiskfs_mark_inode_dirty(handle, inode);
}
out:
- inode_unlock(inode);
-
/* extend credits if needed for operations such as attribute set */
if (rc >= 0)
- rc = osd_extend_restart_trans(handle, save_credits);
+ rc = osd_extend_restart_trans(handle, save_credits, inode);
+
+ inode_unlock(inode);
+
+ RETURN(rc);
+}
+
+static int osd_fallocate_punch(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, int mode,
+ struct thandle *th)
+{
+ struct osd_object *obj = osd_dt_obj(dt);
+ struct inode *inode = obj->oo_inode;
+ struct osd_access_lock *al;
+ struct osd_thandle *oh;
+ int rc = 0, found = 0;
+
+ ENTRY;
+
+ LASSERT(dt_object_exists(dt));
+ LASSERT(osd_invariant(obj));
+ LASSERT(inode != NULL);
+
+ dquot_initialize(inode);
+
+ LASSERT(th);
+ oh = container_of(th, struct osd_thandle, ot_super);
+ LASSERT(oh->ot_handle->h_transaction != NULL);
+
+ list_for_each_entry(al, &oh->ot_trunc_locks, tl_list) {
+ if (obj != al->tl_obj)
+ continue;
+ LASSERT(al->tl_shared == 0);
+ found = 1;
+ /* do actual punch in osd_trans_stop() */
+ al->tl_start = start;
+ al->tl_end = end;
+ al->tl_mode = mode;
+ al->tl_punch = true;
+ break;
+ }
RETURN(rc);
}
+static int osd_fallocate(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, int mode, struct thandle *th)
+{
+ int rc;
+
+ ENTRY;
+
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ /* punch */
+ rc = osd_fallocate_punch(env, dt, start, end, mode, th);
+ } else {
+ /* standard preallocate */
+ rc = osd_fallocate_preallocate(env, dt, start, end, mode, th);
+ }
+ RETURN(rc);
+}
+
static int osd_declare_punch(const struct lu_env *env, struct dt_object *dt,
__u64 start, __u64 end, struct thandle *th)
{
struct inode *inode = osd_dt_obj(dt)->oo_inode;
u64 len;
int rc;
- mm_segment_t cur_fs;
LASSERT(inode);
if (inode->i_op->fiemap == NULL)
if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
filemap_write_and_wait(inode->i_mapping);
- /* Save previous value address limit */
- cur_fs = get_fs();
- /* Set the address limit of the kernel */
- set_fs(KERNEL_DS);
-
rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
fm->fm_flags = fieinfo.fi_flags;
fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
- /* Restore the previous address limt */
- set_fs(cur_fs);
-
return rc;
}
loff_t offset, int whence)
{
struct osd_object *obj = osd_dt_obj(dt);
+ struct osd_device *dev = osd_obj2dev(obj);
struct inode *inode = obj->oo_inode;
struct file *file;
loff_t result;
ENTRY;
-
LASSERT(dt_object_exists(dt));
LASSERT(osd_invariant(obj));
LASSERT(inode);
LASSERT(offset >= 0);
- file = osd_quasi_file(env, inode);
- result = file->f_op->llseek(file, offset, whence);
+ file = alloc_file_pseudo(inode, dev->od_mnt, "/", O_NOATIME,
+ inode->i_fop);
+ if (IS_ERR(file))
+ RETURN(PTR_ERR(file));
+ file->f_mode |= FMODE_64BITHASH;
+ result = file->f_op->llseek(file, offset, whence);
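+ /*
+ * alloc_file_pseudo() does not take its own reference on the inode,
+ * but fput() drops one, so balance it with ihold() first.
+ */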
+ ihold(inode);
+ fput(file);
/*
* If 'offset' is beyond end of object file then treat it as not error
* but valid case for SEEK_HOLE and return 'offset' as result.
}
}
+/* For a partial-page punch, flush punch range to disk immediately */
+static void osd_partial_page_flush_punch(struct osd_device *d,
+ struct inode *inode, loff_t start,
+ loff_t end)
+{
+ if (osd_use_page_cache(d)) {
+ filemap_fdatawrite_range(inode->i_mapping, start, end);
+ } else {
+ /* Notice we use "wait" version to ensure I/O is complete */
+ filemap_write_and_wait_range(inode->i_mapping, start,
+ end);
+ invalidate_mapping_pages(inode->i_mapping, start >> PAGE_SHIFT,
+ end >> PAGE_SHIFT);
+ }
+}
+
+/*
+ * For a partial-page truncate, flush the page to disk immediately to
+ * avoid data corruption during direct disk write. b=17397
+ */
+static void osd_partial_page_flush(struct osd_device *d, struct inode *inode,
+ loff_t offset)
+{
+ if (!(offset & ~PAGE_MASK))
+ return;
+
+ if (osd_use_page_cache(d)) {
+ filemap_fdatawrite_range(inode->i_mapping, offset, offset + 1);
+ } else {
+ /* Notice we use "wait" version to ensure I/O is complete */
+ filemap_write_and_wait_range(inode->i_mapping, offset,
+ offset + 1);
+ invalidate_mapping_pages(inode->i_mapping, offset >> PAGE_SHIFT,
+ offset >> PAGE_SHIFT);
+ }
+}
+
void osd_execute_truncate(struct osd_object *obj)
{
struct osd_device *d = osd_obj2dev(obj);
spin_unlock(&inode->i_lock);
osd_dirty_inode(inode, I_DIRTY_DATASYNC);
}
+ osd_partial_page_flush(d, inode, size);
+}
- /*
- * For a partial-page truncate, flush the page to disk immediately to
- * avoid data corruption during direct disk write. b=17397
- */
- if ((size & ~PAGE_MASK) == 0)
- return;
- if (osd_use_page_cache(d)) {
- filemap_fdatawrite_range(inode->i_mapping, size, size + 1);
- } else {
- /* Notice we use "wait" version to ensure I/O is complete */
- filemap_write_and_wait_range(inode->i_mapping, size, size + 1);
- invalidate_mapping_pages(inode->i_mapping, size >> PAGE_SHIFT,
- size >> PAGE_SHIFT);
- }
+static int osd_execute_punch(const struct lu_env *env, struct osd_object *obj,
+ loff_t start, loff_t end, int mode)
+{
+ struct osd_device *d = osd_obj2dev(obj);
+ struct inode *inode = obj->oo_inode;
+ struct file *file;
+ int rc;
+
+ file = alloc_file_pseudo(inode, d->od_mnt, "/", O_NOATIME,
+ inode->i_fop);
+ if (IS_ERR(file))
+ RETURN(PTR_ERR(file));
+
+ file->f_mode |= FMODE_64BITHASH;
+ rc = file->f_op->fallocate(file, mode, start, end - start);
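+ /* as in the lseek path, ihold() balances the inode reference
+ * dropped by fput() */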
+ ihold(inode);
+ fput(file);
+ if (rc == 0)
+ osd_partial_page_flush_punch(d, inode, start, end - 1);
+ return rc;
}
-void osd_process_truncates(struct list_head *list)
+int osd_process_truncates(const struct lu_env *env, struct list_head *list)
{
struct osd_access_lock *al;
+ int rc = 0;
- LASSERT(journal_current_handle() == NULL);
+ LASSERT(!journal_current_handle());
list_for_each_entry(al, list, tl_list) {
if (al->tl_shared)
continue;
- if (!al->tl_truncate)
- continue;
- osd_execute_truncate(al->tl_obj);
+ if (al->tl_truncate)
+ osd_execute_truncate(al->tl_obj);
+ else if (al->tl_punch)
+ rc = osd_execute_punch(env, al->tl_obj, al->tl_start,
+ al->tl_end, al->tl_mode);
}
+
+ return rc;
}