+From e35fd6609b2fee54484d520deccb8f18bf7d38f3 Mon Sep 17 00:00:00 2001
+From: Theodore Ts'o <tytso@mit.edu>
+Subject: [PATCH] ext4: Add new abstraction ext4_map_blocks() underneath
+ ext4_get_blocks()
+
+Jack up ext4_get_blocks() and add a new function, ext4_map_blocks(),
+which uses a much smaller structure, struct ext4_map_blocks, which is
+20 bytes, as opposed to a struct buffer_head, which is nearly 5 times
+bigger on an x86_64 machine.  By switching callers to
+ext4_map_blocks(), we save stack space, since we avoid allocating a
+struct buffer_head on the stack.
+
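+To illustrate the new calling convention, here is a rough sketch based
+on the definitions introduced by this patch; the local variable names
+(lblk, len, flags) are hypothetical and stand in for whatever the
+caller already has:
+
+	struct ext4_map_blocks map;	/* m_lblk, m_len in; m_pblk, m_flags out */
+	int ret;
+
+	map.m_lblk = lblk;		/* starting logical block (caller-supplied) */
+	map.m_len = len;		/* number of blocks requested (caller-supplied) */
+
+	ret = ext4_map_blocks(handle, inode, &map, flags);
+	if (ret > 0) {
+		/* ret blocks were mapped/allocated: map.m_pblk holds the
+		 * first physical block and map.m_flags carries the
+		 * EXT4_MAP_* bits (NEW, MAPPED, UNWRITTEN, BOUNDARY).
+		 */
+	}
+
+The old ext4_get_blocks() interface is kept as a thin wrapper that
+fills in a struct ext4_map_blocks and copies the result back into the
+caller-supplied buffer_head.
+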
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Index: linux-stage/fs/ext4/ext4.h
+===================================================================
+--- linux-stage.orig/fs/ext4/ext4.h 2016-07-15 09:52:28.000000000 +0300
++++ linux-stage/fs/ext4/ext4.h 2016-07-15 09:52:29.000000000 +0300
+@@ -142,10 +142,8 @@ struct ext4_allocation_request {
+ #define EXT4_MAP_MAPPED (1 << BH_Mapped)
+ #define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
+ #define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
+-#define EXT4_MAP_UNINIT (1 << BH_Uninit)
+ #define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
+- EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
+- EXT4_MAP_UNINIT)
++ EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
+
+ struct ext4_map_blocks {
+ ext4_fsblk_t m_pblk;
+@@ -2184,9 +2182,9 @@ extern int ext4_ext_tree_init(handle_t *
+ extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
+ extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
+ int chunk);
+-extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
+- ext4_lblk_t iblock, unsigned int max_blocks,
+- struct buffer_head *bh_result, int flags);
++#define HAVE_EXT4_MAP_BLOCKS
++extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map, int flags);
+ extern void ext4_ext_truncate(struct inode *);
+ extern int ext4_ext_punch_hole(struct inode *inode, loff_t offset,
+ loff_t length);
+@@ -2196,6 +2194,8 @@ extern long ext4_fallocate(struct inode
+ loff_t len);
+ extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
+ ssize_t len);
++extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map, int flags);
+ extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
+ sector_t block, unsigned int max_blocks,
+ struct buffer_head *bh, int flags);
+Index: linux-stage/fs/ext4/extents.c
+===================================================================
+--- linux-stage.orig/fs/ext4/extents.c 2016-07-15 09:52:28.000000000 +0300
++++ linux-stage/fs/ext4/extents.c 2016-07-15 09:53:10.000000000 +0300
+@@ -2960,7 +2960,7 @@ fix_extent_len:
+
+ #define EXT4_EXT_ZERO_LEN 7
+ /*
+- * This function is called by ext4_ext_get_blocks() if someone tries to write
++ * This function is called by ext4_ext_map_blocks() if someone tries to write
+ * to an uninitialized extent. It may result in splitting the uninitialized
+ * extent into multiple extents (upto three - one initialized and two
+ * uninitialized).
+@@ -2970,11 +2970,10 @@ fix_extent_len:
+ * c> Splits in three extents: Somone is writing in middle of the extent
+ */
+ static int ext4_ext_convert_to_initialized(handle_t *handle,
+- struct inode *inode,
+- struct ext4_ext_path *path,
+- ext4_lblk_t iblock,
+- unsigned int max_blocks,
+- int flags)
++ struct inode *inode,
++ struct ext4_map_blocks *map,
++ struct ext4_ext_path *path,
++ int flags)
+ {
+ struct ext4_extent *ex, newex, orig_ex;
+ struct ext4_extent *ex1 = NULL;
+@@ -2990,20 +2989,20 @@ static int ext4_ext_convert_to_initializ
+
+ ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+- (unsigned long long)iblock, max_blocks);
++ (unsigned long long)map->m_lblk, map->m_len);
+
+ eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ inode->i_sb->s_blocksize_bits;
+- if (eof_block < iblock + max_blocks)
+- eof_block = iblock + max_blocks;
++ if (eof_block < map->m_lblk + map->m_len)
++ eof_block = map->m_lblk + map->m_len;
+
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
+- allocated = ee_len - (iblock - ee_block);
+- newblock = iblock - ee_block + ext4_ext_pblock(ex);
++ allocated = ee_len - (map->m_lblk - ee_block);
++ newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
+
+ ex2 = ex;
+ orig_ex.ee_block = ex->ee_block;
+@@ -3033,10 +3032,10 @@ static int ext4_ext_convert_to_initializ
+ return allocated;
+ }
+
+- /* ex1: ee_block to iblock - 1 : uninitialized */
+- if (iblock > ee_block) {
++ /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
++ if (map->m_lblk > ee_block) {
+ ex1 = ex;
+- ex1->ee_len = cpu_to_le16(iblock - ee_block);
++ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ ext4_ext_mark_uninitialized(ex1);
+ ext4_ext_dirty(handle, inode, path + depth);
+ ex2 = &newex;
+@@ -3046,15 +3045,15 @@ static int ext4_ext_convert_to_initializ
+ * we insert ex3, if ex1 is NULL. This is to avoid temporary
+ * overlap of blocks.
+ */
+- if (!ex1 && allocated > max_blocks)
+- ex2->ee_len = cpu_to_le16(max_blocks);
++ if (!ex1 && allocated > map->m_len)
++ ex2->ee_len = cpu_to_le16(map->m_len);
+ /* ex3: to ee_block + ee_len : uninitialised */
+- if (allocated > max_blocks) {
++ if (allocated > map->m_len) {
+ unsigned int newdepth;
+ /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
+ if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
+ /*
+- * iblock == ee_block is handled by the zerouout
++ * map->m_lblk == ee_block is handled by the zerouout
+ * at the beginning.
+ * Mark first half uninitialized.
+ * Mark second half initialized and zero out the
+@@ -3067,7 +3066,7 @@ static int ext4_ext_convert_to_initializ
+ ext4_ext_dirty(handle, inode, path + depth);
+
+ ex3 = &newex;
+- ex3->ee_block = cpu_to_le32(iblock);
++ ex3->ee_block = cpu_to_le32(map->m_lblk);
+ ext4_ext_store_pblock(ex3, newblock);
+ ex3->ee_len = cpu_to_le16(allocated);
+ err = ext4_ext_insert_extent(handle, inode, path,
+@@ -3081,7 +3080,7 @@ static int ext4_ext_convert_to_initializ
+ ext4_ext_store_pblock(ex,
+ ext4_ext_pblock(&orig_ex));
+ ext4_ext_dirty(handle, inode, path + depth);
+- /* blocks available from iblock */
++ /* blocks available from map->m_lblk */
+ return allocated;
+
+ } else if (err)
+@@ -3103,8 +3102,8 @@ static int ext4_ext_convert_to_initializ
+ */
+ depth = ext_depth(inode);
+ ext4_ext_drop_refs(path);
+- path = ext4_ext_find_extent(inode,
+- iblock, path);
++ path = ext4_ext_find_extent(inode, map->m_lblk,
++ path);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ return err;
+@@ -3124,9 +3123,9 @@ static int ext4_ext_convert_to_initializ
+ return allocated;
+ }
+ ex3 = &newex;
+- ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+- ext4_ext_store_pblock(ex3, newblock + max_blocks);
+- ex3->ee_len = cpu_to_le16(allocated - max_blocks);
++ ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
++ ext4_ext_store_pblock(ex3, newblock + map->m_len);
++ ex3->ee_len = cpu_to_le16(allocated - map->m_len);
+ ext4_ext_mark_uninitialized(ex3);
+ err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
+ if (err == -ENOSPC && may_zeroout) {
+@@ -3139,7 +3138,7 @@ static int ext4_ext_convert_to_initializ
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ ext4_ext_dirty(handle, inode, path + depth);
+ /* zeroed the full extent */
+- /* blocks available from iblock */
++ /* blocks available from map->m_lblk */
+ return allocated;
+
+ } else if (err)
+@@ -3159,7 +3158,7 @@ static int ext4_ext_convert_to_initializ
+
+ depth = newdepth;
+ ext4_ext_drop_refs(path);
+- path = ext4_ext_find_extent(inode, iblock, path);
++ path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+@@ -3173,14 +3172,14 @@ static int ext4_ext_convert_to_initializ
+ if (err)
+ goto out;
+
+- allocated = max_blocks;
++ allocated = map->m_len;
+
+ /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
+ * to insert a extent in the middle zerout directly
+ * otherwise give the extent a chance to merge to left
+ */
+ if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
+- iblock != ee_block && may_zeroout) {
++ map->m_lblk != ee_block && may_zeroout) {
+ err = ext4_ext_zeroout(inode, &orig_ex);
+ if (err)
+ goto fix_extent_len;
+@@ -3190,7 +3189,7 @@ static int ext4_ext_convert_to_initializ
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ ext4_ext_dirty(handle, inode, path + depth);
+ /* zero out the first half */
+- /* blocks available from iblock */
++ /* blocks available from map->m_lblk */
+ return allocated;
+ }
+ }
+@@ -3201,13 +3200,13 @@ static int ext4_ext_convert_to_initializ
+ */
+ if (ex1 && ex1 != ex) {
+ ex1 = ex;
+- ex1->ee_len = cpu_to_le16(iblock - ee_block);
++ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ ext4_ext_mark_uninitialized(ex1);
+ ext4_ext_dirty(handle, inode, path + depth);
+ ex2 = &newex;
+ }
+- /* ex2: iblock to iblock + maxblocks-1 : initialised */
+- ex2->ee_block = cpu_to_le32(iblock);
++ /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
++ ex2->ee_block = cpu_to_le32(map->m_lblk);
+ ext4_ext_store_pblock(ex2, newblock);
+ ex2->ee_len = cpu_to_le16(allocated);
+ if (ex2 != ex)
+@@ -3277,7 +3276,7 @@ fix_extent_len:
+ }
+
+ /*
+- * This function is called by ext4_ext_get_blocks() from
++ * This function is called by ext4_ext_map_blocks() from
+ * ext4_get_blocks_dio_write() when DIO to write
+ * to an uninitialized extent.
+ *
+@@ -3300,9 +3299,8 @@ fix_extent_len:
+ */
+ static int ext4_split_unwritten_extents(handle_t *handle,
+ struct inode *inode,
++ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path,
+- ext4_lblk_t iblock,
+- unsigned int max_blocks,
+ int flags)
+ {
+ struct ext4_extent *ex, newex, orig_ex;
+@@ -3318,20 +3316,20 @@ static int ext4_split_unwritten_extents(
+
+ ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
+ "block %llu, max_blocks %u\n", inode->i_ino,
+- (unsigned long long)iblock, max_blocks);
++ (unsigned long long)map->m_lblk, map->m_len);
+
+ eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
+ inode->i_sb->s_blocksize_bits;
+- if (eof_block < iblock + max_blocks)
+- eof_block = iblock + max_blocks;
++ if (eof_block < map->m_lblk + map->m_len)
++ eof_block = map->m_lblk + map->m_len;
+
+ depth = ext_depth(inode);
+ eh = path[depth].p_hdr;
+ ex = path[depth].p_ext;
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
+- allocated = ee_len - (iblock - ee_block);
+- newblock = iblock - ee_block + ext4_ext_pblock(ex);
++ allocated = ee_len - (map->m_lblk - ee_block);
++ newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
+
+ ex2 = ex;
+ orig_ex.ee_block = ex->ee_block;
+@@ -3349,16 +3347,16 @@ static int ext4_split_unwritten_extents(
+ * block where the write begins, and the write completely
+ * covers the extent, then we don't need to split it.
+ */
+- if ((iblock == ee_block) && (allocated <= max_blocks))
++ if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
+ return allocated;
+
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+ goto out;
+- /* ex1: ee_block to iblock - 1 : uninitialized */
+- if (iblock > ee_block) {
++ /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
++ if (map->m_lblk > ee_block) {
+ ex1 = ex;
+- ex1->ee_len = cpu_to_le16(iblock - ee_block);
++ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ ext4_ext_mark_uninitialized(ex1);
+ ext4_ext_dirty(handle, inode, path + depth);
+ ex2 = &newex;
+@@ -3368,15 +3366,15 @@ static int ext4_split_unwritten_extents(
+ * we insert ex3, if ex1 is NULL. This is to avoid temporary
+ * overlap of blocks.
+ */
+- if (!ex1 && allocated > max_blocks)
+- ex2->ee_len = cpu_to_le16(max_blocks);
++ if (!ex1 && allocated > map->m_len)
++ ex2->ee_len = cpu_to_le16(map->m_len);
+ /* ex3: to ee_block + ee_len : uninitialised */
+- if (allocated > max_blocks) {
++ if (allocated > map->m_len) {
+ unsigned int newdepth;
+ ex3 = &newex;
+- ex3->ee_block = cpu_to_le32(iblock + max_blocks);
+- ext4_ext_store_pblock(ex3, newblock + max_blocks);
+- ex3->ee_len = cpu_to_le16(allocated - max_blocks);
++ ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
++ ext4_ext_store_pblock(ex3, newblock + map->m_len);
++ ex3->ee_len = cpu_to_le16(allocated - map->m_len);
+ ext4_ext_mark_uninitialized(ex3);
+ err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
+ if (err == -ENOSPC && may_zeroout) {
+@@ -3400,8 +3398,8 @@ static int ext4_split_unwritten_extents(
+ err = ext4_ext_zeroout(inode, ex3);
+ if (err)
+ goto fix_extent_len;
+- max_blocks = allocated;
+- ex2->ee_len = cpu_to_le16(max_blocks);
++ map->m_len = allocated;
++ ex2->ee_len = cpu_to_le16(map->m_len);
+ goto skip;
+ }
+ err = ext4_ext_zeroout(inode, &orig_ex);
+@@ -3413,7 +3411,7 @@ static int ext4_split_unwritten_extents(
+ ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
+ ext4_ext_dirty(handle, inode, path + depth);
+ /* zeroed the full extent */
+- /* blocks available from iblock */
++ /* blocks available from map->m_lblk */
+ return allocated;
+
+ } else if (err)
+@@ -3433,7 +3431,7 @@ static int ext4_split_unwritten_extents(
+
+ depth = newdepth;
+ ext4_ext_drop_refs(path);
+- path = ext4_ext_find_extent(inode, iblock, path);
++ path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+@@ -3446,8 +3444,7 @@ static int ext4_split_unwritten_extents(
+ err = ext4_ext_get_access(handle, inode, path + depth);
+ if (err)
+ goto out;
+-
+- allocated = max_blocks;
++ allocated = map->m_len;
+ }
+ skip:
+ /*
+@@ -3457,16 +3454,16 @@ skip:
+ */
+ if (ex1 && ex1 != ex) {
+ ex1 = ex;
+- ex1->ee_len = cpu_to_le16(iblock - ee_block);
++ ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
+ ext4_ext_mark_uninitialized(ex1);
+ ext4_ext_dirty(handle, inode, path + depth);
+ ex2 = &newex;
+ }
+ /*
+- * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
+- * uninitialised still.
++ * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
++ * using direct I/O, uninitialised still.
+ */
+- ex2->ee_block = cpu_to_le32(iblock);
++ ex2->ee_block = cpu_to_le32(map->m_lblk);
+ ext4_ext_store_pblock(ex2, newblock);
+ ex2->ee_len = cpu_to_le16(allocated);
+ ext4_ext_mark_uninitialized(ex2);
+@@ -3506,8 +3503,7 @@ fix_extent_len:
+
+ static int ext4_convert_unwritten_extents_dio(handle_t *handle,
+ struct inode *inode,
+- ext4_lblk_t iblock,
+- unsigned int max_blocks,
++ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path)
+ {
+ struct ext4_extent *ex;
+@@ -3529,14 +3525,13 @@ static int ext4_convert_unwritten_extent
+
+ /* If extent is larger than requested then split is required */
+
+- if (ee_block != iblock || ee_len > max_blocks) {
+- err = ext4_split_unwritten_extents(handle, inode, path,
+- iblock, max_blocks,
++ if (ee_block != map->m_lblk || ee_len > map->m_len) {
++ err = ext4_split_unwritten_extents(handle, inode, map, path,
+ EXT4_EXT_DATA_VALID);
+ if (err < 0)
+ goto out;
+ ext4_ext_drop_refs(path);
+- path = ext4_ext_find_extent(inode, iblock, path);
++ path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ goto out;
+@@ -3627,10 +3622,9 @@ out:
+
+ static int
+ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
+- ext4_lblk_t iblock, unsigned int max_blocks,
++ struct ext4_map_blocks *map,
+ struct ext4_ext_path *path, int flags,
+- unsigned int allocated, struct buffer_head *bh_result,
+- ext4_fsblk_t newblock)
++ unsigned int allocated, ext4_fsblk_t newblock)
+ {
+ int ret = 0;
+ int err = 0;
+@@ -3638,7 +3632,7 @@ ext4_ext_handle_uninitialized_extents(ha
+
+ ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
+ "block %llu, max_blocks %u, flags %d, allocated %u",
+- inode->i_ino, (unsigned long long)iblock, max_blocks,
++ inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
+ flags, allocated);
+ ext4_ext_show_leaf(inode, path);
+
+@@ -3651,9 +3645,8 @@ ext4_ext_handle_uninitialized_extents(ha
+ /* DIO get_block() before submit the IO, split the extent */
+ if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
+ EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
+- ret = ext4_split_unwritten_extents(handle,
+- inode, path, iblock,
+- max_blocks, flags);
++ ret = ext4_split_unwritten_extents(handle, inode, map,
++ path, flags);
+ /*
+ * Flag the inode(non aio case) or end_io struct (aio case)
+ * that this IO needs to convertion to written when IO is
+@@ -3670,12 +3663,11 @@ ext4_ext_handle_uninitialized_extents(ha
+ if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
+ EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
+ ret = ext4_convert_unwritten_extents_dio(handle, inode,
+- iblock, max_blocks,
+- path);
++ map, path);
+ if (ret >= 0) {
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+- err = check_eofblocks_fl(handle, inode, iblock, path,
+- max_blocks);
++ err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
++ map->m_len);
+ } else
+ err = ret;
+ goto out2;
+@@ -3697,18 +3689,15 @@ ext4_ext_handle_uninitialized_extents(ha
+ * the buffer head will be unmapped so that
+ * a read from the block returns 0s.
+ */
+- set_buffer_unwritten(bh_result);
++ map->m_flags |= EXT4_MAP_UNWRITTEN;
+ goto out1;
+ }
+
+ /* buffered write, writepage time, convert*/
+- ret = ext4_ext_convert_to_initialized(handle, inode,
+- path, iblock,
+- max_blocks,
+- flags);
++ ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
+ if (ret >= 0) {
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+- err = check_eofblocks_fl(handle, inode, iblock, path, max_blocks);
++ err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
+ if (err < 0)
+ goto out2;
+ }
+@@ -3718,7 +3707,7 @@ out:
+ goto out2;
+ } else
+ allocated = ret;
+- set_buffer_new(bh_result);
++ map->m_flags |= EXT4_MAP_NEW;
+ /*
+ * if we allocated more blocks than requested
+ * we need to make sure we unmap the extra block
+@@ -3726,11 +3715,11 @@ out:
+ * unmapped later when we find the buffer_head marked
+ * new.
+ */
+- if (allocated > max_blocks) {
++ if (allocated > map->m_len) {
+ unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
+- newblock + max_blocks,
+- allocated - max_blocks);
+- allocated = max_blocks;
++ newblock + map->m_len,
++ allocated - map->m_len);
++ allocated = map->m_len;
+ }
+
+ /*
+@@ -3744,13 +3733,13 @@ out:
+ ext4_da_update_reserve_space(inode, allocated, 0);
+
+ map_out:
+- set_buffer_mapped(bh_result);
++ map->m_flags |= EXT4_MAP_MAPPED;
+ out1:
+- if (allocated > max_blocks)
+- allocated = max_blocks;
++ if (allocated > map->m_len)
++ allocated = map->m_len;
+ ext4_ext_show_leaf(inode, path);
+- bh_result->b_bdev = inode->i_sb->s_bdev;
+- bh_result->b_blocknr = newblock;
++ map->m_pblk = newblock;
++ map->m_len = allocated;
+ out2:
+ if (path) {
+ ext4_ext_drop_refs(path);
+@@ -3777,10 +3766,8 @@ out2:
+ *
+ * return < 0, error case.
+ */
+-int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
+- ext4_lblk_t iblock,
+- unsigned int max_blocks, struct buffer_head *bh_result,
+- int flags)
++int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map, int flags)
+ {
+ struct ext4_ext_path *path = NULL;
+ struct ext4_extent_header *eh;
+@@ -3791,12 +3778,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ struct ext4_allocation_request ar;
+ ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
+
+- __clear_bit(BH_New, &bh_result->b_state);
+ ext_debug("blocks %u/%u requested for inode %lu\n",
+- iblock, max_blocks, inode->i_ino);
++ map->m_lblk, map->m_len, inode->i_ino);
+
+ /* check in cache */
+- if (ext4_ext_in_cache(inode, iblock, &newex)) {
++ if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
+ if (!newex.ee_start_lo && !newex.ee_start_hi) {
+ if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
+ /*
+@@ -3808,18 +3794,18 @@ int ext4_ext_get_blocks(handle_t *handle
+ /* we should allocate requested block */
+ } else {
+ /* block is already allocated */
+- newblock = iblock
++ newblock = map->m_lblk
+ - le32_to_cpu(newex.ee_block)
+ + ext4_ext_pblock(&newex);
+ /* number of remaining blocks in the extent */
+ allocated = ext4_ext_get_actual_len(&newex) -
+- (iblock - le32_to_cpu(newex.ee_block));
++ (map->m_lblk - le32_to_cpu(newex.ee_block));
+ goto out;
+ }
+ }
+
+ /* find extent for this block */
+- path = ext4_ext_find_extent(inode, iblock, NULL);
++ path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ path = NULL;
+@@ -3836,7 +3822,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
+ EXT4_ERROR_INODE(inode, "bad extent address "
+ "iblock: %d, depth: %d pblock %lld",
+- iblock, depth, path[depth].p_block);
++ map->m_lblk, depth, path[depth].p_block);
+ err = -EIO;
+ goto out2;
+ }
+@@ -3854,11 +3840,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ */
+ ee_len = ext4_ext_get_actual_len(ex);
+ /* if found extent covers block, simply return it */
+- if (in_range(iblock, ee_block, ee_len)) {
+- newblock = iblock - ee_block + ee_start;
++ if (in_range(map->m_lblk, ee_block, ee_len)) {
++ newblock = map->m_lblk - ee_block + ee_start;
+ /* number of remaining blocks in the extent */
+- allocated = ee_len - (iblock - ee_block);
+- ext_debug("%u fit into %u:%d -> %llu\n", iblock,
++ allocated = ee_len - (map->m_lblk - ee_block);
++ ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
+ ee_block, ee_len, newblock);
+
+ /*
+@@ -3870,9 +3856,9 @@ int ext4_ext_get_blocks(handle_t *handle
+ ee_len, ee_start);
+ goto out;
+ }
+- ret = ext4_ext_handle_uninitialized_extents(
+- handle, inode, iblock, max_blocks, path,
+- flags, allocated, bh_result, newblock);
++ ret = ext4_ext_handle_uninitialized_extents(handle,
++ inode, map, path, flags, allocated,
++ newblock);
+ return ret;
+ }
+ }
+@@ -3886,7 +3872,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ * put just found gap into cache to speed up
+ * subsequent requests
+ */
+- ext4_ext_put_gap_in_cache(inode, path, iblock);
++ ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
+ goto out2;
+ }
+ /*
+@@ -3894,11 +3880,11 @@ int ext4_ext_get_blocks(handle_t *handle
+ */
+
+ /* find neighbour allocated blocks */
+- ar.lleft = iblock;
++ ar.lleft = map->m_lblk;
+ err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
+ if (err)
+ goto out2;
+- ar.lright = iblock;
++ ar.lright = map->m_lblk;
+ err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
+ if (err)
+ goto out2;
+@@ -3909,26 +3895,26 @@ int ext4_ext_get_blocks(handle_t *handle
+ * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
+ * EXT_UNINIT_MAX_LEN.
+ */
+- if (max_blocks > EXT_INIT_MAX_LEN &&
++ if (map->m_len > EXT_INIT_MAX_LEN &&
+ !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
+- max_blocks = EXT_INIT_MAX_LEN;
+- else if (max_blocks > EXT_UNINIT_MAX_LEN &&
++ map->m_len = EXT_INIT_MAX_LEN;
++ else if (map->m_len > EXT_UNINIT_MAX_LEN &&
+ (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
+- max_blocks = EXT_UNINIT_MAX_LEN;
++ map->m_len = EXT_UNINIT_MAX_LEN;
+
+- /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
+- newex.ee_block = cpu_to_le32(iblock);
+- newex.ee_len = cpu_to_le16(max_blocks);
++ /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
++ newex.ee_block = cpu_to_le32(map->m_lblk);
++ newex.ee_len = cpu_to_le16(map->m_len);
+ err = ext4_ext_check_overlap(inode, &newex, path);
+ if (err)
+ allocated = ext4_ext_get_actual_len(&newex);
+ else
+- allocated = max_blocks;
++ allocated = map->m_len;
+
+ /* allocate new block */
+ ar.inode = inode;
+- ar.goal = ext4_ext_find_goal(inode, path, iblock);
+- ar.logical = iblock;
++ ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
++ ar.logical = map->m_lblk;
+ ar.len = allocated;
+ if (S_ISREG(inode->i_mode))
+ ar.flags = EXT4_MB_HINT_DATA;
+@@ -3967,7 +3953,7 @@ int ext4_ext_get_blocks(handle_t *handle
+ }
+ }
+
+- err = check_eofblocks_fl(handle, inode, iblock, path, ar.len);
++ err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
+ if (err)
+ goto out2;
+
+@@ -3987,9 +3973,9 @@ int ext4_ext_get_blocks(handle_t *handle
+ /* previous routine could use block we allocated */
+ newblock = ext4_ext_pblock(&newex);
+ allocated = ext4_ext_get_actual_len(&newex);
+- if (allocated > max_blocks)
+- allocated = max_blocks;
+- set_buffer_new(bh_result);
++ if (allocated > map->m_len)
++ allocated = map->m_len;
++ map->m_flags |= EXT4_MAP_NEW;
+
+ /*
+ * Update reserved blocks/metadata blocks after successful
+@@ -4003,17 +3989,17 @@ int ext4_ext_get_blocks(handle_t *handle
+ * when it is _not_ an uninitialized extent.
+ */
+ if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
+- ext4_ext_put_in_cache(inode, iblock, allocated, newblock);
++ ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ } else
+ ext4_update_inode_fsync_trans(handle, inode, 0);
+ out:
+- if (allocated > max_blocks)
+- allocated = max_blocks;
++ if (allocated > map->m_len)
++ allocated = map->m_len;
+ ext4_ext_show_leaf(inode, path);
+- set_buffer_mapped(bh_result);
+- bh_result->b_bdev = inode->i_sb->s_bdev;
+- bh_result->b_blocknr = newblock;
++ map->m_flags |= EXT4_MAP_MAPPED;
++ map->m_pblk = newblock;
++ map->m_len = allocated;
+ out2:
+ if (path) {
+ ext4_ext_drop_refs(path);
+@@ -4196,7 +4182,7 @@ retry:
+ if (ret <= 0) {
+ #ifdef EXT4FS_DEBUG
+ WARN_ON(ret <= 0);
+- printk(KERN_ERR "%s: ext4_ext_get_blocks "
++ printk(KERN_ERR "%s: ext4_ext_map_blocks "
+ "returned error inode#%lu, block=%u, "
+ "max_blocks=%u", __func__,
+ inode->i_ino, block, max_blocks);
+@@ -4709,6 +4695,5 @@ EXPORT_SYMBOL(ext4_ext_insert_extent);
+ EXPORT_SYMBOL(ext4_mb_new_blocks);
+ EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
+ EXPORT_SYMBOL(ext4_mark_inode_dirty);
+-EXPORT_SYMBOL(ext4_ext_walk_space);
+ EXPORT_SYMBOL(ext4_ext_find_extent);
+ EXPORT_SYMBOL(ext4_ext_drop_refs);
+Index: linux-stage/fs/ext4/inode.c
+===================================================================
+--- linux-stage.orig/fs/ext4/inode.c 2016-07-15 09:52:28.000000000 +0300
++++ linux-stage/fs/ext4/inode.c 2016-07-15 09:52:29.000000000 +0300
+@@ -200,7 +200,7 @@ int ext4_truncate_restart_trans(handle_t
+ int ret;
+
+ /*
+- * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this
++ * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
+ * moment, get_block can be called only for blocks inside i_size since
+ * page cache has been already dropped and writes are blocked by
+ * i_mutex. So we can safely drop the i_data_sem here.
+@@ -970,9 +970,9 @@ err_out:
+ }
+
+ /*
+- * The ext4_ind_get_blocks() function handles non-extents inodes
++ * The ext4_ind_map_blocks() function handles non-extents inodes
+ * (i.e., using the traditional indirect/double-indirect i_blocks
+- * scheme) for ext4_get_blocks().
++ * scheme) for ext4_map_blocks().
+ *
+ * Allocation strategy is simple: if we have to allocate something, we will
+ * have to go the whole way to leaf. So let's do it before attaching anything
+@@ -991,15 +991,14 @@ err_out:
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
+ *
+- * The ext4_ind_get_blocks() function should be called with
++ * The ext4_ind_map_blocks() function should be called with
+ * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
+ * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
+ * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
+ * blocks.
+ */
+-static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
+- ext4_lblk_t iblock, unsigned int maxblocks,
+- struct buffer_head *bh_result,
++static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map,
+ int flags)
+ {
+ int err = -EIO;
+@@ -1015,7 +1014,7 @@ static int ext4_ind_get_blocks(handle_t
+
+ J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
+ J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
+- depth = ext4_block_to_path(inode, iblock, offsets,
++ depth = ext4_block_to_path(inode, map->m_lblk, offsets,
+ &blocks_to_boundary);
+
+ if (depth == 0)
+@@ -1026,10 +1025,9 @@ static int ext4_ind_get_blocks(handle_t
+ /* Simplest case - block found, no allocation needed */
+ if (!partial) {
+ first_block = le32_to_cpu(chain[depth - 1].key);
+- clear_buffer_new(bh_result);
+ count++;
+ /*map more blocks*/
+- while (count < maxblocks && count <= blocks_to_boundary) {
++ while (count < map->m_len && count <= blocks_to_boundary) {
+ ext4_fsblk_t blk;
+
+ blk = le32_to_cpu(*(chain[depth-1].p + count));
+@@ -1049,7 +1047,7 @@ static int ext4_ind_get_blocks(handle_t
+ /*
+ * Okay, we need to do block allocation.
+ */
+- goal = ext4_find_goal(inode, iblock, partial);
++ goal = ext4_find_goal(inode, map->m_lblk, partial);
+
+ /* the number of blocks need to allocate for [d,t]indirect blocks */
+ indirect_blks = (chain + depth) - partial - 1;
+@@ -1059,11 +1057,11 @@ static int ext4_ind_get_blocks(handle_t
+ * direct blocks to allocate for this branch.
+ */
+ count = ext4_blks_to_allocate(partial, indirect_blks,
+- maxblocks, blocks_to_boundary);
++ map->m_len, blocks_to_boundary);
+ /*
+ * Block out ext4_truncate while we alter the tree
+ */
+- err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
++ err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
+ &count, goal,
+ offsets + (partial - chain), partial);
+
+@@ -1075,18 +1073,20 @@ static int ext4_ind_get_blocks(handle_t
+ * may need to return -EAGAIN upwards in the worst case. --sct
+ */
+ if (!err)
+- err = ext4_splice_branch(handle, inode, iblock,
++ err = ext4_splice_branch(handle, inode, map->m_lblk,
+ partial, indirect_blks, count);
+ if (err)
+ goto cleanup;
+
+- set_buffer_new(bh_result);
++ map->m_flags |= EXT4_MAP_NEW;
+
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+ got_it:
+- map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
++ map->m_flags |= EXT4_MAP_MAPPED;
++ map->m_pblk = le32_to_cpu(chain[depth-1].key);
++ map->m_len = count;
+ if (count > blocks_to_boundary)
+- set_buffer_boundary(bh_result);
++ map->m_flags |= EXT4_MAP_BOUNDARY;
+ err = count;
+ /* Clean up and exit */
+ partial = chain + depth - 1; /* the whole chain */
+@@ -1096,7 +1096,6 @@ cleanup:
+ brelse(partial->bh);
+ partial--;
+ }
+- BUFFER_TRACE(bh_result, "returned");
+ out:
+ return err;
+ }
+@@ -1291,15 +1290,15 @@ static pgoff_t ext4_num_dirty_pages(stru
+ }
+
+ /*
+- * The ext4_get_blocks() function tries to look up the requested blocks,
++ * The ext4_map_blocks() function tries to look up the requested blocks,
+ * and returns if the blocks are already mapped.
+ *
+ * Otherwise it takes the write lock of the i_data_sem and allocate blocks
+ * and store the allocated blocks in the result buffer head and mark it
+ * mapped.
+ *
+- * If file type is extents based, it will call ext4_ext_get_blocks(),
+- * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping
++ * If file type is extents based, it will call ext4_ext_map_blocks(),
++ * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
+ * based files
+ *
+ * On success, it returns the number of blocks being mapped or allocate.
+@@ -1312,35 +1311,31 @@ static pgoff_t ext4_num_dirty_pages(stru
+ *
+ * It returns the error in case of allocation failure.
+ */
+-int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
+- unsigned int max_blocks, struct buffer_head *bh,
+- int flags)
++int ext4_map_blocks(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map, int flags)
+ {
+ int retval;
+
+- clear_buffer_mapped(bh);
+- clear_buffer_unwritten(bh);
++ map->m_flags = 0;
++ ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
++ "logical block %lu\n", inode->i_ino, flags, map->m_len,
++ (unsigned long) map->m_lblk);
+
+- ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
+- "logical block %lu\n", inode->i_ino, flags, max_blocks,
+- (unsigned long)block);
+ /*
+ * Try to see if we can get the block without requesting a new
+ * file system block.
+ */
+ down_read((&EXT4_I(inode)->i_data_sem));
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+- retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
+- bh, 0);
++ retval = ext4_ext_map_blocks(handle, inode, map, 0);
+ } else {
+- retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
+- bh, 0);
++ retval = ext4_ind_map_blocks(handle, inode, map, 0);
+ }
+ up_read((&EXT4_I(inode)->i_data_sem));
+
+- if (retval > 0 && buffer_mapped(bh)) {
++ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+ int ret = check_block_validity(inode, "file system corruption",
+- block, bh->b_blocknr, retval);
++ map->m_lblk, map->m_pblk, retval);
+ if (ret != 0)
+ return ret;
+ }
+@@ -1356,7 +1351,7 @@ int ext4_get_blocks(handle_t *handle, st
+ * ext4_ext_get_block() returns th create = 0
+ * with buffer head unmapped.
+ */
+- if (retval > 0 && buffer_mapped(bh))
++ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
+ return retval;
+
+ /*
+@@ -1369,7 +1364,7 @@ int ext4_get_blocks(handle_t *handle, st
+ * of BH_Unwritten and BH_Mapped flags being simultaneously
+ * set on the buffer_head.
+ */
+- clear_buffer_unwritten(bh);
++ map->m_flags &= ~EXT4_MAP_UNWRITTEN;
+
+ /*
+ * New blocks allocate and/or writing to uninitialized extent
+@@ -1392,13 +1387,11 @@ int ext4_get_blocks(handle_t *handle, st
+ * could have changed the inode type in between
+ */
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+- retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
+- bh, flags);
++ retval = ext4_ext_map_blocks(handle, inode, map, flags);
+ } else {
+- retval = ext4_ind_get_blocks(handle, inode, block,
+- max_blocks, bh, flags);
++ retval = ext4_ind_map_blocks(handle, inode, map, flags);
+
+- if (retval > 0 && buffer_new(bh)) {
++ if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
+ /*
+ * We allocated new blocks which will result in
+ * i_data's format changing. Force the migrate
+@@ -1421,15 +1414,38 @@ int ext4_get_blocks(handle_t *handle, st
+ EXT4_I(inode)->i_delalloc_reserved_flag = 0;
+
+ up_write((&EXT4_I(inode)->i_data_sem));
+- if (retval > 0 && buffer_mapped(bh)) {
++ if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+ int ret = check_block_validity(inode, "file system "
+ "corruption after allocation",
+- block, bh->b_blocknr, retval);
++ map->m_lblk, map->m_pblk,
++ retval);
+ if (ret != 0)
+ return ret;
+ }
+ return retval;
+ }
++EXPORT_SYMBOL(ext4_map_blocks);
++
++int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
++ unsigned int max_blocks, struct buffer_head *bh,
++ int flags)
++{
++ struct ext4_map_blocks map;
++ int ret;
++
++ map.m_lblk = block;
++ map.m_len = max_blocks;
++
++ ret = ext4_map_blocks(handle, inode, &map, flags);
++ if (ret < 0)
++ return ret;
++
++ bh->b_blocknr = map.m_pblk;
++ bh->b_size = inode->i_sb->s_blocksize * map.m_len;
++ bh->b_bdev = inode->i_sb->s_bdev;
++ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
++ return ret;
++}
+
+ /* Maximum number of blocks we map for direct IO at once. */
+ #define DIO_MAX_BLOCKS 4096