Whamcloud - gitweb
LU-8410 ldiskfs: new FIEMAP API
[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / rhel6.5 / ext4-add-new-abstraction-ext4_map_blocks.patch
1 From: Theodore Ts'o <tytso@mit.edu>
2
3 From e35fd6609b2fee54484d520deccb8f18bf7d38f3 Mon Sep 17 00:00:00 2001
4
5
6 Subject: [PATCH] ext4: Add new abstraction ext4_map_blocks() underneath
7  ext4_get_blocks()
8
9 Jack up ext4_get_blocks() and add a new function, ext4_map_blocks()
10 which uses a much smaller structure, struct ext4_map_blocks which is
11 20 bytes, as opposed to a struct buffer_head, which is nearly 5 times
12 bigger on an x86_64 machine.  By switching things to use
13 ext4_map_blocks(), we can save stack space by using ext4_map_blocks()
14 since we can avoid allocating a struct buffer_head on the stack.
15
16 Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
17 Index: linux-stage/fs/ext4/ext4.h
18 ===================================================================
19 --- linux-stage.orig/fs/ext4/ext4.h     2016-07-15 09:52:28.000000000 +0300
20 +++ linux-stage/fs/ext4/ext4.h  2016-07-15 09:52:29.000000000 +0300
21 @@ -142,10 +142,8 @@ struct ext4_allocation_request {
22  #define EXT4_MAP_MAPPED                (1 << BH_Mapped)
23  #define EXT4_MAP_UNWRITTEN     (1 << BH_Unwritten)
24  #define EXT4_MAP_BOUNDARY      (1 << BH_Boundary)
25 -#define EXT4_MAP_UNINIT                (1 << BH_Uninit)
26  #define EXT4_MAP_FLAGS         (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
27 -                                EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
28 -                                EXT4_MAP_UNINIT)
29 +                                EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
30  
31  struct ext4_map_blocks {
32         ext4_fsblk_t m_pblk;
33 @@ -2184,9 +2182,9 @@ extern int ext4_ext_tree_init(handle_t *
34  extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
35  extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
36                                        int chunk);
37 -extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
38 -                              ext4_lblk_t iblock, unsigned int max_blocks,
39 -                              struct buffer_head *bh_result, int flags);
40 +#define HAVE_EXT4_MAP_BLOCKS
41 +extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
42 +                              struct ext4_map_blocks *map, int flags);
43  extern void ext4_ext_truncate(struct inode *);
44  extern int ext4_ext_punch_hole(struct inode *inode, loff_t offset,
45                                 loff_t length);
46 @@ -2196,6 +2194,8 @@ extern long ext4_fallocate(struct inode 
47                           loff_t len);
48  extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
49                           ssize_t len);
50 +extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
51 +                          struct ext4_map_blocks *map, int flags);
52  extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
53                            sector_t block, unsigned int max_blocks,
54                            struct buffer_head *bh, int flags);
55 Index: linux-stage/fs/ext4/extents.c
56 ===================================================================
57 --- linux-stage.orig/fs/ext4/extents.c  2016-07-15 09:52:28.000000000 +0300
58 +++ linux-stage/fs/ext4/extents.c       2016-07-15 09:53:10.000000000 +0300
59 @@ -2960,7 +2960,7 @@ fix_extent_len:
60  
61  #define EXT4_EXT_ZERO_LEN 7
62  /*
63 - * This function is called by ext4_ext_get_blocks() if someone tries to write
64 + * This function is called by ext4_ext_map_blocks() if someone tries to write
65   * to an uninitialized extent. It may result in splitting the uninitialized
66   * extent into multiple extents (upto three - one initialized and two
67   * uninitialized).
68 @@ -2970,11 +2970,10 @@ fix_extent_len:
69   *   c> Splits in three extents: Somone is writing in middle of the extent
70   */
71  static int ext4_ext_convert_to_initialized(handle_t *handle,
72 -                                               struct inode *inode,
73 -                                               struct ext4_ext_path *path,
74 -                                               ext4_lblk_t iblock,
75 -                                               unsigned int max_blocks,
76 -                                               int flags)
77 +                                          struct inode *inode,
78 +                                          struct ext4_map_blocks *map,
79 +                                          struct ext4_ext_path *path,
80 +                                          int flags)
81  {
82         struct ext4_extent *ex, newex, orig_ex;
83         struct ext4_extent *ex1 = NULL;
84 @@ -2990,20 +2989,20 @@ static int ext4_ext_convert_to_initializ
85  
86         ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
87                 "block %llu, max_blocks %u\n", inode->i_ino,
88 -               (unsigned long long)iblock, max_blocks);
89 +               (unsigned long long)map->m_lblk, map->m_len);
90  
91         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
92                 inode->i_sb->s_blocksize_bits;
93 -       if (eof_block < iblock + max_blocks)
94 -               eof_block = iblock + max_blocks;
95 +       if (eof_block < map->m_lblk + map->m_len)
96 +               eof_block = map->m_lblk + map->m_len;
97  
98         depth = ext_depth(inode);
99         eh = path[depth].p_hdr;
100         ex = path[depth].p_ext;
101         ee_block = le32_to_cpu(ex->ee_block);
102         ee_len = ext4_ext_get_actual_len(ex);
103 -       allocated = ee_len - (iblock - ee_block);
104 -       newblock = iblock - ee_block + ext4_ext_pblock(ex);
105 +       allocated = ee_len - (map->m_lblk - ee_block);
106 +       newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
107  
108         ex2 = ex;
109         orig_ex.ee_block = ex->ee_block;
110 @@ -3033,10 +3032,10 @@ static int ext4_ext_convert_to_initializ
111                 return allocated;
112         }
113  
114 -       /* ex1: ee_block to iblock - 1 : uninitialized */
115 -       if (iblock > ee_block) {
116 +       /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
117 +       if (map->m_lblk > ee_block) {
118                 ex1 = ex;
119 -               ex1->ee_len = cpu_to_le16(iblock - ee_block);
120 +               ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
121                 ext4_ext_mark_uninitialized(ex1);
122                 ext4_ext_dirty(handle, inode, path + depth);
123                 ex2 = &newex;
124 @@ -3046,15 +3045,15 @@ static int ext4_ext_convert_to_initializ
125          * we insert ex3, if ex1 is NULL. This is to avoid temporary
126          * overlap of blocks.
127          */
128 -       if (!ex1 && allocated > max_blocks)
129 -               ex2->ee_len = cpu_to_le16(max_blocks);
130 +       if (!ex1 && allocated > map->m_len)
131 +               ex2->ee_len = cpu_to_le16(map->m_len);
132         /* ex3: to ee_block + ee_len : uninitialised */
133 -       if (allocated > max_blocks) {
134 +       if (allocated > map->m_len) {
135                 unsigned int newdepth;
136                 /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
137                 if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
138                         /*
139 -                        * iblock == ee_block is handled by the zerouout
140 +                        * map->m_lblk == ee_block is handled by the zerouout
141                          * at the beginning.
142                          * Mark first half uninitialized.
143                          * Mark second half initialized and zero out the
144 @@ -3067,7 +3066,7 @@ static int ext4_ext_convert_to_initializ
145                         ext4_ext_dirty(handle, inode, path + depth);
146  
147                         ex3 = &newex;
148 -                       ex3->ee_block = cpu_to_le32(iblock);
149 +                       ex3->ee_block = cpu_to_le32(map->m_lblk);
150                         ext4_ext_store_pblock(ex3, newblock);
151                         ex3->ee_len = cpu_to_le16(allocated);
152                         err = ext4_ext_insert_extent(handle, inode, path,
153 @@ -3081,7 +3080,7 @@ static int ext4_ext_convert_to_initializ
154                                 ext4_ext_store_pblock(ex,
155                                         ext4_ext_pblock(&orig_ex));
156                                 ext4_ext_dirty(handle, inode, path + depth);
157 -                               /* blocks available from iblock */
158 +                               /* blocks available from map->m_lblk */
159                                 return allocated;
160  
161                         } else if (err)
162 @@ -3103,8 +3102,8 @@ static int ext4_ext_convert_to_initializ
163                                  */
164                                 depth = ext_depth(inode);
165                                 ext4_ext_drop_refs(path);
166 -                               path = ext4_ext_find_extent(inode,
167 -                                                               iblock, path);
168 +                               path = ext4_ext_find_extent(inode, map->m_lblk,
169 +                                                           path);
170                                 if (IS_ERR(path)) {
171                                         err = PTR_ERR(path);
172                                         return err;
173 @@ -3124,9 +3123,9 @@ static int ext4_ext_convert_to_initializ
174                         return allocated;
175                 }
176                 ex3 = &newex;
177 -               ex3->ee_block = cpu_to_le32(iblock + max_blocks);
178 -               ext4_ext_store_pblock(ex3, newblock + max_blocks);
179 -               ex3->ee_len = cpu_to_le16(allocated - max_blocks);
180 +               ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
181 +               ext4_ext_store_pblock(ex3, newblock + map->m_len);
182 +               ex3->ee_len = cpu_to_le16(allocated - map->m_len);
183                 ext4_ext_mark_uninitialized(ex3);
184                 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
185                 if (err == -ENOSPC && may_zeroout) {
186 @@ -3139,7 +3138,7 @@ static int ext4_ext_convert_to_initializ
187                         ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
188                         ext4_ext_dirty(handle, inode, path + depth);
189                         /* zeroed the full extent */
190 -                       /* blocks available from iblock */
191 +                       /* blocks available from map->m_lblk */
192                         return allocated;
193  
194                 } else if (err)
195 @@ -3159,7 +3158,7 @@ static int ext4_ext_convert_to_initializ
196  
197                 depth = newdepth;
198                 ext4_ext_drop_refs(path);
199 -               path = ext4_ext_find_extent(inode, iblock, path);
200 +               path = ext4_ext_find_extent(inode, map->m_lblk, path);
201                 if (IS_ERR(path)) {
202                         err = PTR_ERR(path);
203                         goto out;
204 @@ -3173,14 +3172,14 @@ static int ext4_ext_convert_to_initializ
205                 if (err)
206                         goto out;
207  
208 -               allocated = max_blocks;
209 +               allocated = map->m_len;
210  
211                 /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
212                  * to insert a extent in the middle zerout directly
213                  * otherwise give the extent a chance to merge to left
214                  */
215                 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
216 -                       iblock != ee_block && may_zeroout) {
217 +                       map->m_lblk != ee_block && may_zeroout) {
218                         err =  ext4_ext_zeroout(inode, &orig_ex);
219                         if (err)
220                                 goto fix_extent_len;
221 @@ -3190,7 +3189,7 @@ static int ext4_ext_convert_to_initializ
222                         ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
223                         ext4_ext_dirty(handle, inode, path + depth);
224                         /* zero out the first half */
225 -                       /* blocks available from iblock */
226 +                       /* blocks available from map->m_lblk */
227                         return allocated;
228                 }
229         }
230 @@ -3201,13 +3200,13 @@ static int ext4_ext_convert_to_initializ
231          */
232         if (ex1 && ex1 != ex) {
233                 ex1 = ex;
234 -               ex1->ee_len = cpu_to_le16(iblock - ee_block);
235 +               ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
236                 ext4_ext_mark_uninitialized(ex1);
237                 ext4_ext_dirty(handle, inode, path + depth);
238                 ex2 = &newex;
239         }
240 -       /* ex2: iblock to iblock + maxblocks-1 : initialised */
241 -       ex2->ee_block = cpu_to_le32(iblock);
242 +       /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */
243 +       ex2->ee_block = cpu_to_le32(map->m_lblk);
244         ext4_ext_store_pblock(ex2, newblock);
245         ex2->ee_len = cpu_to_le16(allocated);
246         if (ex2 != ex)
247 @@ -3277,7 +3276,7 @@ fix_extent_len:
248  }
249  
250  /*
251 - * This function is called by ext4_ext_get_blocks() from
252 + * This function is called by ext4_ext_map_blocks() from
253   * ext4_get_blocks_dio_write() when DIO to write
254   * to an uninitialized extent.
255   *
256 @@ -3300,9 +3299,8 @@ fix_extent_len:
257   */
258  static int ext4_split_unwritten_extents(handle_t *handle,
259                                         struct inode *inode,
260 +                                       struct ext4_map_blocks *map,
261                                         struct ext4_ext_path *path,
262 -                                       ext4_lblk_t iblock,
263 -                                       unsigned int max_blocks,
264                                         int flags)
265  {
266         struct ext4_extent *ex, newex, orig_ex;
267 @@ -3318,20 +3316,20 @@ static int ext4_split_unwritten_extents(
268  
269         ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
270                 "block %llu, max_blocks %u\n", inode->i_ino,
271 -               (unsigned long long)iblock, max_blocks);
272 +               (unsigned long long)map->m_lblk, map->m_len);
273  
274         eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
275                 inode->i_sb->s_blocksize_bits;
276 -       if (eof_block < iblock + max_blocks)
277 -               eof_block = iblock + max_blocks;
278 +       if (eof_block < map->m_lblk + map->m_len)
279 +               eof_block = map->m_lblk + map->m_len;
280  
281         depth = ext_depth(inode);
282         eh = path[depth].p_hdr;
283         ex = path[depth].p_ext;
284         ee_block = le32_to_cpu(ex->ee_block);
285         ee_len = ext4_ext_get_actual_len(ex);
286 -       allocated = ee_len - (iblock - ee_block);
287 -       newblock = iblock - ee_block + ext4_ext_pblock(ex);
288 +       allocated = ee_len - (map->m_lblk - ee_block);
289 +       newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex);
290  
291         ex2 = ex;
292         orig_ex.ee_block = ex->ee_block;
293 @@ -3349,16 +3347,16 @@ static int ext4_split_unwritten_extents(
294          * block where the write begins, and the write completely
295          * covers the extent, then we don't need to split it.
296          */
297 -       if ((iblock == ee_block) && (allocated <= max_blocks))
298 +       if ((map->m_lblk == ee_block) && (allocated <= map->m_len))
299                 return allocated;
300  
301         err = ext4_ext_get_access(handle, inode, path + depth);
302         if (err)
303                 goto out;
304 -       /* ex1: ee_block to iblock - 1 : uninitialized */
305 -       if (iblock > ee_block) {
306 +       /* ex1: ee_block to map->m_lblk - 1 : uninitialized */
307 +       if (map->m_lblk > ee_block) {
308                 ex1 = ex;
309 -               ex1->ee_len = cpu_to_le16(iblock - ee_block);
310 +               ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
311                 ext4_ext_mark_uninitialized(ex1);
312                 ext4_ext_dirty(handle, inode, path + depth);
313                 ex2 = &newex;
314 @@ -3368,15 +3366,15 @@ static int ext4_split_unwritten_extents(
315          * we insert ex3, if ex1 is NULL. This is to avoid temporary
316          * overlap of blocks.
317          */
318 -       if (!ex1 && allocated > max_blocks)
319 -               ex2->ee_len = cpu_to_le16(max_blocks);
320 +       if (!ex1 && allocated > map->m_len)
321 +               ex2->ee_len = cpu_to_le16(map->m_len);
322         /* ex3: to ee_block + ee_len : uninitialised */
323 -       if (allocated > max_blocks) {
324 +       if (allocated > map->m_len) {
325                 unsigned int newdepth;
326                 ex3 = &newex;
327 -               ex3->ee_block = cpu_to_le32(iblock + max_blocks);
328 -               ext4_ext_store_pblock(ex3, newblock + max_blocks);
329 -               ex3->ee_len = cpu_to_le16(allocated - max_blocks);
330 +               ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len);
331 +               ext4_ext_store_pblock(ex3, newblock + map->m_len);
332 +               ex3->ee_len = cpu_to_le16(allocated - map->m_len);
333                 ext4_ext_mark_uninitialized(ex3);
334                 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
335                 if (err == -ENOSPC && may_zeroout) {
336 @@ -3400,8 +3398,8 @@ static int ext4_split_unwritten_extents(
337                                 err =  ext4_ext_zeroout(inode, ex3);
338                                 if (err)
339                                         goto fix_extent_len;
340 -                               max_blocks = allocated;
341 -                               ex2->ee_len = cpu_to_le16(max_blocks);
342 +                               map->m_len = allocated;
343 +                               ex2->ee_len = cpu_to_le16(map->m_len);
344                                 goto skip;
345                         }
346                         err =  ext4_ext_zeroout(inode, &orig_ex);
347 @@ -3413,7 +3411,7 @@ static int ext4_split_unwritten_extents(
348                         ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex));
349                         ext4_ext_dirty(handle, inode, path + depth);
350                         /* zeroed the full extent */
351 -                       /* blocks available from iblock */
352 +                       /* blocks available from map->m_lblk */
353                         return allocated;
354  
355                 } else if (err)
356 @@ -3433,7 +3431,7 @@ static int ext4_split_unwritten_extents(
357  
358                 depth = newdepth;
359                 ext4_ext_drop_refs(path);
360 -               path = ext4_ext_find_extent(inode, iblock, path);
361 +               path = ext4_ext_find_extent(inode, map->m_lblk, path);
362                 if (IS_ERR(path)) {
363                         err = PTR_ERR(path);
364                         goto out;
365 @@ -3446,8 +3444,7 @@ static int ext4_split_unwritten_extents(
366                 err = ext4_ext_get_access(handle, inode, path + depth);
367                 if (err)
368                         goto out;
369 -
370 -               allocated = max_blocks;
371 +               allocated = map->m_len;
372         }
373  skip:
374         /*
375 @@ -3457,16 +3454,16 @@ skip:
376          */
377         if (ex1 && ex1 != ex) {
378                 ex1 = ex;
379 -               ex1->ee_len = cpu_to_le16(iblock - ee_block);
380 +               ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block);
381                 ext4_ext_mark_uninitialized(ex1);
382                 ext4_ext_dirty(handle, inode, path + depth);
383                 ex2 = &newex;
384         }
385         /*
386 -        * ex2: iblock to iblock + maxblocks-1 : to be direct IO written,
387 -        * uninitialised still.
388 +        * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written
389 +        * using direct I/O, uninitialised still.
390          */
391 -       ex2->ee_block = cpu_to_le32(iblock);
392 +       ex2->ee_block = cpu_to_le32(map->m_lblk);
393         ext4_ext_store_pblock(ex2, newblock);
394         ex2->ee_len = cpu_to_le16(allocated);
395         ext4_ext_mark_uninitialized(ex2);
396 @@ -3506,8 +3503,7 @@ fix_extent_len:
397  
398  static int ext4_convert_unwritten_extents_dio(handle_t *handle,
399                                               struct inode *inode,
400 -                                             ext4_lblk_t iblock,
401 -                                             unsigned int max_blocks,
402 +                                             struct ext4_map_blocks *map,
403                                               struct ext4_ext_path *path)
404  {
405         struct ext4_extent *ex;
406 @@ -3529,14 +3525,13 @@ static int ext4_convert_unwritten_extent
407  
408         /* If extent is larger than requested then split is required */
409  
410 -       if (ee_block != iblock || ee_len > max_blocks) {
411 -               err = ext4_split_unwritten_extents(handle, inode, path,
412 -                                       iblock, max_blocks,
413 +       if (ee_block != map->m_lblk || ee_len > map->m_len) {
414 +               err = ext4_split_unwritten_extents(handle, inode, map, path,
415                                         EXT4_EXT_DATA_VALID);
416                 if (err < 0)
417                         goto out;
418                 ext4_ext_drop_refs(path);
419 -               path = ext4_ext_find_extent(inode, iblock, path);
420 +               path = ext4_ext_find_extent(inode, map->m_lblk, path);
421                 if (IS_ERR(path)) {
422                         err = PTR_ERR(path);
423                         goto out;
424 @@ -3627,10 +3622,9 @@ out:
425  
426  static int
427  ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
428 -                       ext4_lblk_t iblock, unsigned int max_blocks,
429 +                       struct ext4_map_blocks *map,
430                         struct ext4_ext_path *path, int flags,
431 -                       unsigned int allocated, struct buffer_head *bh_result,
432 -                       ext4_fsblk_t newblock)
433 +                       unsigned int allocated, ext4_fsblk_t newblock)
434  {
435         int ret = 0;
436         int err = 0;
437 @@ -3638,7 +3632,7 @@ ext4_ext_handle_uninitialized_extents(ha
438  
439         ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical"
440                   "block %llu, max_blocks %u, flags %d, allocated %u",
441 -                 inode->i_ino, (unsigned long long)iblock, max_blocks,
442 +                 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
443                   flags, allocated);
444         ext4_ext_show_leaf(inode, path);
445  
446 @@ -3651,9 +3645,8 @@ ext4_ext_handle_uninitialized_extents(ha
447         /* DIO get_block() before submit the IO, split the extent */
448         if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
449             EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
450 -               ret = ext4_split_unwritten_extents(handle,
451 -                                               inode, path, iblock,
452 -                                               max_blocks, flags);
453 +               ret = ext4_split_unwritten_extents(handle, inode, map,
454 +                                                  path, flags);
455                 /*
456                  * Flag the inode(non aio case) or end_io struct (aio case)
457                  * that this IO needs to convertion to written when IO is
458 @@ -3670,12 +3663,11 @@ ext4_ext_handle_uninitialized_extents(ha
459         if ((flags & ~EXT4_GET_BLOCKS_METADATA_NOFAIL) ==
460             EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
461                 ret = ext4_convert_unwritten_extents_dio(handle, inode,
462 -                                                        iblock, max_blocks,
463 -                                                        path);
464 +                                                        map, path);
465                 if (ret >= 0) {
466                         ext4_update_inode_fsync_trans(handle, inode, 1);
467 -                       err = check_eofblocks_fl(handle, inode, iblock, path,
468 -                                                max_blocks);
469 +                       err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
470 +                                                map->m_len);
471                 } else
472                         err = ret;
473                 goto out2;
474 @@ -3697,18 +3689,15 @@ ext4_ext_handle_uninitialized_extents(ha
475                  * the buffer head will be unmapped so that
476                  * a read from the block returns 0s.
477                  */
478 -               set_buffer_unwritten(bh_result);
479 +               map->m_flags |= EXT4_MAP_UNWRITTEN;
480                 goto out1;
481         }
482  
483         /* buffered write, writepage time, convert*/
484 -       ret = ext4_ext_convert_to_initialized(handle, inode,
485 -                                               path, iblock,
486 -                                               max_blocks,
487 -                                               flags);
488 +       ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
489         if (ret >= 0) {
490                 ext4_update_inode_fsync_trans(handle, inode, 1);
491 -               err = check_eofblocks_fl(handle, inode, iblock, path, max_blocks);
492 +               err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
493                 if (err < 0)
494                         goto out2;
495         }
496 @@ -3718,7 +3707,7 @@ out:
497                 goto out2;
498         } else
499                 allocated = ret;
500 -       set_buffer_new(bh_result);
501 +       map->m_flags |= EXT4_MAP_NEW;
502         /*
503          * if we allocated more blocks than requested
504          * we need to make sure we unmap the extra block
505 @@ -3726,11 +3715,11 @@ out:
506          * unmapped later when we find the buffer_head marked
507          * new.
508          */
509 -       if (allocated > max_blocks) {
510 +       if (allocated > map->m_len) {
511                 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
512 -                                       newblock + max_blocks,
513 -                                       allocated - max_blocks);
514 -               allocated = max_blocks;
515 +                                       newblock + map->m_len,
516 +                                       allocated - map->m_len);
517 +               allocated = map->m_len;
518         }
519  
520         /*
521 @@ -3744,13 +3733,13 @@ out:
522                 ext4_da_update_reserve_space(inode, allocated, 0);
523  
524  map_out:
525 -       set_buffer_mapped(bh_result);
526 +       map->m_flags |= EXT4_MAP_MAPPED;
527  out1:
528 -       if (allocated > max_blocks)
529 -               allocated = max_blocks;
530 +       if (allocated > map->m_len)
531 +               allocated = map->m_len;
532         ext4_ext_show_leaf(inode, path);
533 -       bh_result->b_bdev = inode->i_sb->s_bdev;
534 -       bh_result->b_blocknr = newblock;
535 +       map->m_pblk = newblock;
536 +       map->m_len = allocated;
537  out2:
538         if (path) {
539                 ext4_ext_drop_refs(path);
540 @@ -3777,10 +3766,8 @@ out2:
541   *
542   * return < 0, error case.
543   */
544 -int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
545 -                       ext4_lblk_t iblock,
546 -                       unsigned int max_blocks, struct buffer_head *bh_result,
547 -                       int flags)
548 +int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
549 +                       struct ext4_map_blocks *map, int flags)
550  {
551         struct ext4_ext_path *path = NULL;
552         struct ext4_extent_header *eh;
553 @@ -3791,12 +3778,11 @@ int ext4_ext_get_blocks(handle_t *handle
554         struct ext4_allocation_request ar;
555         ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
556  
557 -       __clear_bit(BH_New, &bh_result->b_state);
558         ext_debug("blocks %u/%u requested for inode %lu\n",
559 -                       iblock, max_blocks, inode->i_ino);
560 +                 map->m_lblk, map->m_len, inode->i_ino);
561  
562         /* check in cache */
563 -       if (ext4_ext_in_cache(inode, iblock, &newex)) {
564 +       if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
565                 if (!newex.ee_start_lo && !newex.ee_start_hi) {
566                         if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
567                                 /*
568 @@ -3808,18 +3794,18 @@ int ext4_ext_get_blocks(handle_t *handle
569                         /* we should allocate requested block */
570                 } else {
571                         /* block is already allocated */
572 -                       newblock = iblock
573 +                       newblock = map->m_lblk
574                                    - le32_to_cpu(newex.ee_block)
575                                    + ext4_ext_pblock(&newex);
576                         /* number of remaining blocks in the extent */
577                         allocated = ext4_ext_get_actual_len(&newex) -
578 -                                       (iblock - le32_to_cpu(newex.ee_block));
579 +                                       (map->m_lblk - le32_to_cpu(newex.ee_block));
580                         goto out;
581                 }
582         }
583  
584         /* find extent for this block */
585 -       path = ext4_ext_find_extent(inode, iblock, NULL);
586 +       path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
587         if (IS_ERR(path)) {
588                 err = PTR_ERR(path);
589                 path = NULL;
590 @@ -3836,7 +3822,7 @@ int ext4_ext_get_blocks(handle_t *handle
591         if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
592                 EXT4_ERROR_INODE(inode, "bad extent address "
593                                  "iblock: %d, depth: %d pblock %lld",
594 -                                iblock, depth, path[depth].p_block);
595 +                                map->m_lblk, depth, path[depth].p_block);
596                 err = -EIO;
597                 goto out2;
598         }
599 @@ -3854,11 +3840,11 @@ int ext4_ext_get_blocks(handle_t *handle
600                  */
601                 ee_len = ext4_ext_get_actual_len(ex);
602                 /* if found extent covers block, simply return it */
603 -               if (in_range(iblock, ee_block, ee_len)) {
604 -                       newblock = iblock - ee_block + ee_start;
605 +               if (in_range(map->m_lblk, ee_block, ee_len)) {
606 +                       newblock = map->m_lblk - ee_block + ee_start;
607                         /* number of remaining blocks in the extent */
608 -                       allocated = ee_len - (iblock - ee_block);
609 -                       ext_debug("%u fit into %u:%d -> %llu\n", iblock,
610 +                       allocated = ee_len - (map->m_lblk - ee_block);
611 +                       ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
612                                         ee_block, ee_len, newblock);
613  
614                         /*
615 @@ -3870,9 +3856,9 @@ int ext4_ext_get_blocks(handle_t *handle
616                                         ee_len, ee_start);
617                                 goto out;
618                         }
619 -                       ret = ext4_ext_handle_uninitialized_extents(
620 -                               handle, inode, iblock, max_blocks, path,
621 -                               flags, allocated, bh_result, newblock);
622 +                       ret = ext4_ext_handle_uninitialized_extents(handle,
623 +                                       inode, map, path, flags, allocated,
624 +                                       newblock);
625                         return ret;
626                 }
627         }
628 @@ -3886,7 +3872,7 @@ int ext4_ext_get_blocks(handle_t *handle
629                  * put just found gap into cache to speed up
630                  * subsequent requests
631                  */
632 -               ext4_ext_put_gap_in_cache(inode, path, iblock);
633 +               ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
634                 goto out2;
635         }
636         /*
637 @@ -3894,11 +3880,11 @@ int ext4_ext_get_blocks(handle_t *handle
638          */
639  
640         /* find neighbour allocated blocks */
641 -       ar.lleft = iblock;
642 +       ar.lleft = map->m_lblk;
643         err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
644         if (err)
645                 goto out2;
646 -       ar.lright = iblock;
647 +       ar.lright = map->m_lblk;
648         err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
649         if (err)
650                 goto out2;
651 @@ -3909,26 +3895,26 @@ int ext4_ext_get_blocks(handle_t *handle
652          * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
653          * EXT_UNINIT_MAX_LEN.
654          */
655 -       if (max_blocks > EXT_INIT_MAX_LEN &&
656 +       if (map->m_len > EXT_INIT_MAX_LEN &&
657             !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
658 -               max_blocks = EXT_INIT_MAX_LEN;
659 -       else if (max_blocks > EXT_UNINIT_MAX_LEN &&
660 +               map->m_len = EXT_INIT_MAX_LEN;
661 +       else if (map->m_len > EXT_UNINIT_MAX_LEN &&
662                  (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
663 -               max_blocks = EXT_UNINIT_MAX_LEN;
664 +               map->m_len = EXT_UNINIT_MAX_LEN;
665  
666 -       /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
667 -       newex.ee_block = cpu_to_le32(iblock);
668 -       newex.ee_len = cpu_to_le16(max_blocks);
669 +       /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
670 +       newex.ee_block = cpu_to_le32(map->m_lblk);
671 +       newex.ee_len = cpu_to_le16(map->m_len);
672         err = ext4_ext_check_overlap(inode, &newex, path);
673         if (err)
674                 allocated = ext4_ext_get_actual_len(&newex);
675         else
676 -               allocated = max_blocks;
677 +               allocated = map->m_len;
678  
679         /* allocate new block */
680         ar.inode = inode;
681 -       ar.goal = ext4_ext_find_goal(inode, path, iblock);
682 -       ar.logical = iblock;
683 +       ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
684 +       ar.logical = map->m_lblk;
685         ar.len = allocated;
686         if (S_ISREG(inode->i_mode))
687                 ar.flags = EXT4_MB_HINT_DATA;
688 @@ -3967,7 +3953,7 @@ int ext4_ext_get_blocks(handle_t *handle
689                 }
690         }
691  
692 -       err = check_eofblocks_fl(handle, inode, iblock, path, ar.len);
693 +       err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len);
694         if (err)
695                 goto out2;
696  
697 @@ -3987,9 +3973,9 @@ int ext4_ext_get_blocks(handle_t *handle
698         /* previous routine could use block we allocated */
699         newblock = ext4_ext_pblock(&newex);
700         allocated = ext4_ext_get_actual_len(&newex);
701 -       if (allocated > max_blocks)
702 -               allocated = max_blocks;
703 -       set_buffer_new(bh_result);
704 +       if (allocated > map->m_len)
705 +               allocated = map->m_len;
706 +       map->m_flags |= EXT4_MAP_NEW;
707  
708         /*
709          * Update reserved blocks/metadata blocks after successful
710 @@ -4003,17 +3989,17 @@ int ext4_ext_get_blocks(handle_t *handle
711          * when it is _not_ an uninitialized extent.
712          */
713         if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
714 -               ext4_ext_put_in_cache(inode, iblock, allocated, newblock);
715 +               ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock);
716                 ext4_update_inode_fsync_trans(handle, inode, 1);
717         } else
718                 ext4_update_inode_fsync_trans(handle, inode, 0);
719  out:
720 -       if (allocated > max_blocks)
721 -               allocated = max_blocks;
722 +       if (allocated > map->m_len)
723 +               allocated = map->m_len;
724         ext4_ext_show_leaf(inode, path);
725 -       set_buffer_mapped(bh_result);
726 -       bh_result->b_bdev = inode->i_sb->s_bdev;
727 -       bh_result->b_blocknr = newblock;
728 +       map->m_flags |= EXT4_MAP_MAPPED;
729 +       map->m_pblk = newblock;
730 +       map->m_len = allocated;
731  out2:
732         if (path) {
733                 ext4_ext_drop_refs(path);
734 @@ -4196,7 +4182,7 @@ retry:
735                 if (ret <= 0) {
736  #ifdef EXT4FS_DEBUG
737                         WARN_ON(ret <= 0);
738 -                       printk(KERN_ERR "%s: ext4_ext_get_blocks "
739 +                       printk(KERN_ERR "%s: ext4_ext_map_blocks "
740                                     "returned error inode#%lu, block=%u, "
741                                     "max_blocks=%u", __func__,
742                                     inode->i_ino, block, max_blocks);
743 @@ -4709,6 +4695,5 @@ EXPORT_SYMBOL(ext4_ext_insert_extent);
744  EXPORT_SYMBOL(ext4_mb_new_blocks);
745  EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);
746  EXPORT_SYMBOL(ext4_mark_inode_dirty);
747 -EXPORT_SYMBOL(ext4_ext_walk_space);
748  EXPORT_SYMBOL(ext4_ext_find_extent);
749  EXPORT_SYMBOL(ext4_ext_drop_refs);
750 Index: linux-stage/fs/ext4/inode.c
751 ===================================================================
752 --- linux-stage.orig/fs/ext4/inode.c    2016-07-15 09:52:28.000000000 +0300
753 +++ linux-stage/fs/ext4/inode.c 2016-07-15 09:52:29.000000000 +0300
754 @@ -200,7 +200,7 @@ int ext4_truncate_restart_trans(handle_t
755         int ret;
756  
757         /*
758 -        * Drop i_data_sem to avoid deadlock with ext4_get_blocks At this
759 +        * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
760          * moment, get_block can be called only for blocks inside i_size since
761          * page cache has been already dropped and writes are blocked by
762          * i_mutex. So we can safely drop the i_data_sem here.
763 @@ -970,9 +970,9 @@ err_out:
764  }
765  
766  /*
767 - * The ext4_ind_get_blocks() function handles non-extents inodes
768 + * The ext4_ind_map_blocks() function handles non-extents inodes
769   * (i.e., using the traditional indirect/double-indirect i_blocks
770 - * scheme) for ext4_get_blocks().
771 + * scheme) for ext4_map_blocks().
772   *
773   * Allocation strategy is simple: if we have to allocate something, we will
774   * have to go the whole way to leaf. So let's do it before attaching anything
775 @@ -991,15 +991,14 @@ err_out:
776   * return = 0, if plain lookup failed.
777   * return < 0, error case.
778   *
779 - * The ext4_ind_get_blocks() function should be called with
780 + * The ext4_ind_map_blocks() function should be called with
781   * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
782   * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
783   * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
784   * blocks.
785   */
786 -static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
787 -                              ext4_lblk_t iblock, unsigned int maxblocks,
788 -                              struct buffer_head *bh_result,
789 +static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
790 +                              struct ext4_map_blocks *map,
791                                int flags)
792  {
793         int err = -EIO;
794 @@ -1015,7 +1014,7 @@ static int ext4_ind_get_blocks(handle_t 
795  
796         J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
797         J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
798 -       depth = ext4_block_to_path(inode, iblock, offsets,
799 +       depth = ext4_block_to_path(inode, map->m_lblk, offsets,
800                                    &blocks_to_boundary);
801  
802         if (depth == 0)
803 @@ -1026,10 +1025,9 @@ static int ext4_ind_get_blocks(handle_t 
804         /* Simplest case - block found, no allocation needed */
805         if (!partial) {
806                 first_block = le32_to_cpu(chain[depth - 1].key);
807 -               clear_buffer_new(bh_result);
808                 count++;
809                 /*map more blocks*/
810 -               while (count < maxblocks && count <= blocks_to_boundary) {
811 +               while (count < map->m_len && count <= blocks_to_boundary) {
812                         ext4_fsblk_t blk;
813  
814                         blk = le32_to_cpu(*(chain[depth-1].p + count));
815 @@ -1049,7 +1047,7 @@ static int ext4_ind_get_blocks(handle_t 
816         /*
817          * Okay, we need to do block allocation.
818         */
819 -       goal = ext4_find_goal(inode, iblock, partial);
820 +       goal = ext4_find_goal(inode, map->m_lblk, partial);
821  
822         /* the number of blocks need to allocate for [d,t]indirect blocks */
823         indirect_blks = (chain + depth) - partial - 1;
824 @@ -1059,11 +1057,11 @@ static int ext4_ind_get_blocks(handle_t 
825          * direct blocks to allocate for this branch.
826          */
827         count = ext4_blks_to_allocate(partial, indirect_blks,
828 -                                       maxblocks, blocks_to_boundary);
829 +                                     map->m_len, blocks_to_boundary);
830         /*
831          * Block out ext4_truncate while we alter the tree
832          */
833 -       err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
834 +       err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
835                                 &count, goal,
836                                 offsets + (partial - chain), partial);
837  
838 @@ -1075,18 +1073,20 @@ static int ext4_ind_get_blocks(handle_t 
839          * may need to return -EAGAIN upwards in the worst case.  --sct
840          */
841         if (!err)
842 -               err = ext4_splice_branch(handle, inode, iblock,
843 +               err = ext4_splice_branch(handle, inode, map->m_lblk,
844                                          partial, indirect_blks, count);
845         if (err)
846                 goto cleanup;
847  
848 -       set_buffer_new(bh_result);
849 +       map->m_flags |= EXT4_MAP_NEW;
850  
851         ext4_update_inode_fsync_trans(handle, inode, 1);
852  got_it:
853 -       map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
854 +       map->m_flags |= EXT4_MAP_MAPPED;
855 +       map->m_pblk = le32_to_cpu(chain[depth-1].key);
856 +       map->m_len = count;
857         if (count > blocks_to_boundary)
858 -               set_buffer_boundary(bh_result);
859 +               map->m_flags |= EXT4_MAP_BOUNDARY;
860         err = count;
861         /* Clean up and exit */
862         partial = chain + depth - 1;    /* the whole chain */
863 @@ -1096,7 +1096,6 @@ cleanup:
864                 brelse(partial->bh);
865                 partial--;
866         }
867 -       BUFFER_TRACE(bh_result, "returned");
868  out:
869         return err;
870  }
871 @@ -1291,15 +1290,15 @@ static pgoff_t ext4_num_dirty_pages(stru
872  }
873  
874  /*
875 - * The ext4_get_blocks() function tries to look up the requested blocks,
876 + * The ext4_map_blocks() function tries to look up the requested blocks,
877   * and returns if the blocks are already mapped.
878   *
879   * Otherwise it takes the write lock of the i_data_sem and allocate blocks
880   * and store the allocated blocks in the result buffer head and mark it
881   * mapped.
882   *
883 - * If file type is extents based, it will call ext4_ext_get_blocks(),
884 - * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping
885 + * If file type is extents based, it will call ext4_ext_map_blocks(),
886 + * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
887   * based files
888   *
889   * On success, it returns the number of blocks being mapped or allocate.
890 @@ -1312,35 +1311,31 @@ static pgoff_t ext4_num_dirty_pages(stru
891   *
892   * It returns the error in case of allocation failure.
893   */
894 -int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
895 -                   unsigned int max_blocks, struct buffer_head *bh,
896 -                   int flags)
897 +int ext4_map_blocks(handle_t *handle, struct inode *inode,
898 +                   struct ext4_map_blocks *map, int flags)
899  {
900         int retval;
901  
902 -       clear_buffer_mapped(bh);
903 -       clear_buffer_unwritten(bh);
904 +       map->m_flags = 0;
905 +       ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
906 +                 "logical block %lu\n", inode->i_ino, flags, map->m_len,
907 +                 (unsigned long) map->m_lblk);
908  
909 -       ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u,"
910 -                 "logical block %lu\n", inode->i_ino, flags, max_blocks,
911 -                 (unsigned long)block);
912         /*
913          * Try to see if we can get the block without requesting a new
914          * file system block.
915          */
916         down_read((&EXT4_I(inode)->i_data_sem));
917         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
918 -               retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
919 -                               bh, 0);
920 +               retval = ext4_ext_map_blocks(handle, inode, map, 0);
921         } else {
922 -               retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
923 -                                            bh, 0);
924 +               retval = ext4_ind_map_blocks(handle, inode, map, 0);
925         }
926         up_read((&EXT4_I(inode)->i_data_sem));
927  
928 -       if (retval > 0 && buffer_mapped(bh)) {
929 +       if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
930                 int ret = check_block_validity(inode, "file system corruption",
931 -                                              block, bh->b_blocknr, retval);
932 +                                       map->m_lblk, map->m_pblk, retval);
933                 if (ret != 0)
934                         return ret;
935         }
936 @@ -1356,7 +1351,7 @@ int ext4_get_blocks(handle_t *handle, st
937          * ext4_ext_get_block() returns th create = 0
938          * with buffer head unmapped.
939          */
940 -       if (retval > 0 && buffer_mapped(bh))
941 +       if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
942                 return retval;
943  
944         /*
945 @@ -1369,7 +1364,7 @@ int ext4_get_blocks(handle_t *handle, st
946          * of BH_Unwritten and BH_Mapped flags being simultaneously
947          * set on the buffer_head.
948          */
949 -       clear_buffer_unwritten(bh);
950 +       map->m_flags &= ~EXT4_MAP_UNWRITTEN;
951  
952         /*
953          * New blocks allocate and/or writing to uninitialized extent
954 @@ -1392,13 +1387,11 @@ int ext4_get_blocks(handle_t *handle, st
955          * could have changed the inode type in between
956          */
957         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
958 -               retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
959 -                                             bh, flags);
960 +               retval = ext4_ext_map_blocks(handle, inode, map, flags);
961         } else {
962 -               retval = ext4_ind_get_blocks(handle, inode, block,
963 -                                            max_blocks, bh, flags);
964 +               retval = ext4_ind_map_blocks(handle, inode, map, flags);
965  
966 -               if (retval > 0 && buffer_new(bh)) {
967 +               if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
968                         /*
969                          * We allocated new blocks which will result in
970                          * i_data's format changing.  Force the migrate
971 @@ -1421,15 +1414,38 @@ int ext4_get_blocks(handle_t *handle, st
972                 EXT4_I(inode)->i_delalloc_reserved_flag = 0;
973  
974         up_write((&EXT4_I(inode)->i_data_sem));
975 -       if (retval > 0 && buffer_mapped(bh)) {
976 +       if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
977                 int ret = check_block_validity(inode, "file system "
978                                                "corruption after allocation",
979 -                                              block, bh->b_blocknr, retval);
980 +                                              map->m_lblk, map->m_pblk,
981 +                                              retval);
982                 if (ret != 0)
983                         return ret;
984         }
985         return retval;
986  }
987 +EXPORT_SYMBOL(ext4_map_blocks);
988 +
989 +int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
990 +                   unsigned int max_blocks, struct buffer_head *bh,
991 +                   int flags)
992 +{
993 +       struct ext4_map_blocks map;
994 +       int ret;
995 +
996 +       map.m_lblk = block;
997 +       map.m_len = max_blocks;
998 +
999 +       ret = ext4_map_blocks(handle, inode, &map, flags);
1000 +       if (ret < 0)
1001 +               return ret;
1002 +
1003 +       bh->b_blocknr = map.m_pblk;
1004 +       bh->b_size = inode->i_sb->s_blocksize * map.m_len;
1005 +       bh->b_bdev = inode->i_sb->s_bdev;
1006 +       bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1007 +       return ret;
1008 +}
1009  
1010  /* Maximum number of blocks we map for direct IO at once. */
1011  #define DIO_MAX_BLOCKS 4096