2 * fallocate.c -- Allocate large chunks of file.
4 * Copyright (C) 2014 Oracle.
7 * This file may be redistributed under the terms of the GNU Library
8 * General Public License, version 2.
15 #include <sys/types.h>
20 #define min(a, b) ((a) < (b) ? (a) : (b))
25 # define dbg_printf(f, a...) do {printf(f, ## a); fflush(stdout); } while (0)
27 # define dbg_printf(f, a...)
31 * Extent-based fallocate code.
33 * Find runs of unmapped logical blocks by starting at start and walking the
34 * extents until we reach the end of the range we want.
36 * For each run of unmapped blocks, try to find the extents on either side of
37 * the range. If there's a left extent that can grow by at least a cluster and
38 * there are lblocks between start and the next lcluster after start, see if
39 * there's an implied cluster allocation; if so, zero the blocks (if the left
40 * extent is initialized) and adjust the extent. Ditto for the blocks between
41 * the end of the last full lcluster and end, if there's a right extent.
43 * Try to attach as much as we can to the left extent, then try to attach as
44 * much as we can to the right extent. For the remainder, try to allocate the
45 * whole range; map in whatever we get; and repeat until we're done.
47 * To attach to a left extent, figure out the maximum amount we can add to the
48 * extent and try to allocate that much, and append if successful. To attach
49 * to a right extent, figure out the max we can add to the extent, try to
50 * allocate that much, and prepend if successful.
52 * We need an alloc_range function that tells us how much we can allocate given
53 * a maximum length and one of a suggested start, a fixed start, or a fixed end
56 * Every time we modify the extent tree we also need to update the block stats.
58 * At the end, update i_blocks and i_size appropriately.
/*
 * Debug helper: print a one-line summary of an extent to stdout — its
 * logical block range, length, starting physical block, and the decoded
 * names of any flags set (LEAF / UNINIT / 2ND_VISIT).
 * NOTE(review): both parameters are marked EXT2FS_ATTR((unused)), which
 * suggests the body is compiled out unless debugging is enabled — confirm
 * against the surrounding #ifdef (not visible here).
 */
61 static void dbg_print_extent(const char *desc EXT2FS_ATTR((unused)),
62 const struct ext2fs_extent *extent EXT2FS_ATTR((unused)))
67 printf("extent: lblk %llu--%llu, len %u, pblk %llu, flags: ",
68 extent->e_lblk, extent->e_lblk + extent->e_len - 1,
69 extent->e_len, extent->e_pblk);
/* Decode each flag bit into a human-readable token. */
70 if (extent->e_flags & EXT2_EXTENT_FLAGS_LEAF)
71 fputs("LEAF ", stdout);
72 if (extent->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
73 fputs("UNINIT ", stdout);
74 if (extent->e_flags & EXT2_EXTENT_FLAGS_SECOND_VISIT)
75 fputs("2ND_VISIT ", stdout);
/* Printed when no flag bits are set (guard condition not visible here). */
77 fputs("(none)", stdout);
/*
 * Mark the physical block range starting at blk as allocated and credit
 * it to the inode.
 *
 * The length is rounded up to a whole number of clusters; the in-memory
 * block allocation stats are bumped for the full cluster-aligned range,
 * and the inode's i_blocks is updated via ext2fs_iblk_add_blocks().
 * Returns 0 or an ext2fs error code from the iblk update.
 */
83 static errcode_t claim_range(ext2_filsys fs, struct ext2_inode *inode,
84 blk64_t blk, blk64_t len)
/* Round the block count up to a whole number of clusters. */
88 clusters = (len + EXT2FS_CLUSTER_RATIO(fs) - 1) /
89 EXT2FS_CLUSTER_RATIO(fs);
90 ext2fs_block_alloc_stats_range(fs, blk,
91 clusters * EXT2FS_CLUSTER_RATIO(fs), +1);
92 return ext2fs_iblk_add_blocks(fs, inode, clusters);
/*
 * Allocate and map physical blocks into the logical hole
 * [range_start, range_start + range_len), between the optional adjacent
 * extents left_ext (ends just before the hole) and right_ext (starts just
 * after it).
 *
 * Strategy, in order (per the file-top comment):
 *   1. With bigalloc (cluster ratio > 1), handle "implied cluster"
 *      allocations at both edges: grow left_ext to the end of its cluster
 *      and right_ext back to the start of its cluster, zeroing blocks when
 *      the neighbor extent is initialized.
 *   2. If both neighbors exist, are init/uninit-compatible, and the merged
 *      result fits in one extent, allocate exactly the gap between them
 *      and merge everything into a single extent.
 *   3. Otherwise attach as much as possible to left_ext, then to
 *      right_ext, respecting EOF limits unless
 *      EXT2_FALLOCATE_INIT_BEYOND_EOF is set.
 *   4. Cover partial clusters at either end of what remains, then loop
 *      allocating new extents for the rest, inserting each into the tree.
 *
 * Every allocation goes through claim_range() so block stats and i_blocks
 * stay consistent.  NOTE(review): many error-check and goto lines are not
 * visible in this excerpt; comments below describe only the visible logic.
 */
95 static errcode_t ext_falloc_helper(ext2_filsys fs,
98 struct ext2_inode *inode,
99 ext2_extent_handle_t handle,
100 struct ext2fs_extent *left_ext,
101 struct ext2fs_extent *right_ext,
102 blk64_t range_start, blk64_t range_len,
105 struct ext2fs_extent newex, ex;
107 blk64_t fillable, pblk, plen, x, y;
108 blk64_t eof_blk = 0, cluster_fill = 0;
110 blk_t max_extent_len, max_uninit_len, max_init_len;
113 printf("%s: ", __func__);
115 printf("left_ext=%llu--%llu, ", left_ext->e_lblk,
116 left_ext->e_lblk + left_ext->e_len - 1);
118 printf("right_ext=%llu--%llu, ", right_ext->e_lblk,
119 right_ext->e_lblk + right_ext->e_len - 1);
120 printf("start=%llu len=%llu, goal=%llu\n", range_start, range_len,
124 /* Can't create initialized extents past EOF? */
125 if (!(flags & EXT2_FALLOCATE_INIT_BEYOND_EOF))
126 eof_blk = EXT2_I_SIZE(inode) / fs->blocksize;
128 /* The allocation goal must be as far into a cluster as range_start. */
129 alloc_goal = (alloc_goal & ~EXT2FS_CLUSTER_MASK(fs)) |
130 (range_start & EXT2FS_CLUSTER_MASK(fs));
/* Max extent lengths, rounded down to a whole number of clusters. */
132 max_uninit_len = EXT_UNINIT_MAX_LEN & ~EXT2FS_CLUSTER_MASK(fs);
133 max_init_len = EXT_INIT_MAX_LEN & ~EXT2FS_CLUSTER_MASK(fs);
135 /* We must lengthen the left extent to the end of the cluster */
136 if (left_ext && EXT2FS_CLUSTER_RATIO(fs) > 1) {
137 /* How many more blocks can be attached to left_ext? */
138 if (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
139 fillable = max_uninit_len - left_ext->e_len;
141 fillable = max_init_len - left_ext->e_len;
143 if (fillable > range_len)
144 fillable = range_len;
149 * If range_start isn't on a cluster boundary, try an
150 * implied cluster allocation for left_ext.
152 cluster_fill = EXT2FS_CLUSTER_RATIO(fs) -
153 (range_start & EXT2FS_CLUSTER_MASK(fs));
154 cluster_fill &= EXT2FS_CLUSTER_MASK(fs);
155 if (cluster_fill == 0)
158 if (cluster_fill > fillable)
159 cluster_fill = fillable;
161 /* Don't expand an initialized left_ext beyond EOF */
162 if (!(flags & EXT2_FALLOCATE_INIT_BEYOND_EOF)) {
163 x = left_ext->e_lblk + left_ext->e_len - 1;
164 dbg_printf("%s: lend=%llu newlend=%llu eofblk=%llu\n",
165 __func__, x, x + cluster_fill, eof_blk);
166 if (eof_blk >= x && eof_blk <= x + cluster_fill)
167 cluster_fill = eof_blk - x;
168 if (cluster_fill == 0)
172 err = ext2fs_extent_goto(handle, left_ext->e_lblk);
/* Consume the implied-cluster blocks from the front of the range. */
175 left_ext->e_len += cluster_fill;
176 range_start += cluster_fill;
177 range_len -= cluster_fill;
178 alloc_goal += cluster_fill;
180 dbg_print_extent("ext_falloc clus left+", left_ext);
181 err = ext2fs_extent_replace(handle, 0, left_ext);
184 err = ext2fs_extent_fix_parents(handle);
/* An initialized extent must not expose stale data: zero new blocks. */
189 if (!(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) {
190 err = ext2fs_zero_blocks2(fs, left_ext->e_pblk +
192 cluster_fill, cluster_fill,
200 /* We must lengthen the right extent to the beginning of the cluster */
201 if (right_ext && EXT2FS_CLUSTER_RATIO(fs) > 1) {
202 /* How much can we attach to right_ext? */
203 if (right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
204 fillable = max_uninit_len - right_ext->e_len;
206 fillable = max_init_len - right_ext->e_len;
208 if (fillable > range_len)
209 fillable = range_len;
214 * If range_end isn't on a cluster boundary, try an implied
215 * cluster allocation for right_ext.
217 cluster_fill = right_ext->e_lblk & EXT2FS_CLUSTER_MASK(fs);
218 if (cluster_fill == 0)
221 err = ext2fs_extent_goto(handle, right_ext->e_lblk);
225 if (cluster_fill > fillable)
226 cluster_fill = fillable;
/* Slide the right extent's start back to absorb the tail blocks. */
227 right_ext->e_lblk -= cluster_fill;
228 right_ext->e_pblk -= cluster_fill;
229 right_ext->e_len += cluster_fill;
230 range_len -= cluster_fill;
232 dbg_print_extent("ext_falloc clus right+", right_ext);
233 err = ext2fs_extent_replace(handle, 0, right_ext);
236 err = ext2fs_extent_fix_parents(handle);
240 /* Zero blocks if necessary */
241 if (!(right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) {
242 err = ext2fs_zero_blocks2(fs, right_ext->e_pblk,
243 cluster_fill, NULL, NULL);
250 /* Merge both extents together, perhaps? */
251 if (left_ext && right_ext) {
252 /* Are the two extents mergeable? */
253 if ((left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) !=
254 (right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT))
257 /* User requires init/uninit but extent is uninit/init. */
258 if (((flags & EXT2_FALLOCATE_FORCE_INIT) &&
259 (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) ||
260 ((flags & EXT2_FALLOCATE_FORCE_UNINIT) &&
261 !(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)))
265 * Skip initialized extent unless user wants to zero blocks
266 * or requires init extent.
268 if (!(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
269 (!(flags & EXT2_FALLOCATE_ZERO_BLOCKS) ||
270 !(flags & EXT2_FALLOCATE_FORCE_INIT)))
273 /* Will it even fit? */
274 x = left_ext->e_len + range_len + right_ext->e_len;
275 if (x > (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT ?
276 max_uninit_len : max_init_len))
279 err = ext2fs_extent_goto(handle, left_ext->e_lblk);
283 /* Allocate blocks */
/* Need the exact physical gap between the two extents. */
284 y = left_ext->e_pblk + left_ext->e_len;
285 err = ext2fs_new_range(fs, EXT2_NEWRANGE_FIXED_GOAL |
286 EXT2_NEWRANGE_MIN_LENGTH, y,
287 right_ext->e_pblk - y + 1, NULL,
/* If we didn't get the whole physically-contiguous gap, no merge. */
291 if (pblk + plen != right_ext->e_pblk)
293 err = claim_range(fs, inode, pblk, plen);
/* Absorb the gap and the right extent into left_ext, then drop
 * the now-redundant right extent from the tree. */
299 dbg_print_extent("ext_falloc merge", left_ext);
300 err = ext2fs_extent_replace(handle, 0, left_ext);
303 err = ext2fs_extent_fix_parents(handle);
306 err = ext2fs_extent_get(handle, EXT2_EXTENT_NEXT_LEAF, &newex);
309 err = ext2fs_extent_delete(handle, 0);
312 err = ext2fs_extent_fix_parents(handle);
315 *right_ext = *left_ext;
318 if (!(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
319 (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
320 err = ext2fs_zero_blocks2(fs, range_start, range_len,
330 /* Extend the left extent */
332 /* How many more blocks can be attached to left_ext? */
333 if (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
334 fillable = max_uninit_len - left_ext->e_len;
335 else if (flags & EXT2_FALLOCATE_ZERO_BLOCKS)
336 fillable = max_init_len - left_ext->e_len;
340 /* User requires init/uninit but extent is uninit/init. */
341 if (((flags & EXT2_FALLOCATE_FORCE_INIT) &&
342 (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) ||
343 ((flags & EXT2_FALLOCATE_FORCE_UNINIT) &&
344 !(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)))
347 if (fillable > range_len)
348 fillable = range_len;
350 /* Don't expand an initialized left_ext beyond EOF */
351 x = left_ext->e_lblk + left_ext->e_len - 1;
352 if (!(flags & EXT2_FALLOCATE_INIT_BEYOND_EOF)) {
353 dbg_printf("%s: lend=%llu newlend=%llu eofblk=%llu\n",
354 __func__, x, x + fillable, eof_blk);
355 if (eof_blk >= x && eof_blk <= x + fillable)
356 fillable = eof_blk - x;
362 /* Test if the right edge of the range is already mapped? */
363 if (EXT2FS_CLUSTER_RATIO(fs) > 1) {
364 err = ext2fs_map_cluster_block(fs, ino, inode,
365 x + fillable, &pblk);
/* Back off to a cluster boundary so we don't double-map. */
369 fillable -= 1 + ((x + fillable)
370 & EXT2FS_CLUSTER_MASK(fs));
375 /* Allocate range of blocks */
376 x = left_ext->e_pblk + left_ext->e_len;
377 err = ext2fs_new_range(fs, EXT2_NEWRANGE_FIXED_GOAL |
378 EXT2_NEWRANGE_MIN_LENGTH,
379 x, fillable, NULL, &pblk, &plen);
382 err = claim_range(fs, inode, pblk, plen);
386 /* Modify left_ext */
387 err = ext2fs_extent_goto(handle, left_ext->e_lblk);
392 left_ext->e_len += plen;
393 dbg_print_extent("ext_falloc left+", left_ext);
394 err = ext2fs_extent_replace(handle, 0, left_ext);
397 err = ext2fs_extent_fix_parents(handle);
401 /* Zero blocks if necessary */
402 if (!(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
403 (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
404 err = ext2fs_zero_blocks2(fs, pblk, plen, NULL, NULL);
411 /* Extend the right extent */
413 /* How much can we attach to right_ext? */
414 if (right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
415 fillable = max_uninit_len - right_ext->e_len;
416 else if (flags & EXT2_FALLOCATE_ZERO_BLOCKS)
417 fillable = max_init_len - right_ext->e_len;
421 /* User requires init/uninit but extent is uninit/init. */
422 if (((flags & EXT2_FALLOCATE_FORCE_INIT) &&
423 (right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) ||
424 ((flags & EXT2_FALLOCATE_FORCE_UNINIT) &&
425 !(right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)))
428 if (fillable > range_len)
429 fillable = range_len;
433 /* Test if the left edge of the range is already mapped? */
434 if (EXT2FS_CLUSTER_RATIO(fs) > 1) {
435 err = ext2fs_map_cluster_block(fs, ino, inode,
436 right_ext->e_lblk - fillable, &pblk);
440 fillable -= EXT2FS_CLUSTER_RATIO(fs) -
441 ((right_ext->e_lblk - fillable)
442 & EXT2FS_CLUSTER_MASK(fs));
448 * FIXME: It would be nice if we could handle allocating a
449 * variable range from a fixed end point instead of just
450 * skipping to the general allocator if the whole range is
453 err = ext2fs_new_range(fs, EXT2_NEWRANGE_FIXED_GOAL |
454 EXT2_NEWRANGE_MIN_LENGTH,
455 right_ext->e_pblk - fillable,
456 fillable, NULL, &pblk, &plen);
/* Claim from the cluster boundary so stats cover the whole cluster. */
459 err = claim_range(fs, inode,
460 pblk & ~EXT2FS_CLUSTER_MASK(fs),
461 plen + (pblk & EXT2FS_CLUSTER_MASK(fs)));
465 /* Modify right_ext */
466 err = ext2fs_extent_goto(handle, right_ext->e_lblk);
470 right_ext->e_lblk -= plen;
471 right_ext->e_pblk -= plen;
472 right_ext->e_len += plen;
473 dbg_print_extent("ext_falloc right+", right_ext);
474 err = ext2fs_extent_replace(handle, 0, right_ext);
477 err = ext2fs_extent_fix_parents(handle);
481 /* Zero blocks if necessary */
482 if (!(right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
483 (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
484 err = ext2fs_zero_blocks2(fs, pblk,
485 plen + cluster_fill, NULL, NULL);
492 /* Try implied cluster alloc on the left and right ends */
493 if (range_len > 0 && (range_start & EXT2FS_CLUSTER_MASK(fs))) {
494 cluster_fill = EXT2FS_CLUSTER_RATIO(fs) -
495 (range_start & EXT2FS_CLUSTER_MASK(fs));
496 cluster_fill &= EXT2FS_CLUSTER_MASK(fs);
497 if (cluster_fill > range_len)
498 cluster_fill = range_len;
499 newex.e_lblk = range_start;
500 err = ext2fs_map_cluster_block(fs, ino, inode, newex.e_lblk,
505 goto try_right_implied;
/* Build a new extent covering just the partial leading cluster. */
507 newex.e_len = cluster_fill;
508 newex.e_flags = (flags & EXT2_FALLOCATE_FORCE_INIT ? 0 :
509 EXT2_EXTENT_FLAGS_UNINIT);
510 dbg_print_extent("ext_falloc iclus left+", &newex);
511 ext2fs_extent_goto(handle, newex.e_lblk);
512 err = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT,
514 if (err == EXT2_ET_NO_CURRENT_NODE)
/* Insert before or after the current extent, by logical order. */
519 if (ex.e_lblk > newex.e_lblk)
520 op = 0; /* insert before */
522 op = EXT2_EXTENT_INSERT_AFTER;
523 dbg_printf("%s: inserting %s lblk %llu newex=%llu\n",
524 __func__, op ? "after" : "before", ex.e_lblk,
526 err = ext2fs_extent_insert(handle, op, &newex);
529 err = ext2fs_extent_fix_parents(handle);
533 if (!(newex.e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
534 (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
535 err = ext2fs_zero_blocks2(fs, newex.e_pblk,
536 newex.e_len, NULL, NULL);
541 range_start += cluster_fill;
542 range_len -= cluster_fill;
/* Same implied-cluster treatment for a partial trailing cluster. */
546 y = range_start + range_len;
547 if (range_len > 0 && (y & EXT2FS_CLUSTER_MASK(fs))) {
548 cluster_fill = y & EXT2FS_CLUSTER_MASK(fs);
549 if (cluster_fill > range_len)
550 cluster_fill = range_len;
551 newex.e_lblk = y & ~EXT2FS_CLUSTER_MASK(fs);
552 err = ext2fs_map_cluster_block(fs, ino, inode, newex.e_lblk,
559 newex.e_len = cluster_fill;
560 newex.e_flags = (flags & EXT2_FALLOCATE_FORCE_INIT ? 0 :
561 EXT2_EXTENT_FLAGS_UNINIT);
562 dbg_print_extent("ext_falloc iclus right+", &newex);
563 ext2fs_extent_goto(handle, newex.e_lblk);
564 err = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT,
566 if (err == EXT2_ET_NO_CURRENT_NODE)
571 if (ex.e_lblk > newex.e_lblk)
572 op = 0; /* insert before */
574 op = EXT2_EXTENT_INSERT_AFTER;
575 dbg_printf("%s: inserting %s lblk %llu newex=%llu\n",
576 __func__, op ? "after" : "before", ex.e_lblk,
578 err = ext2fs_extent_insert(handle, op, &newex);
581 err = ext2fs_extent_fix_parents(handle);
585 if (!(newex.e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
586 (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
587 err = ext2fs_zero_blocks2(fs, newex.e_pblk,
588 newex.e_len, NULL, NULL);
593 range_len -= cluster_fill;
/* General case: allocate and map what's left in extent-sized chunks. */
600 newex.e_lblk = range_start;
601 if (flags & EXT2_FALLOCATE_FORCE_INIT) {
602 max_extent_len = max_init_len;
605 max_extent_len = max_uninit_len;
606 newex.e_flags = EXT2_EXTENT_FLAGS_UNINIT;
610 for (x = 0; x < y;) {
611 cluster_fill = newex.e_lblk & EXT2FS_CLUSTER_MASK(fs);
612 fillable = min(range_len + cluster_fill, max_extent_len);
613 err = ext2fs_new_range(fs, 0, pblk & ~EXT2FS_CLUSTER_MASK(fs),
618 err = claim_range(fs, inode, pblk, plen);
/* Trim the partial leading cluster out of the mapped extent. */
623 newex.e_pblk = pblk + cluster_fill;
624 newex.e_len = plen - cluster_fill;
625 dbg_print_extent("ext_falloc create", &newex);
626 ext2fs_extent_goto(handle, newex.e_lblk);
627 err = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT,
629 if (err == EXT2_ET_NO_CURRENT_NODE)
634 if (ex.e_lblk > newex.e_lblk)
635 op = 0; /* insert before */
637 op = EXT2_EXTENT_INSERT_AFTER;
638 dbg_printf("%s: inserting %s lblk %llu newex=%llu\n",
639 __func__, op ? "after" : "before", ex.e_lblk,
641 err = ext2fs_extent_insert(handle, op, &newex);
644 err = ext2fs_extent_fix_parents(handle);
648 if (!(newex.e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
649 (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
650 err = ext2fs_zero_blocks2(fs, pblk, plen, NULL, NULL);
655 /* Update variables at end of loop */
656 x += plen - cluster_fill;
657 range_len -= plen - cluster_fill;
658 newex.e_lblk += plen - cluster_fill;
659 pblk += plen - cluster_fill;
/* Wrap the physical search goal back to the start of the fs. */
660 if (pblk >= ext2fs_blocks_count(fs->super))
661 pblk = fs->super->s_first_data_block;
/*
 * fallocate for an extent-mapped inode: walk the extent tree across
 * [start, start + len - 1], and for every hole between existing extents
 * call ext_falloc_helper() to allocate and map blocks into it, passing
 * the neighboring extents so the helper can extend them in place.
 * Returns 0 or the first ext2fs error encountered.
 * NOTE(review): some error-check/goto lines are elided in this excerpt.
 */
668 static errcode_t extent_fallocate(ext2_filsys fs, int flags, ext2_ino_t ino,
669 struct ext2_inode *inode, blk64_t goal,
670 blk64_t start, blk64_t len)
672 ext2_extent_handle_t handle;
673 struct ext2fs_extent left_extent, right_extent;
674 struct ext2fs_extent *left_adjacent, *right_adjacent;
676 blk64_t range_start, range_end = 0, end, next;
677 blk64_t count, goal_distance;
679 end = start + len - 1;
680 err = ext2fs_extent_open2(fs, ino, inode, &handle);
685 * Find the extent closest to the start of the alloc range. We don't
686 * check the return value because _goto() sets the current node to the
687 * next-lowest extent if 'start' is in a hole; or the next-highest
688 * extent if there aren't any lower ones; or doesn't set a current node
689 * if there was a real error reading the extent tree. In that case,
690 * _get() will error out.
693 ext2fs_extent_goto(handle, start);
694 err = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT, &left_extent);
/* Empty extent tree: pick a goal from the inode's group and allocate
 * the entire range with no adjacent extents. */
695 if (err == EXT2_ET_NO_CURRENT_NODE) {
696 blk64_t max_blocks = ext2fs_blocks_count(fs->super);
699 goal = ext2fs_find_inode_goal(fs, ino, inode, start);
700 err = ext2fs_find_first_zero_block_bitmap2(fs->block_map,
701 goal, max_blocks - 1, &goal);
703 err = ext_falloc_helper(fs, flags, ino, inode, handle, NULL,
704 NULL, start, len, goal);
709 dbg_print_extent("ext_falloc initial", &left_extent);
710 next = left_extent.e_lblk + left_extent.e_len;
711 if (left_extent.e_lblk > start) {
712 /* The nearest extent we found was beyond start??? */
713 goal = left_extent.e_pblk - (left_extent.e_lblk - start);
714 err = ext_falloc_helper(fs, flags, ino, inode, handle, NULL,
716 left_extent.e_lblk - start, goal);
/* start falls inside or immediately after left_extent, so that extent
 * is a usable left neighbor for the first hole. */
721 } else if (next >= start) {
723 left_adjacent = &left_extent;
726 left_adjacent = NULL;
728 goal = left_extent.e_pblk + (range_start - left_extent.e_lblk);
/* Main loop: advance extent by extent, filling the hole before each. */
731 err = ext2fs_extent_get(handle, EXT2_EXTENT_NEXT_LEAF,
733 dbg_printf("%s: ino=%d get next =%d\n", __func__, ino,
735 dbg_print_extent("ext_falloc next", &right_extent);
736 /* Stop if we've seen this extent before */
737 if (!err && right_extent.e_lblk <= left_extent.e_lblk)
738 err = EXT2_ET_EXTENT_NO_NEXT;
740 if (err && err != EXT2_ET_EXTENT_NO_NEXT)
742 if (err == EXT2_ET_EXTENT_NO_NEXT ||
743 right_extent.e_lblk > end + 1) {
745 right_adjacent = NULL;
747 /* Handle right_extent.e_lblk <= end */
748 range_end = right_extent.e_lblk - 1;
749 right_adjacent = &right_extent;
/* Prefer the goal nearest the hole: switch to a right-derived goal
 * when the right extent is closer than the left one. */
751 goal_distance = range_start - next;
752 if (err != EXT2_ET_EXTENT_NO_NEXT &&
753 goal_distance > (range_end - right_extent.e_lblk))
754 goal = right_extent.e_pblk -
755 (right_extent.e_lblk - range_start);
757 dbg_printf("%s: ino=%d rstart=%llu rend=%llu\n", __func__, ino,
758 range_start, range_end);
760 if (range_start <= range_end) {
761 count = range_end - range_start + 1;
762 err = ext_falloc_helper(fs, flags, ino, inode, handle,
763 left_adjacent, right_adjacent,
764 range_start, count, goal);
769 if (range_end == end)
/* The helper may have moved the tree cursor; re-seat it on the
 * right extent before continuing the walk. */
772 err = ext2fs_extent_goto(handle, right_extent.e_lblk);
775 next = right_extent.e_lblk + right_extent.e_len;
776 left_extent = right_extent;
777 left_adjacent = &left_extent;
779 goal = left_extent.e_pblk + (range_start - left_extent.e_lblk);
780 } while (range_end < end);
783 ext2fs_extent_free(handle);
788 * Map physical blocks to a range of logical blocks within a file. The range
789 * of logical blocks is (start, start + len). If there are already extents,
790 * this function will try to extend those mappings; otherwise, it will try to
791 * map start as if logical block 0 points to goal. If goal is ~0ULL, then the
792 * goal is calculated based on the inode group.
795 * - EXT2_FALLOCATE_ZERO_BLOCKS: Zero the blocks that are allocated.
796 * - EXT2_FALLOCATE_FORCE_INIT: Create only initialized extents.
797 * - EXT2_FALLOCATE_FORCE_UNINIT: Create only uninitialized extents.
798 * - EXT2_FALLOCATE_INIT_BEYOND_EOF: Create extents beyond EOF.
800 * If neither FORCE_INIT nor FORCE_UNINIT are specified, this function will
801 * try to expand any extents it finds, zeroing blocks as necessary.
803 errcode_t ext2fs_fallocate(ext2_filsys fs, int flags, ext2_ino_t ino,
804 struct ext2_inode *inode, blk64_t goal,
805 blk64_t start, blk64_t len)
807 struct ext2_inode inode_buf;
808 blk64_t blk, x, zero_blk, last = 0;
/* FORCE_INIT and FORCE_UNINIT are mutually exclusive; reject unknown
 * flag bits outright. */
812 if (((flags & EXT2_FALLOCATE_FORCE_INIT) &&
813 (flags & EXT2_FALLOCATE_FORCE_UNINIT)) ||
814 (flags & ~EXT2_FALLOCATE_ALL_FLAGS))
815 return EXT2_ET_INVALID_ARGUMENT;
/* A request longer than the whole filesystem can never succeed. */
817 if (len > ext2fs_blocks_count(fs->super))
818 return EXT2_ET_BLOCK_ALLOC_FAIL;
822 /* Read inode structure if necessary */
824 err = ext2fs_read_inode(fs, ino, &inode_buf);
829 dbg_printf("%s: ino=%d start=%llu len=%llu goal=%llu\n", __func__, ino,
/* Extent-mapped inode: use the extent-based fast path. */
832 if (inode->i_flags & EXT4_EXTENTS_FL) {
833 err = extent_fallocate(fs, flags, ino, inode, goal, start, len);
837 /* XXX: Allocate a bunch of blocks the slow way */
838 for (blk = start; blk < start + len; blk++) {
839 err = ext2fs_bmap2(fs, ino, inode, NULL, 0, blk, 0, &x);
845 err = ext2fs_bmap2(fs, ino, inode, NULL, BMAP_ALLOC,
/* Flush the pending zeroing run when the newly-mapped block is not
 * contiguous with it, or when the run has grown large (64K blocks). */
849 if ((zero_len && (x != last+1)) ||
850 (zero_len >= 65536)) {
851 err = ext2fs_zero_blocks2(fs, zero_blk, zero_len,
/* Write the inode back only if we were operating on our local copy. */
867 if (inode == &inode_buf)
868 ext2fs_write_inode(fs, ino, inode);
/* Zero any final pending run before returning. */
871 ext2fs_zero_blocks2(fs, zero_blk, zero_len, NULL, NULL);