/*
 * fallocate.c -- Allocate large chunks of file.
 *
 * Copyright (C) 2014 Oracle.
 *
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 */
/* Classic ternary min: evaluates each argument twice, so callers must not
 * pass expressions with side effects (e.g. min(i++, j)). */
#define min(a, b) ((a) < (b) ? (a) : (b))
/* Debug build: print immediately and flush so output survives a crash.
 * NOTE(review): the surrounding #if/#else preprocessor lines are elided in
 * this chunk; the two definitions below are the two branches. */
# define dbg_printf(f, a...) do {printf(f, ## a); fflush(stdout); } while (0)
/* Non-debug build: dbg_printf compiles away to nothing. */
# define dbg_printf(f, a...)
/*
 * Extent-based fallocate code.
 *
 * Find runs of unmapped logical blocks by starting at start and walking the
 * extents until we reach the end of the range we want.
 *
 * For each run of unmapped blocks, try to find the extents on either side of
 * the range.  If there's a left extent that can grow by at least a cluster and
 * there are lblocks between start and the next lcluster after start, see if
 * there's an implied cluster allocation; if so, zero the blocks (if the left
 * extent is initialized) and adjust the extent.  Ditto for the blocks between
 * the end of the last full lcluster and end, if there's a right extent.
 *
 * Try to attach as much as we can to the left extent, then try to attach as
 * much as we can to the right extent.  For the remainder, try to allocate the
 * whole range; map in whatever we get; and repeat until we're done.
 *
 * To attach to a left extent, figure out the maximum amount we can add to the
 * extent and try to allocate that much, and append if successful.  To attach
 * to a right extent, figure out the max we can add to the extent, try to
 * allocate that much, and prepend if successful.
 *
 * We need an alloc_range function that tells us how much we can allocate given
 * a maximum length and one of a suggested start, a fixed start, or a fixed end.
 *
 * Every time we modify the extent tree we also need to update the block stats.
 *
 * At the end, update i_blocks and i_size appropriately.
 */
57 static void dbg_print_extent(char *desc, struct ext2fs_extent *extent)
62 printf("extent: lblk %llu--%llu, len %u, pblk %llu, flags: ",
63 extent->e_lblk, extent->e_lblk + extent->e_len - 1,
64 extent->e_len, extent->e_pblk);
65 if (extent->e_flags & EXT2_EXTENT_FLAGS_LEAF)
66 fputs("LEAF ", stdout);
67 if (extent->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
68 fputs("UNINIT ", stdout);
69 if (extent->e_flags & EXT2_EXTENT_FLAGS_SECOND_VISIT)
70 fputs("2ND_VISIT ", stdout);
72 fputs("(none)", stdout);
78 static errcode_t claim_range(ext2_filsys fs, struct ext2_inode *inode,
79 blk64_t blk, blk64_t len)
83 clusters = (len + EXT2FS_CLUSTER_RATIO(fs) - 1) /
84 EXT2FS_CLUSTER_RATIO(fs);
85 ext2fs_block_alloc_stats_range(fs, blk,
86 clusters * EXT2FS_CLUSTER_RATIO(fs), +1);
87 return ext2fs_iblk_add_blocks(fs, inode, clusters);
/*
 * Do the allocation work for one unmapped gap in an extent-mapped file.
 *
 * Allocates and maps blocks for the logical range [range_start,
 * range_start + range_len).  @left_ext / @right_ext, when non-NULL,
 * describe the extents immediately adjacent to the gap.  The strategy,
 * in order: (1) on bigalloc filesystems, extend the adjacent extents to
 * cluster boundaries ("implied cluster" fills); (2) try to merge left
 * and right into one extent covering the whole gap; (3) grow the left
 * extent forward, then the right extent backward; (4) handle implied
 * clusters at both ragged ends of what remains; (5) loop, allocating
 * fresh extents for the rest.
 *
 * NOTE(review): this chunk is missing interleaved source lines — the
 * `int flags`, `ext2_ino_t ino` and `blk64_t alloc_goal` parameter
 * lines, most `if (err) goto out;` checks, several `else` lines,
 * braces, and goto labels.  The code below reproduces only what is
 * visible; do not treat it as complete.
 */
static errcode_t ext_falloc_helper(ext2_filsys fs,
				   struct ext2_inode *inode,
				   ext2_extent_handle_t handle,
				   struct ext2fs_extent *left_ext,
				   struct ext2fs_extent *right_ext,
				   blk64_t range_start, blk64_t range_len,
	struct ext2fs_extent newex, ex;
	blk64_t fillable, pblk, plen, x, y;
	blk64_t eof_blk = 0, cluster_fill = 0;
	blk_t max_extent_len, max_uninit_len, max_init_len;

	/* Debug trace of the incoming range and neighbors (presumably
	 * under a DEBUG guard elided from this chunk — TODO confirm). */
	printf("%s: ", __func__);
	printf("left_ext=%llu--%llu, ", left_ext->e_lblk,
	       left_ext->e_lblk + left_ext->e_len - 1);
	printf("right_ext=%llu--%llu, ", right_ext->e_lblk,
	       right_ext->e_lblk + right_ext->e_len - 1);
	printf("start=%llu len=%llu, goal=%llu\n", range_start, range_len,

	/* Can't create initialized extents past EOF? */
	if (!(flags & EXT2_FALLOCATE_INIT_BEYOND_EOF))
		eof_blk = EXT2_I_SIZE(inode) / fs->blocksize;

	/* The allocation goal must be as far into a cluster as range_start. */
	alloc_goal = (alloc_goal & ~EXT2FS_CLUSTER_MASK(fs)) |
		     (range_start & EXT2FS_CLUSTER_MASK(fs));

	/* Largest legal extent lengths, rounded down to whole clusters. */
	max_uninit_len = EXT_UNINIT_MAX_LEN & ~EXT2FS_CLUSTER_MASK(fs);
	max_init_len = EXT_INIT_MAX_LEN & ~EXT2FS_CLUSTER_MASK(fs);

	/* We must lengthen the left extent to the end of the cluster */
	if (left_ext && EXT2FS_CLUSTER_RATIO(fs) > 1) {
		/* How many more blocks can be attached to left_ext? */
		if (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
			fillable = max_uninit_len - left_ext->e_len;
			/* NOTE(review): the "else" line is elided here */
			fillable = max_init_len - left_ext->e_len;
		if (fillable > range_len)
			fillable = range_len;
		/*
		 * If range_start isn't on a cluster boundary, try an
		 * implied cluster allocation for left_ext.
		 */
		cluster_fill = EXT2FS_CLUSTER_RATIO(fs) -
			       (range_start & EXT2FS_CLUSTER_MASK(fs));
		cluster_fill &= EXT2FS_CLUSTER_MASK(fs);
		if (cluster_fill == 0)
			/* NOTE(review): target of this "if" is elided */
		if (cluster_fill > fillable)
			cluster_fill = fillable;
		/* Don't expand an initialized left_ext beyond EOF */
		if (!(flags & EXT2_FALLOCATE_INIT_BEYOND_EOF)) {
			x = left_ext->e_lblk + left_ext->e_len - 1;
			dbg_printf("%s: lend=%llu newlend=%llu eofblk=%llu\n",
				   __func__, x, x + cluster_fill, eof_blk);
			/* Clamp the fill so the extent ends at EOF. */
			if (eof_blk >= x && eof_blk <= x + cluster_fill)
				cluster_fill = eof_blk - x;
			if (cluster_fill == 0)
		err = ext2fs_extent_goto(handle, left_ext->e_lblk);
		/* Grow left_ext over the fill and shrink the work range. */
		left_ext->e_len += cluster_fill;
		range_start += cluster_fill;
		range_len -= cluster_fill;
		alloc_goal += cluster_fill;
		dbg_print_extent("ext_falloc clus left+", left_ext);
		err = ext2fs_extent_replace(handle, 0, left_ext);
		err = ext2fs_extent_fix_parents(handle);
		/* An initialized extent must not expose stale data. */
		if (!(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) {
			err = ext2fs_zero_blocks2(fs, left_ext->e_pblk +
						  cluster_fill, cluster_fill,

	/* We must lengthen the right extent to the beginning of the cluster */
	if (right_ext && EXT2FS_CLUSTER_RATIO(fs) > 1) {
		/* How much can we attach to right_ext? */
		if (right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
			fillable = max_uninit_len - right_ext->e_len;
			/* NOTE(review): the "else" line is elided here */
			fillable = max_init_len - right_ext->e_len;
		if (fillable > range_len)
			fillable = range_len;
		/*
		 * If range_end isn't on a cluster boundary, try an implied
		 * cluster allocation for right_ext.
		 */
		cluster_fill = right_ext->e_lblk & EXT2FS_CLUSTER_MASK(fs);
		if (cluster_fill == 0)
			/* NOTE(review): target of this "if" is elided */
		err = ext2fs_extent_goto(handle, right_ext->e_lblk);
		if (cluster_fill > fillable)
			cluster_fill = fillable;
		/* Prepend the fill: move the extent start back in both
		 * logical and physical space and lengthen it. */
		right_ext->e_lblk -= cluster_fill;
		right_ext->e_pblk -= cluster_fill;
		right_ext->e_len += cluster_fill;
		range_len -= cluster_fill;
		dbg_print_extent("ext_falloc clus right+", right_ext);
		err = ext2fs_extent_replace(handle, 0, right_ext);
		err = ext2fs_extent_fix_parents(handle);
		/* Zero blocks if necessary */
		if (!(right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) {
			err = ext2fs_zero_blocks2(fs, right_ext->e_pblk,
						  cluster_fill, NULL, NULL);

	/* Merge both extents together, perhaps? */
	if (left_ext && right_ext) {
		/* Are the two extents mergeable? */
		if ((left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) !=
		    (right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT))
		/* User requires init/uninit but extent is uninit/init. */
		if (((flags & EXT2_FALLOCATE_FORCE_INIT) &&
		     (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) ||
		    ((flags & EXT2_FALLOCATE_FORCE_UNINIT) &&
		     !(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)))
		/*
		 * Skip initialized extent unless user wants to zero blocks
		 * or requires init extent.
		 */
		if (!(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
		    (!(flags & EXT2_FALLOCATE_ZERO_BLOCKS) ||
		     !(flags & EXT2_FALLOCATE_FORCE_INIT)))
		/* Will it even fit? */
		x = left_ext->e_len + range_len + right_ext->e_len;
		if (x > (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT ?
			 max_uninit_len : max_init_len))
		err = ext2fs_extent_goto(handle, left_ext->e_lblk);
		/* Allocate blocks */
		y = left_ext->e_pblk + left_ext->e_len;
		err = ext2fs_new_range(fs, EXT2_NEWRANGE_FIXED_GOAL |
				       EXT2_NEWRANGE_MIN_LENGTH, y,
				       right_ext->e_pblk - y + 1, NULL,
		/* The merge only works if we got the entire gap. */
		if (pblk + plen != right_ext->e_pblk)
		err = claim_range(fs, inode, pblk, plen);
		/* Expand left_ext over the gap, then delete right_ext. */
		dbg_print_extent("ext_falloc merge", left_ext);
		err = ext2fs_extent_replace(handle, 0, left_ext);
		err = ext2fs_extent_fix_parents(handle);
		err = ext2fs_extent_get(handle, EXT2_EXTENT_NEXT_LEAF, &newex);
		err = ext2fs_extent_delete(handle, 0);
		err = ext2fs_extent_fix_parents(handle);
		/* Callers see one merged extent on both sides now. */
		*right_ext = *left_ext;
		if (!(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
		    (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
			err = ext2fs_zero_blocks2(fs, range_start, range_len,

	/* Extend the left extent */
		/* How many more blocks can be attached to left_ext? */
		if (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
			fillable = max_uninit_len - left_ext->e_len;
		else if (flags & EXT2_FALLOCATE_ZERO_BLOCKS)
			fillable = max_init_len - left_ext->e_len;
		/* User requires init/uninit but extent is uninit/init. */
		if (((flags & EXT2_FALLOCATE_FORCE_INIT) &&
		     (left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) ||
		    ((flags & EXT2_FALLOCATE_FORCE_UNINIT) &&
		     !(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)))
		if (fillable > range_len)
			fillable = range_len;
		/* Don't expand an initialized left_ext beyond EOF */
		x = left_ext->e_lblk + left_ext->e_len - 1;
		if (!(flags & EXT2_FALLOCATE_INIT_BEYOND_EOF)) {
			dbg_printf("%s: lend=%llu newlend=%llu eofblk=%llu\n",
				   __func__, x, x + fillable, eof_blk);
			if (eof_blk >= x && eof_blk <= x + fillable)
				fillable = eof_blk - x;
		/* Test if the right edge of the range is already mapped? */
		if (EXT2FS_CLUSTER_RATIO(fs) > 1) {
			err = ext2fs_map_cluster_block(fs, ino, inode,
						       x + fillable, &pblk);
				/* Back off to the previous cluster boundary. */
				fillable -= 1 + ((x + fillable)
						 & EXT2FS_CLUSTER_MASK(fs));
		/* Allocate range of blocks */
		x = left_ext->e_pblk + left_ext->e_len;
		err = ext2fs_new_range(fs, EXT2_NEWRANGE_FIXED_GOAL |
				       EXT2_NEWRANGE_MIN_LENGTH,
				       x, fillable, NULL, &pblk, &plen);
		err = claim_range(fs, inode, pblk, plen);
		/* Modify left_ext */
		err = ext2fs_extent_goto(handle, left_ext->e_lblk);
		left_ext->e_len += plen;
		dbg_print_extent("ext_falloc left+", left_ext);
		err = ext2fs_extent_replace(handle, 0, left_ext);
		err = ext2fs_extent_fix_parents(handle);
		/* Zero blocks if necessary */
		if (!(left_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
		    (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
			err = ext2fs_zero_blocks2(fs, pblk, plen, NULL, NULL);

	/* Extend the right extent */
		/* How much can we attach to right_ext? */
		if (right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)
			fillable = max_uninit_len - right_ext->e_len;
		else if (flags & EXT2_FALLOCATE_ZERO_BLOCKS)
			fillable = max_init_len - right_ext->e_len;
		/* User requires init/uninit but extent is uninit/init. */
		if (((flags & EXT2_FALLOCATE_FORCE_INIT) &&
		     (right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)) ||
		    ((flags & EXT2_FALLOCATE_FORCE_UNINIT) &&
		     !(right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT)))
		if (fillable > range_len)
			fillable = range_len;
		/* Test if the left edge of the range is already mapped? */
		if (EXT2FS_CLUSTER_RATIO(fs) > 1) {
			err = ext2fs_map_cluster_block(fs, ino, inode,
					right_ext->e_lblk - fillable, &pblk);
				/* Advance to the next cluster boundary. */
				fillable -= EXT2FS_CLUSTER_RATIO(fs) -
					    ((right_ext->e_lblk - fillable)
					     & EXT2FS_CLUSTER_MASK(fs));
		/*
		 * FIXME: It would be nice if we could handle allocating a
		 * variable range from a fixed end point instead of just
		 * skipping to the general allocator if the whole range is
		 * unavailable (remainder of sentence elided in this chunk).
		 */
		err = ext2fs_new_range(fs, EXT2_NEWRANGE_FIXED_GOAL |
				       EXT2_NEWRANGE_MIN_LENGTH,
				       right_ext->e_pblk - fillable,
				       fillable, NULL, &pblk, &plen);
		/* Claim whole clusters: round pblk down and widen plen. */
		err = claim_range(fs, inode,
				  pblk & ~EXT2FS_CLUSTER_MASK(fs),
				  plen + (pblk & EXT2FS_CLUSTER_MASK(fs)));
		/* Modify right_ext */
		err = ext2fs_extent_goto(handle, right_ext->e_lblk);
		right_ext->e_lblk -= plen;
		right_ext->e_pblk -= plen;
		right_ext->e_len += plen;
		dbg_print_extent("ext_falloc right+", right_ext);
		err = ext2fs_extent_replace(handle, 0, right_ext);
		err = ext2fs_extent_fix_parents(handle);
		/* Zero blocks if necessary */
		if (!(right_ext->e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
		    (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
			err = ext2fs_zero_blocks2(fs, pblk,
						  plen + cluster_fill, NULL, NULL);

	/* Try implied cluster alloc on the left and right ends */
	if (range_len > 0 && (range_start & EXT2FS_CLUSTER_MASK(fs))) {
		cluster_fill = EXT2FS_CLUSTER_RATIO(fs) -
			       (range_start & EXT2FS_CLUSTER_MASK(fs));
		cluster_fill &= EXT2FS_CLUSTER_MASK(fs);
		if (cluster_fill > range_len)
			cluster_fill = range_len;
		newex.e_lblk = range_start;
		err = ext2fs_map_cluster_block(fs, ino, inode, newex.e_lblk,
			goto try_right_implied;
		newex.e_len = cluster_fill;
		/* New extent is uninit unless the caller forces init. */
		newex.e_flags = (flags & EXT2_FALLOCATE_FORCE_INIT ? 0 :
				 EXT2_EXTENT_FLAGS_UNINIT);
		dbg_print_extent("ext_falloc iclus left+", &newex);
		ext2fs_extent_goto(handle, newex.e_lblk);
		err = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT,
		if (err == EXT2_ET_NO_CURRENT_NODE)
		/* Insert relative to the current extent's position. */
		if (ex.e_lblk > newex.e_lblk)
			op = 0; /* insert before */
			op = EXT2_EXTENT_INSERT_AFTER;
		dbg_printf("%s: inserting %s lblk %llu newex=%llu\n",
			   __func__, op ? "after" : "before", ex.e_lblk,
		err = ext2fs_extent_insert(handle, op, &newex);
		err = ext2fs_extent_fix_parents(handle);
		if (!(newex.e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
		    (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
			err = ext2fs_zero_blocks2(fs, newex.e_pblk,
						  newex.e_len, NULL, NULL);
		range_start += cluster_fill;
		range_len -= cluster_fill;

	/* Same implied-cluster dance for the right end of the range. */
	y = range_start + range_len;
	if (range_len > 0 && (y & EXT2FS_CLUSTER_MASK(fs))) {
		cluster_fill = y & EXT2FS_CLUSTER_MASK(fs);
		if (cluster_fill > range_len)
			cluster_fill = range_len;
		newex.e_lblk = y & ~EXT2FS_CLUSTER_MASK(fs);
		err = ext2fs_map_cluster_block(fs, ino, inode, newex.e_lblk,
		newex.e_len = cluster_fill;
		newex.e_flags = (flags & EXT2_FALLOCATE_FORCE_INIT ? 0 :
				 EXT2_EXTENT_FLAGS_UNINIT);
		dbg_print_extent("ext_falloc iclus right+", &newex);
		ext2fs_extent_goto(handle, newex.e_lblk);
		err = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT,
		if (err == EXT2_ET_NO_CURRENT_NODE)
		if (ex.e_lblk > newex.e_lblk)
			op = 0; /* insert before */
			op = EXT2_EXTENT_INSERT_AFTER;
		dbg_printf("%s: inserting %s lblk %llu newex=%llu\n",
			   __func__, op ? "after" : "before", ex.e_lblk,
		err = ext2fs_extent_insert(handle, op, &newex);
		err = ext2fs_extent_fix_parents(handle);
		if (!(newex.e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
		    (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
			err = ext2fs_zero_blocks2(fs, newex.e_pblk,
						  newex.e_len, NULL, NULL);
		range_len -= cluster_fill;

	/* General allocation loop for whatever is left of the range. */
	newex.e_lblk = range_start;
	if (flags & EXT2_FALLOCATE_FORCE_INIT) {
		max_extent_len = max_init_len;
		/* NOTE(review): the "} else {" line is elided here */
		max_extent_len = max_uninit_len;
		newex.e_flags = EXT2_EXTENT_FLAGS_UNINIT;
	/* x counts blocks mapped so far; y is the total to map
	 * (its assignment is elided from this chunk — TODO confirm). */
	for (x = 0; x < y;) {
		cluster_fill = newex.e_lblk & EXT2FS_CLUSTER_MASK(fs);
		fillable = min(range_len + cluster_fill, max_extent_len);
		err = ext2fs_new_range(fs, 0, pblk & ~EXT2FS_CLUSTER_MASK(fs),
		err = claim_range(fs, inode, pblk, plen);
		/* Skip the partial leading cluster already accounted for. */
		newex.e_pblk = pblk + cluster_fill;
		newex.e_len = plen - cluster_fill;
		dbg_print_extent("ext_falloc create", &newex);
		ext2fs_extent_goto(handle, newex.e_lblk);
		err = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT,
		if (err == EXT2_ET_NO_CURRENT_NODE)
		if (ex.e_lblk > newex.e_lblk)
			op = 0; /* insert before */
			op = EXT2_EXTENT_INSERT_AFTER;
		dbg_printf("%s: inserting %s lblk %llu newex=%llu\n",
			   __func__, op ? "after" : "before", ex.e_lblk,
		err = ext2fs_extent_insert(handle, op, &newex);
		err = ext2fs_extent_fix_parents(handle);
		if (!(newex.e_flags & EXT2_EXTENT_FLAGS_UNINIT) &&
		    (flags & EXT2_FALLOCATE_ZERO_BLOCKS)) {
			err = ext2fs_zero_blocks2(fs, pblk, plen, NULL, NULL);
		/* Update variables at end of loop */
		x += plen - cluster_fill;
		range_len -= plen - cluster_fill;
		newex.e_lblk += plen - cluster_fill;
		pblk += plen - cluster_fill;
		/* Wrap the physical search goal at the end of the fs. */
		if (pblk >= ext2fs_blocks_count(fs->super))
			pblk = fs->super->s_first_data_block;
/*
 * fallocate driver for extent-mapped inodes.
 *
 * Walks the extent tree from @start, and for every hole found in
 * [start, start + len) calls ext_falloc_helper() with the extents that
 * bound the hole (so the helper can try to extend/merge them) plus an
 * allocation goal derived from whichever neighbor is closer.
 *
 * NOTE(review): this chunk is missing interleaved source lines — the
 * opening "{", the "do {" of the main loop, the "out:" label/cleanup,
 * and most "if (err) goto out;" checks.  The code below reproduces only
 * what is visible.
 */
static errcode_t extent_fallocate(ext2_filsys fs, int flags, ext2_ino_t ino,
				  struct ext2_inode *inode, blk64_t goal,
				  blk64_t start, blk64_t len)
	ext2_extent_handle_t handle;
	struct ext2fs_extent left_extent, right_extent;
	struct ext2fs_extent *left_adjacent, *right_adjacent;
	blk64_t range_start, range_end = 0, end, next;
	blk64_t count, goal_distance;

	end = start + len - 1;
	err = ext2fs_extent_open2(fs, ino, inode, &handle);
	/*
	 * Find the extent closest to the start of the alloc range.  We don't
	 * check the return value because _goto() sets the current node to the
	 * next-lowest extent if 'start' is in a hole; or the next-highest
	 * extent if there aren't any lower ones; or doesn't set a current node
	 * if there was a real error reading the extent tree.  In that case,
	 * _get() will error out.
	 */
	ext2fs_extent_goto(handle, start);
	err = ext2fs_extent_get(handle, EXT2_EXTENT_CURRENT, &left_extent);
	if (err == EXT2_ET_NO_CURRENT_NODE) {
		blk64_t max_blocks = ext2fs_blocks_count(fs->super);

		/* Empty tree: pick a goal near the inode and allocate the
		 * whole requested range with no adjacent extents. */
		goal = ext2fs_find_inode_goal(fs, ino, inode, start);
		err = ext2fs_find_first_zero_block_bitmap2(fs->block_map,
				goal, max_blocks - 1, &goal);
		err = ext_falloc_helper(fs, flags, ino, inode, handle, NULL,
					NULL, start, len, goal);
	dbg_print_extent("ext_falloc initial", &left_extent);
	next = left_extent.e_lblk + left_extent.e_len;
	if (left_extent.e_lblk > start) {
		/* The nearest extent we found was beyond start??? */
		goal = left_extent.e_pblk - (left_extent.e_lblk - start);
		err = ext_falloc_helper(fs, flags, ino, inode, handle, NULL,
				left_extent.e_lblk - start, goal);
	} else if (next >= start) {
		/* Extent ends at/after start: it is adjacent on the left. */
		left_adjacent = &left_extent;
		/* NOTE(review): the "} else {" line is elided here */
		left_adjacent = NULL;
	goal = left_extent.e_pblk + (range_start - left_extent.e_lblk);
	goal_distance = range_start - next;

	/* Main loop: one iteration per hole (the "do {" is elided). */
		err = ext2fs_extent_get(handle, EXT2_EXTENT_NEXT_LEAF,
		dbg_printf("%s: ino=%d get next =%d\n", __func__, ino,
		dbg_print_extent("ext_falloc next", &right_extent);
		/* Stop if we've seen this extent before */
		if (!err && right_extent.e_lblk <= left_extent.e_lblk)
			err = EXT2_ET_EXTENT_NO_NEXT;
		if (err && err != EXT2_ET_EXTENT_NO_NEXT)
		if (err == EXT2_ET_EXTENT_NO_NEXT ||
		    right_extent.e_lblk > end + 1) {
			/* No extent on the right within our range. */
			right_adjacent = NULL;
			/* Handle right_extent.e_lblk <= end */
			range_end = right_extent.e_lblk - 1;
			right_adjacent = &right_extent;
		/* Prefer the right neighbor's goal when it is closer. */
		if (err != EXT2_ET_EXTENT_NO_NEXT &&
		    goal_distance > (range_end - right_extent.e_lblk)) {
			goal = right_extent.e_pblk -
			       (right_extent.e_lblk - range_start);
			goal_distance = range_end - right_extent.e_lblk;
		dbg_printf("%s: ino=%d rstart=%llu rend=%llu\n", __func__, ino,
			   range_start, range_end);
		if (range_start <= range_end) {
			count = range_end - range_start + 1;
			err = ext_falloc_helper(fs, flags, ino, inode, handle,
						left_adjacent, right_adjacent,
						range_start, count, goal);
		if (range_end == end)
		/* The helper may have moved us; reposition the cursor. */
		err = ext2fs_extent_goto(handle, right_extent.e_lblk);
		/* Slide the window: the right extent becomes the new left. */
		next = right_extent.e_lblk + right_extent.e_len;
		left_extent = right_extent;
		left_adjacent = &left_extent;
		goal = left_extent.e_pblk + (range_start - left_extent.e_lblk);
		goal_distance = range_start - next;
	} while (range_end < end);

	ext2fs_extent_free(handle);
/*
 * Map physical blocks to a range of logical blocks within a file.  The range
 * of logical blocks are (start, start + len).  If there are already extents,
 * the mappings will try to extend the mappings; otherwise, it will try to map
 * start as if logical block 0 points to goal.  If goal is ~0ULL, then the goal
 * is calculated based on the inode group.
 *
 * Flags:
 * - EXT2_FALLOCATE_ZERO_BLOCKS: Zero the blocks that are allocated.
 * - EXT2_FALLOCATE_FORCE_INIT: Create only initialized extents.
 * - EXT2_FALLOCATE_FORCE_UNINIT: Create only uninitialized extents.
 * - EXT2_FALLOCATE_INIT_BEYOND_EOF: Create extents beyond EOF.
 *
 * If neither FORCE_INIT nor FORCE_UNINIT are specified, this function will
 * try to expand any extents it finds, zeroing blocks as necessary.
 */
801 errcode_t ext2fs_fallocate(ext2_filsys fs, int flags, ext2_ino_t ino,
802 struct ext2_inode *inode, blk64_t goal,
803 blk64_t start, blk64_t len)
805 struct ext2_inode inode_buf;
809 if (((flags & EXT2_FALLOCATE_FORCE_INIT) &&
810 (flags & EXT2_FALLOCATE_FORCE_UNINIT)) ||
811 (flags & ~EXT2_FALLOCATE_ALL_FLAGS))
812 return EXT2_ET_INVALID_ARGUMENT;
814 if (len > ext2fs_blocks_count(fs->super))
815 return EXT2_ET_BLOCK_ALLOC_FAIL;
819 /* Read inode structure if necessary */
821 err = ext2fs_read_inode(fs, ino, &inode_buf);
826 dbg_printf("%s: ino=%d start=%llu len=%llu goal=%llu\n", __func__, ino,
829 if (inode->i_flags & EXT4_EXTENTS_FL) {
830 err = extent_fallocate(fs, flags, ino, inode, goal, start, len);
834 /* XXX: Allocate a bunch of blocks the slow way */
835 for (blk = start; blk < start + len; blk++) {
836 err = ext2fs_bmap2(fs, ino, inode, NULL, 0, blk, 0, &x);
842 err = ext2fs_bmap2(fs, ino, inode, NULL,
843 BMAP_ALLOC | BMAP_UNINIT | BMAP_ZERO, blk,
850 if (inode == &inode_buf)
851 ext2fs_write_inode(fs, ino, inode);