2 * alloc_tables.c --- Allocate tables for a newly initialized
3 * filesystem. Used by mke2fs when initializing a filesystem
5 * Copyright (C) 1996 Theodore Ts'o.
8 * This file may be redistributed under the terms of the GNU Library
9 * General Public License, version 2.
25 #include <sys/types.h>
33 * This routine searches for free blocks that can allocate a full
34 * group of bitmaps or inode tables for a flexbg group. Returns the
35 * block number with a correct offset where the bitmaps and inode
36 * tables can be allocated continuously and in order.
38 static blk64_t flexbg_offset(ext2_filsys fs, dgrp_t group, blk64_t start_blk,
39 ext2fs_block_bitmap bmap, int rem_grp,
/* NOTE(review): this listing is elided (original line numbers are
 * non-contiguous), so parts of this function's body — including its
 * return statements — are not visible here.  Comments below describe
 * only the visible code. */
42 int flexbg, flexbg_size, size;
43 blk64_t last_blk, first_free = 0;
/* Determine which flex_bg this group belongs to, and how many blocks
 * (one element of elem_size blocks per remaining group) we would like
 * to find contiguously free. */
46 flexbg_size = 1 << fs->super->s_log_groups_per_flex;
47 flexbg = group / flexbg_size;
48 size = rem_grp * elem_size;
/* Cap the requested size at a quarter of the blocks in a group so the
 * contiguous search does not become excessively long. */
50 if (size > (int) (fs->super->s_blocks_per_group / 4))
51 size = (int) fs->super->s_blocks_per_group / 4;
54 * Don't do a long search if the previous block search is still valid,
55 * but skip minor obstructions such as group descriptor backups.
/* Fast path: if the caller supplied a hint (start_blk) that is inside
 * the filesystem, probe the window [start_blk, start_blk + size] for a
 * run of elem_size free blocks before doing a full flex_bg search. */
57 if (start_blk && start_blk < ext2fs_blocks_count(fs->super) &&
58 ext2fs_get_free_blocks2(fs, start_blk, start_blk + size, elem_size,
59 bmap, &first_free) == 0)
/* Full search: restrict the range to this flex_bg — from the first
 * block of its first group to the last block of its last group
 * (clamped so we do not run past the final group on the fs). */
62 start_blk = ext2fs_group_first_block2(fs, flexbg_size * flexbg);
63 last_grp = group | (flexbg_size - 1);
64 if (last_grp > fs->group_desc_count-1)
65 last_grp = fs->group_desc_count-1;
66 last_blk = ext2fs_group_last_block2(fs, last_grp);
68 /* Find the first available block */
/* Try, in order of preference: a run big enough for all remaining
 * groups, then a run of just elem_size within the flex_bg, and
 * finally a run of elem_size anywhere below last_blk. */
69 if (ext2fs_get_free_blocks2(fs, start_blk, last_blk, size,
70 bmap, &first_free) == 0)
73 if (ext2fs_get_free_blocks2(fs, start_blk, last_blk, elem_size,
74 bmap, &first_free) == 0)
77 if (ext2fs_get_free_blocks2(fs, 0, last_blk, elem_size, bmap,
84 errcode_t ext2fs_allocate_group_table(ext2_filsys fs, dgrp_t group,
85 ext2fs_block_bitmap bmap)
/* Allocate the block bitmap, inode bitmap, and inode table for one
 * block group, recording the chosen locations in the group descriptor
 * and marking the blocks in-use in bmap.
 *
 * NOTE(review): the listing is elided (original line numbers jump), so
 * declarations such as retval/last_grp and the error checks after each
 * ext2fs_get_free_blocks2() call are not visible here; comments cover
 * only the visible code. */
88 blk64_t group_blk, start_blk, last_blk, new_blk;
90 int rem_grps = 0, flexbg_size = 0;
92 group_blk = ext2fs_group_first_block2(fs, group);
93 last_blk = ext2fs_group_last_block2(fs, group);
/* With flex_bg enabled, metadata for a whole flex group is packed
 * together: compute the flex group size, the last group in this flex
 * group (clamped to the fs), and how many groups still need tables. */
98 if (EXT2_HAS_INCOMPAT_FEATURE(fs->super,
99 EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
100 fs->super->s_log_groups_per_flex) {
101 flexbg_size = 1 << fs->super->s_log_groups_per_flex;
102 last_grp = group | (flexbg_size - 1);
103 if (last_grp > fs->group_desc_count-1)
104 last_grp = fs->group_desc_count-1;
105 rem_grps = last_grp - group + 1;
109 * Allocate the block and inode bitmaps, if necessary
/* Non-flex_bg placement: find the first free block in the group, then
 * offset it by the inode table size plus a per-group stride (to spread
 * metadata for RAID), falling back to the group start if that lands
 * past the end of the group. */
112 retval = ext2fs_get_free_blocks2(fs, group_blk, last_blk,
113 1, bmap, &start_blk);
116 start_blk += fs->inode_blocks_per_group;
117 start_blk += ((fs->stride * group) %
118 (last_blk - start_blk + 1));
119 if (start_blk >= last_blk)
120 start_blk = group_blk;
122 start_blk = group_blk;
/* flex_bg placement for the block bitmap: hint at the block after the
 * previous group's block bitmap so bitmaps pack contiguously. */
125 blk64_t prev_block = 0;
127 if (group % flexbg_size)
128 prev_block = ext2fs_block_bitmap_loc(fs, group - 1) + 1;
129 /* FIXME: Take backup group descriptor blocks into account
130 * if the flexbg allocations will grow to overlap them... */
131 start_blk = flexbg_offset(fs, group, prev_block, bmap,
133 last_blk = ext2fs_group_last_block2(fs, last_grp);
/* Allocate the block bitmap if the descriptor doesn't already have
 * one; on EXT2_ET_BLOCK_ALLOC_FAIL, retry within this group alone. */
136 if (!ext2fs_block_bitmap_loc(fs, group)) {
137 retval = ext2fs_get_free_blocks2(fs, start_blk, last_blk,
139 if (retval == EXT2_ET_BLOCK_ALLOC_FAIL)
140 retval = ext2fs_get_free_blocks2(fs, group_blk,
141 last_blk, 1, bmap, &new_blk);
144 ext2fs_mark_block_bitmap2(bmap, new_blk);
145 ext2fs_block_bitmap_loc_set(fs, group, new_blk);
/* Account for the consumed block in the group that actually holds it
 * (which may differ from `group` under flex_bg), and clear BLOCK_UNINIT
 * since that group now has an in-use block. */
147 dgrp_t gr = ext2fs_group_of_blk2(fs, new_blk);
148 ext2fs_bg_free_blocks_count_set(fs, gr, ext2fs_bg_free_blocks_count(fs, gr) - 1);
149 ext2fs_free_blocks_count_add(fs->super, -1);
150 ext2fs_bg_flags_clear(fs, gr, EXT2_BG_BLOCK_UNINIT);
151 ext2fs_group_desc_csum_set(fs, gr);
/* flex_bg placement for the inode bitmap: hint after the previous
 * group's inode bitmap, or (first group in the flex group) after this
 * group's block bitmap. */
156 blk64_t prev_block = 0;
157 if (group % flexbg_size)
158 prev_block = ext2fs_inode_bitmap_loc(fs, group - 1) + 1;
160 prev_block = ext2fs_block_bitmap_loc(fs, group) +
162 /* FIXME: Take backup group descriptor blocks into account
163 * if the flexbg allocations will grow to overlap them... */
164 start_blk = flexbg_offset(fs, group, prev_block, bmap,
166 last_blk = ext2fs_group_last_block2(fs, last_grp);
/* Allocate the inode bitmap, same strategy and accounting as the
 * block bitmap above. */
169 if (!ext2fs_inode_bitmap_loc(fs, group)) {
170 retval = ext2fs_get_free_blocks2(fs, start_blk, last_blk,
172 if (retval == EXT2_ET_BLOCK_ALLOC_FAIL)
173 retval = ext2fs_get_free_blocks2(fs, group_blk,
174 last_blk, 1, bmap, &new_blk);
177 ext2fs_mark_block_bitmap2(bmap, new_blk);
178 ext2fs_inode_bitmap_loc_set(fs, group, new_blk);
180 dgrp_t gr = ext2fs_group_of_blk2(fs, new_blk);
181 ext2fs_bg_free_blocks_count_set(fs, gr, ext2fs_bg_free_blocks_count(fs, gr) - 1);
182 ext2fs_free_blocks_count_add(fs->super, -1);
183 ext2fs_bg_flags_clear(fs, gr, EXT2_BG_BLOCK_UNINIT);
184 ext2fs_group_desc_csum_set(fs, gr);
189 * Allocate the inode table
/* flex_bg placement for the inode table: hint just past the previous
 * group's inode table, or past this group's inode bitmap for the first
 * group of the flex group. */
192 blk64_t prev_block = 0;
194 if (group % flexbg_size)
195 prev_block = ext2fs_inode_table_loc(fs, group - 1) +
196 fs->inode_blocks_per_group;
198 prev_block = ext2fs_inode_bitmap_loc(fs, group) +
201 /* FIXME: Take backup group descriptor blocks into account
202 * if the flexbg allocations will grow to overlap them... */
203 group_blk = flexbg_offset(fs, group, prev_block, bmap,
204 rem_grps, fs->inode_blocks_per_group);
205 last_blk = ext2fs_group_last_block2(fs, last_grp);
/* Allocate a contiguous run of inode_blocks_per_group blocks for the
 * inode table; stats are updated for the whole range at once. */
208 if (!ext2fs_inode_table_loc(fs, group)) {
209 retval = ext2fs_get_free_blocks2(fs, group_blk, last_blk,
210 fs->inode_blocks_per_group,
215 ext2fs_block_alloc_stats_range(fs, new_blk,
216 fs->inode_blocks_per_group, +1);
/* When bmap is not the fs's own block map, the fs map must be marked
 * separately so both bitmaps agree. */
218 ext2fs_mark_block_bitmap_range2(fs->block_map,
219 new_blk, fs->inode_blocks_per_group);
220 ext2fs_inode_table_loc_set(fs, group, new_blk);
222 ext2fs_group_desc_csum_set(fs, group);
226 errcode_t ext2fs_allocate_tables(ext2_filsys fs)
/* Allocate the metadata tables (bitmaps and inode table) for every
 * block group in the filesystem, reporting progress through the
 * optional fs->progress_ops callbacks.
 *
 * NOTE(review): elided listing — the declarations of i/retval, the
 * error-return inside the loop, and the final return are not visible
 * in this view. */
230 struct ext2fs_numeric_progress_struct progress;
/* Progress callbacks are optional; each is guarded by a NULL check. */
232 if (fs->progress_ops && fs->progress_ops->init)
233 (fs->progress_ops->init)(fs, &progress, NULL,
234 fs->group_desc_count);
236 for (i = 0; i < fs->group_desc_count; i++) {
237 if (fs->progress_ops && fs->progress_ops->update)
238 (fs->progress_ops->update)(fs, &progress, i);
/* Delegate per-group allocation; the fs's own block map is the
 * allocation bitmap here. */
239 retval = ext2fs_allocate_group_table(fs, i, fs->block_map);
243 if (fs->progress_ops && fs->progress_ops->close)
244 (fs->progress_ops->close)(fs, &progress, NULL);