* PowerQuest, Inc.
*
* Copyright (C) 1999, 2000 by Theodore Ts'o
- *
+ *
* %Begin-Header%
* This file may be redistributed under the terms of the GNU Public
* License.
#define RESIZE2FS_DEBUG
#endif
+static void fix_uninit_block_bitmaps(ext2_filsys fs);
static errcode_t adjust_superblock(ext2_resize_t rfs, blk_t new_size);
static errcode_t blocks_to_move(ext2_resize_t rfs);
static errcode_t block_mover(ext2_resize_t rfs);
*/
errcode_t resize_fs(ext2_filsys fs, blk_t *new_size, int flags,
errcode_t (*progress)(ext2_resize_t rfs, int pass,
- unsigned long cur,
- unsigned long max_val))
+ unsigned long cur,
+ unsigned long max_val))
{
ext2_resize_t rfs;
errcode_t retval;
retval = ext2fs_read_bitmaps(fs);
if (retval)
return retval;
-
+
/*
* Create the data structure
*/
return retval;
memset(rfs, 0, sizeof(struct ext2_resize_struct));
+ fix_uninit_block_bitmaps(fs);
+ fs->priv_data = rfs;
rfs->old_fs = fs;
rfs->flags = flags;
rfs->itable_buf = 0;
if (retval)
goto errout;
+ fix_uninit_block_bitmaps(rfs->new_fs);
+ /* Clear the block bitmap uninit flag for the last block group */
+ rfs->new_fs->group_desc[rfs->new_fs->group_desc_count-1].bg_flags &=
+ ~EXT2_BG_BLOCK_UNINIT;
+
*new_size = rfs->new_fs->super->s_blocks_count;
retval = blocks_to_move(rfs);
rfs->new_fs->super->s_free_blocks_count,
rfs->needed_blocks);
#endif
-
+
retval = block_mover(rfs);
if (retval)
goto errout;
retval = ext2fs_calculate_summary_stats(rfs->new_fs);
if (retval)
goto errout;
-
+
retval = fix_resize_inode(rfs->new_fs);
if (retval)
goto errout;
- rfs->new_fs->flags &= ~EXT2_FLAG_MASTER_SB_ONLY;
+ rfs->new_fs->flags &= ~EXT2_FLAG_MASTER_SB_ONLY;
retval = ext2fs_close(rfs->new_fs);
if (retval)
goto errout;
rfs->flags = flags;
-
+
ext2fs_free(rfs->old_fs);
if (rfs->itable_buf)
ext2fs_free_mem(&rfs->itable_buf);
ext2fs_free_mem(&rfs);
-
+
return 0;
errout:
return retval;
}
+/*
+ * Clean up the bitmaps for uninitialized bitmaps
+ *
+ * For every block group flagged EXT2_BG_BLOCK_UNINIT, rebuild that
+ * group's slice of the in-memory block bitmap from first principles:
+ * mark only the group's metadata blocks (backup superblock, group
+ * descriptor blocks, block/inode bitmaps, and inode table) in use,
+ * and clear every other block in the group.  This is a no-op unless
+ * the GDT_CSUM (uninit_bg) feature is enabled, since BLOCK_UNINIT is
+ * only meaningful with group descriptor checksums.
+ */
+static void fix_uninit_block_bitmaps(ext2_filsys fs)
+{
+	blk_t	i, blk, super_blk, old_desc_blk, new_desc_blk;
+	int	old_desc_blocks;
+	dgrp_t	g;
+
+	/* Without group descriptor checksums there are no UNINIT groups */
+	if (!(EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
+					 EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
+		return;
+
+	for (g=0; g < fs->group_desc_count; g++) {
+		if (!(fs->group_desc[g].bg_flags & EXT2_BG_BLOCK_UNINIT))
+			continue;
+
+		/* First block of group g */
+		blk = (g * fs->super->s_blocks_per_group) +
+			fs->super->s_first_data_block;
+
+		/* Locate this group's superblock/descriptor backups */
+		ext2fs_super_and_bgd_loc(fs, g, &super_blk,
+					 &old_desc_blk, &new_desc_blk, 0);
+
+		if (fs->super->s_feature_incompat & EXT2_FEATURE_INCOMPAT_META_BG)
+			old_desc_blocks = fs->super->s_first_meta_bg;
+		else
+			old_desc_blocks = fs->desc_blocks +
+				fs->super->s_reserved_gdt_blocks;
+
+		/*
+		 * Walk every block in the group: metadata blocks get
+		 * marked, everything else gets cleared.
+		 */
+		for (i=0; i < fs->super->s_blocks_per_group; i++, blk++) {
+			if (blk >= fs->super->s_blocks_count)
+				break;
+			if ((blk == super_blk) ||
+			    (old_desc_blk && old_desc_blocks &&
+			     (blk >= old_desc_blk) &&
+			     (blk < old_desc_blk + old_desc_blocks)) ||
+			    (new_desc_blk && (blk == new_desc_blk)) ||
+			    (blk == fs->group_desc[g].bg_block_bitmap) ||
+			    (blk == fs->group_desc[g].bg_inode_bitmap) ||
+			    (blk >= fs->group_desc[g].bg_inode_table &&
+			     (blk < fs->group_desc[g].bg_inode_table
+			      + fs->inode_blocks_per_group)))
+				ext2fs_fast_mark_block_bitmap(fs->block_map, blk);
+			else
+				ext2fs_fast_unmark_block_bitmap(fs->block_map, blk);
+		}
+	}
+}
+
/* --------------------------------------------------------------------
*
* Resize processing, phase 1.
*/
/*
+ * If the group descriptor's bitmap and inode table blocks are valid,
+ * release them in the new filesystem data structure, and mark them as
+ * reserved so the old inode table blocks don't get overwritten.
+ *
+ * Used when shrinking the filesystem chops off whole block groups:
+ * gdp is the descriptor of a group being removed, fs is the new
+ * (smaller) filesystem, and reserve_blocks collects blocks that must
+ * not be reused while the old metadata is still needed.  Each block
+ * is only touched if it lies below the new s_blocks_count.
+ */
+static void free_gdp_blocks(ext2_filsys fs,
+			    ext2fs_block_bitmap reserve_blocks,
+			    struct ext2_group_desc *gdp)
+{
+	blk_t	blk;
+	int	j;
+
+	/* Release the group's block bitmap block, if valid */
+	if (gdp->bg_block_bitmap &&
+	    (gdp->bg_block_bitmap < fs->super->s_blocks_count)) {
+		ext2fs_block_alloc_stats(fs, gdp->bg_block_bitmap, -1);
+		ext2fs_mark_block_bitmap(reserve_blocks,
+					 gdp->bg_block_bitmap);
+	}
+
+	/* Release the group's inode bitmap block, if valid */
+	if (gdp->bg_inode_bitmap &&
+	    (gdp->bg_inode_bitmap < fs->super->s_blocks_count)) {
+		ext2fs_block_alloc_stats(fs, gdp->bg_inode_bitmap, -1);
+		ext2fs_mark_block_bitmap(reserve_blocks,
+					 gdp->bg_inode_bitmap);
+	}
+
+	/* Nothing more to do if the inode table location is invalid */
+	if (gdp->bg_inode_table == 0 ||
+	    (gdp->bg_inode_table >= fs->super->s_blocks_count))
+		return;
+
+	/* Release every block of the group's inode table */
+	for (blk = gdp->bg_inode_table, j = 0;
+	     j < fs->inode_blocks_per_group; j++, blk++) {
+		if (blk >= fs->super->s_blocks_count)
+			break;
+		ext2fs_block_alloc_stats(fs, blk, -1);
+		ext2fs_mark_block_bitmap(reserve_blocks, blk);
+	}
+}
+
+/*
* This routine is shared by the online and offline resize routines.
* All of the information which is adjusted in memory is done here.
+ *
+ * The reserve_blocks parameter is only needed when shrinking the
+ * filesystem.
*/
-errcode_t adjust_fs_info(ext2_filsys fs, ext2_filsys old_fs, blk_t new_size)
+errcode_t adjust_fs_info(ext2_filsys fs, ext2_filsys old_fs,
+ ext2fs_block_bitmap reserve_blocks, blk_t new_size)
{
errcode_t retval;
int overhead = 0;
unsigned int meta_bg, meta_bg_size;
int has_super, csum_flag;
unsigned long long new_inodes; /* u64 to check for overflow */
+ double percent;
fs->super->s_blocks_count = new_size;
EXT2_BLOCKS_PER_GROUP(fs->super));
if (fs->group_desc_count == 0)
return EXT2_ET_TOOSMALL;
- fs->desc_blocks = ext2fs_div_ceil(fs->group_desc_count,
+ fs->desc_blocks = ext2fs_div_ceil(fs->group_desc_count,
EXT2_DESC_PER_BLOCK(fs->super));
/*
overhead = (int) (2 + fs->inode_blocks_per_group);
if (ext2fs_bg_has_super(fs, fs->group_desc_count - 1))
- overhead += 1 + fs->desc_blocks +
+ overhead += 1 + fs->desc_blocks +
fs->super->s_reserved_gdt_blocks;
/*
/*
* Adjust the number of reserved blocks
*/
- blk = (__u64)old_fs->super->s_r_blocks_count * 100 /
+ percent = (old_fs->super->s_r_blocks_count * 100.0) /
old_fs->super->s_blocks_count;
- fs->super->s_r_blocks_count = e2p_percent(blk,
- fs->super->s_blocks_count);
+ fs->super->s_r_blocks_count = (unsigned int) (percent *
+ fs->super->s_blocks_count / 100.0);
/*
* Adjust the bitmaps for size
fs->super->s_inodes_count,
fs->inode_map);
if (retval) goto errout;
-
+
real_end = ((EXT2_BLOCKS_PER_GROUP(fs->super)
* fs->group_desc_count)) - 1 +
fs->super->s_first_data_block;
&fs->group_desc);
if (retval)
goto errout;
- if (fs->desc_blocks > old_fs->desc_blocks)
- memset((char *) fs->group_desc +
+ if (fs->desc_blocks > old_fs->desc_blocks)
+ memset((char *) fs->group_desc +
(old_fs->desc_blocks * fs->blocksize), 0,
(fs->desc_blocks - old_fs->desc_blocks) *
fs->blocksize);
* s_reserved_gdt_blocks if possible to avoid needing to move
* the inode table either now or in the future.
*/
- if ((fs->super->s_feature_compat &
+ if ((fs->super->s_feature_compat &
EXT2_FEATURE_COMPAT_RESIZE_INODE) &&
(old_fs->desc_blocks != fs->desc_blocks)) {
int new;
- new = ((int) fs->super->s_reserved_gdt_blocks) +
+ new = ((int) fs->super->s_reserved_gdt_blocks) +
(old_fs->desc_blocks - fs->desc_blocks);
if (new < 0)
new = 0;
if (new > (int) fs->blocksize/4)
new = fs->blocksize/4;
fs->super->s_reserved_gdt_blocks = new;
- if (new == 0)
- fs->super->s_feature_compat &=
- ~EXT2_FEATURE_COMPAT_RESIZE_INODE;
}
/*
- * If we are shrinking the number block groups, we're done and
- * can exit now.
+ * If we are shrinking the number of block groups, we're done
+ * and can exit now.
*/
if (old_fs->group_desc_count > fs->group_desc_count) {
+ /*
+ * Check the block groups that we are chopping off
+ * and free any blocks associated with their metadata
+ */
+ for (i = fs->group_desc_count;
+ i < old_fs->group_desc_count; i++) {
+ free_gdp_blocks(fs, reserve_blocks,
+ &old_fs->group_desc[i]);
+ }
retval = 0;
goto errout;
}
if (fs->super->s_feature_incompat & EXT2_FEATURE_INCOMPAT_META_BG)
old_desc_blocks = fs->super->s_first_meta_bg;
else
- old_desc_blocks = fs->desc_blocks +
+ old_desc_blocks = fs->desc_blocks +
fs->super->s_reserved_gdt_blocks;
for (i = old_fs->group_desc_count;
i < fs->group_desc_count; i++) {
ext2fs_block_alloc_stats(fs,
group_block + has_super, +1);
}
-
+
adjblocks += 2 + fs->inode_blocks_per_group;
-
+
numblocks -= adjblocks;
fs->super->s_free_blocks_count -= adjblocks;
fs->super->s_free_inodes_count +=
blk_t group_block;
unsigned long i;
unsigned long max_group;
-
+
fs = rfs->new_fs;
ext2fs_mark_super_dirty(fs);
ext2fs_mark_bb_dirty(fs);
ext2fs_mark_ib_dirty(fs);
- retval = adjust_fs_info(fs, rfs->old_fs, new_size);
+ retval = ext2fs_allocate_block_bitmap(fs, _("reserved blocks"),
+ &rfs->reserve_blocks);
+ if (retval)
+ return retval;
+
+ retval = adjust_fs_info(fs, rfs->old_fs, rfs->reserve_blocks, new_size);
if (retval)
goto errout;
if (fs->super->s_feature_incompat & EXT2_FEATURE_INCOMPAT_META_BG)
old_desc_blocks = fs->super->s_first_meta_bg;
else
- old_desc_blocks = fs->desc_blocks +
+ old_desc_blocks = fs->desc_blocks +
fs->super->s_reserved_gdt_blocks;
for (i = 0; i < fs->group_desc_count; i++) {
ext2fs_reserve_super_and_bgd(fs, i, bmap);
-
+
/*
* Mark the blocks used for the inode table
*/
j < (unsigned int) fs->inode_blocks_per_group;
j++, b++)
ext2fs_mark_block_bitmap(bmap, b);
-
+
/*
- * Mark block used for the block bitmap
+ * Mark block used for the block bitmap
*/
ext2fs_mark_block_bitmap(bmap,
fs->group_desc[i].bg_block_bitmap);
/*
- * Mark block used for the inode bitmap
+ * Mark block used for the inode bitmap
*/
ext2fs_mark_block_bitmap(bmap,
fs->group_desc[i].bg_inode_bitmap);
int group, blk_t blk)
{
ext2_filsys fs = rfs->new_fs;
-
+
ext2fs_mark_block_bitmap(rfs->reserve_blocks, blk);
ext2fs_block_alloc_stats(fs, blk, +1);
} else if (IS_INODE_TB(fs, group, blk)) {
FS_INODE_TB(fs, group) = 0;
rfs->needed_blocks++;
+ } else if (EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
+ EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+ (fs->group_desc[group].bg_flags & EXT2_BG_BLOCK_UNINIT)) {
+ /*
+		 * If the block bitmap is uninitialized, nothing other
+		 * than the standard metadata can be in use.
+ */
+ return;
} else if (ext2fs_test_block_bitmap(rfs->old_fs->block_map, blk) &&
!ext2fs_test_block_bitmap(meta_bmap, blk)) {
ext2fs_mark_block_bitmap(rfs->move_blocks, blk);
static errcode_t blocks_to_move(ext2_resize_t rfs)
{
int j, has_super;
- dgrp_t i, max_groups;
+ dgrp_t i, max_groups, g;
blk_t blk, group_blk;
unsigned long old_blocks, new_blocks;
unsigned int meta_bg, meta_bg_size;
errcode_t retval;
ext2_filsys fs, old_fs;
ext2fs_block_bitmap meta_bmap;
+ __u32 save_incompat_flag;
fs = rfs->new_fs;
old_fs = rfs->old_fs;
if (old_fs->super->s_blocks_count > fs->super->s_blocks_count)
fs = rfs->old_fs;
-
- retval = ext2fs_allocate_block_bitmap(fs, _("reserved blocks"),
- &rfs->reserve_blocks);
- if (retval)
- return retval;
retval = ext2fs_allocate_block_bitmap(fs, _("blocks to be moved"),
&rfs->move_blocks);
if (retval)
return retval;
- retval = ext2fs_allocate_block_bitmap(fs, _("meta-data blocks"),
+ retval = ext2fs_allocate_block_bitmap(fs, _("meta-data blocks"),
&meta_bmap);
if (retval)
return retval;
-
+
retval = mark_table_blocks(old_fs, meta_bmap);
if (retval)
return retval;
fs = rfs->new_fs;
-
+
/*
* If we're shrinking the filesystem, we need to move all of
* the blocks that don't fit any more
*/
for (blk = fs->super->s_blocks_count;
blk < old_fs->super->s_blocks_count; blk++) {
+ g = ext2fs_group_of_blk(fs, blk);
+ if (EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
+ EXT4_FEATURE_RO_COMPAT_GDT_CSUM) &&
+ (old_fs->group_desc[g].bg_flags & EXT2_BG_BLOCK_UNINIT)) {
+ /*
+ * The block bitmap is uninitialized, so skip
+ * to the next block group.
+ */
+ blk = ((g+1) * fs->super->s_blocks_per_group) +
+ fs->super->s_first_data_block - 1;
+ continue;
+ }
if (ext2fs_test_block_bitmap(old_fs->block_map, blk) &&
!ext2fs_test_block_bitmap(meta_bmap, blk)) {
ext2fs_mark_block_bitmap(rfs->move_blocks, blk);
}
ext2fs_mark_block_bitmap(rfs->reserve_blocks, blk);
}
-
+
if (fs->super->s_feature_incompat & EXT2_FEATURE_INCOMPAT_META_BG) {
old_blocks = old_fs->super->s_first_meta_bg;
new_blocks = fs->super->s_first_meta_bg;
old_blocks = old_fs->desc_blocks + old_fs->super->s_reserved_gdt_blocks;
new_blocks = fs->desc_blocks + fs->super->s_reserved_gdt_blocks;
}
-
+
if (old_blocks == new_blocks) {
retval = 0;
goto errout;
}
/*
* If we're increasing the number of descriptor blocks, life
- * gets interesting....
+ * gets interesting....
*/
meta_bg_size = EXT2_DESC_PER_BLOCK(fs->super);
for (i = 0; i < max_groups; i++) {
if (has_super) {
for (blk = group_blk+1;
blk < group_blk + 1 + new_blocks; blk++)
- mark_fs_metablock(rfs, meta_bmap,
+ mark_fs_metablock(rfs, meta_bmap,
i, blk);
}
} else {
/*
* Allocate the missing data structures
+ *
+ * XXX We have a problem with FLEX_BG and off-line
+ * resizing where we are growing the size of the
+ * filesystem. ext2fs_allocate_group_table() will try
+ * to reserve the inode table in the desired flex_bg
+ * location. However, passing rfs->reserve_blocks
+ * doesn't work since it only has reserved the blocks
+ * that will be used in the new block group -- and
+ * with flex_bg, we can and will allocate the tables
+ * outside of the block group. And we can't pass in
+ * the fs->block_map because it doesn't handle
+ * overlapping inode table movements right. So for
+ * now, we temporarily disable flex_bg to force
+ * ext2fs_allocate_group_tables() to allocate the bg
+		 * metadata inside the block group, and then restore
+		 * it afterwards.  Ugly, until we can fix this up
+ * right later.
*/
+ save_incompat_flag = fs->super->s_feature_incompat;
+ fs->super->s_feature_incompat &= ~EXT4_FEATURE_INCOMPAT_FLEX_BG;
retval = ext2fs_allocate_group_table(fs, i,
rfs->reserve_blocks);
+ fs->super->s_feature_incompat = save_incompat_flag;
if (retval)
goto errout;
/*
* Mark the new inode table as in use in the new block
- * allocation bitmap, and move any blocks that might
+ * allocation bitmap, and move any blocks that might
* be necessary.
*/
for (blk = fs->group_desc[i].bg_inode_table, j=0;
ext2fs_mark_block_bitmap(rfs->move_blocks,
blk);
}
-
+
/*
* Make sure the old inode table is reserved in the
* block reservation bitmap.
for (blk = rfs->old_fs->group_desc[i].bg_inode_table, j=0;
j < fs->inode_blocks_per_group ; j++, blk++)
ext2fs_mark_block_bitmap(rfs->reserve_blocks, blk);
-
+
next_group:
group_blk += rfs->new_fs->super->s_blocks_per_group;
}
errout:
if (meta_bmap)
ext2fs_free_block_bitmap(meta_bmap);
-
+
return retval;
}
static blk_t get_new_block(ext2_resize_t rfs)
{
ext2_filsys fs = rfs->new_fs;
-
+
while (1) {
if (rfs->new_blk >= fs->super->s_blocks_count) {
if (rfs->alloc_state == DESPERATION)
if (rfs->flags & RESIZE_DEBUG_BMOVE)
printf("Going into desperation mode "
"for block allocations\n");
-#endif
+#endif
rfs->alloc_state = DESPERATION;
rfs->new_blk = fs->super->s_first_data_block;
continue;
}
}
+/*
+ * Block allocation callback installed on the filesystem handle
+ * (fs->get_alloc_block).  Hands out a block from the resize
+ * allocator's state machine via get_new_block() and records the
+ * allocation in both the old and new filesystem block bitmaps so the
+ * block is not handed out again by either.
+ *
+ * The goal hint is ignored.  Returns 0 on success, with the chosen
+ * block in *ret; returns ENOSPC when get_new_block() finds nothing.
+ */
+static errcode_t resize2fs_get_alloc_block(ext2_filsys fs, blk64_t goal,
+					   blk64_t *ret)
+{
+	ext2_resize_t rfs = (ext2_resize_t) fs->priv_data;
+	blk_t blk;
+
+	blk = get_new_block(rfs);
+	if (!blk)
+		return ENOSPC;
+
+#ifdef RESIZE2FS_DEBUG
+	/* 0xF covers all of the RESIZE_DEBUG_* flag bits */
+	if (rfs->flags & 0xF)
+		printf("get_alloc_block allocating %u\n", blk);
+#endif
+
+	/* Reserve the block in both the old and new filesystems */
+	ext2fs_mark_block_bitmap(rfs->old_fs->block_map, blk);
+	ext2fs_mark_block_bitmap(rfs->new_fs->block_map, blk);
+	*ret = (blk64_t) blk;
+	return 0;
+}
+
static errcode_t block_mover(ext2_resize_t rfs)
{
blk_t blk, old_blk, new_blk;
int to_move, moved;
ext2_badblocks_list badblock_list = 0;
int bb_modified = 0;
-
+
+ fs->get_alloc_block = resize2fs_get_alloc_block;
+ old_fs->get_alloc_block = resize2fs_get_alloc_block;
+
retval = ext2fs_read_bb_inode(old_fs, &badblock_list);
if (retval)
return retval;
ext2fs_add_extent_entry(rfs->bmap, blk, new_blk);
to_move++;
}
-
+
if (to_move == 0) {
if (rfs->bmap) {
ext2fs_free_extent_table(rfs->bmap);
};
static int process_block(ext2_filsys fs, blk_t *block_nr,
- e2_blkcnt_t blockcnt,
+ e2_blkcnt_t blockcnt,
blk_t ref_block EXT2FS_ATTR((unused)),
int ref_offset EXT2FS_ATTR((unused)), void *priv_data)
{
pb->changed = 1;
#ifdef RESIZE2FS_DEBUG
if (pb->rfs->flags & RESIZE_DEBUG_BMOVE)
- printf("ino=%u, blockcnt=%lld, %u->%u\n",
+ printf("ino=%u, blockcnt=%lld, %u->%u\n",
pb->ino, blockcnt, block, new_block);
#endif
block = new_block;
/*
* Progress callback
*/
-static errcode_t progress_callback(ext2_filsys fs,
+static errcode_t progress_callback(ext2_filsys fs,
ext2_inode_scan scan EXT2FS_ATTR((unused)),
dgrp_t group, void * priv_data)
{
if (retval)
return retval;
}
-
+
return 0;
}
struct ext2_inode *inode = NULL;
ext2_inode_scan scan = NULL;
errcode_t retval;
- int group;
char *block_buf = 0;
ext2_ino_t start_to_move;
blk_t orig_size, new_block;
int inode_size;
-
+
if ((rfs->old_fs->group_desc_count <=
rfs->new_fs->group_desc_count) &&
!rfs->bmap)
start_to_move = (rfs->new_fs->group_desc_count *
rfs->new_fs->super->s_inodes_per_group);
-
+
if (rfs->progress) {
retval = (rfs->progress)(rfs, E2_RSZ_INODE_SCAN_PASS,
0, rfs->old_fs->group_desc_count);
pb.changed = 0;
if (inode->i_file_acl && rfs->bmap) {
- new_block = ext2fs_extent_translate(rfs->bmap,
+ new_block = ext2fs_extent_translate(rfs->bmap,
inode->i_file_acl);
if (new_block) {
inode->i_file_acl = new_block;
- retval = ext2fs_write_inode_full(rfs->old_fs,
+ retval = ext2fs_write_inode_full(rfs->old_fs,
ino, inode, inode_size);
if (retval) goto errout;
}
}
-
+
if (ext2fs_inode_has_valid_blocks(inode) &&
(rfs->bmap || pb.is_dir)) {
pb.ino = ino;
/*
* Find a new inode
*/
- while (1) {
- if (!ext2fs_test_inode_bitmap(rfs->new_fs->inode_map,
- new_inode))
- break;
- new_inode++;
- if (new_inode > rfs->new_fs->super->s_inodes_count) {
- retval = ENOSPC;
- goto errout;
- }
- }
+ retval = ext2fs_new_inode(rfs->new_fs, 0, 0, 0, &new_inode);
+ if (retval)
+ goto errout;
+
ext2fs_inode_alloc_stats2(rfs->new_fs, new_inode, +1,
pb.is_dir);
if (pb.changed) {
inode, inode_size);
if (retval) goto errout;
- group = (new_inode-1) / EXT2_INODES_PER_GROUP(rfs->new_fs->super);
- if (LINUX_S_ISDIR(inode->i_mode)) {
- rfs->new_fs->group_desc[group].bg_used_dirs_count++;
- ext2fs_group_desc_csum_set(rfs->new_fs, group);
- }
-
#ifdef RESIZE2FS_DEBUG
if (rfs->flags & RESIZE_DEBUG_INODEMAP)
printf("Inode moved %u->%u\n", ino, new_inode);
ext2fs_close_inode_scan(scan);
if (block_buf)
ext2fs_free_mem(&block_buf);
- if (inode)
- free(inode);
+ free(inode);
return retval;
}
struct istruct {
ext2_resize_t rfs;
errcode_t err;
- unsigned long max_dirs;
- int num;
+ unsigned int max_dirs;
+ unsigned int num;
};
-static int check_and_change_inodes(ext2_ino_t dir,
+static int check_and_change_inodes(ext2_ino_t dir,
int entry EXT2FS_ATTR((unused)),
struct ext2_dir_entry *dirent, int offset,
int blocksize EXT2FS_ATTR((unused)),
- char *buf EXT2FS_ATTR((unused)),
+ char *buf EXT2FS_ATTR((unused)),
void *priv_data)
{
struct istruct *is = (struct istruct *) priv_data;
{
errcode_t retval;
struct istruct is;
-
+
if (!rfs->imap)
return 0;
-
+
/*
* Now, we iterate over all of the directories to update the
* inode references
if (retval)
goto errout;
}
-
+
retval = ext2fs_dblist_dir_iterate(rfs->old_fs->dblist,
DIRENT_FLAG_INCLUDE_EMPTY, 0,
check_and_change_inodes, &is);
goto errout;
}
+ if (rfs->progress && (is.num < is.max_dirs))
+ (rfs->progress)(rfs, E2_RSZ_INODE_REF_UPD_PASS,
+ is.max_dirs, is.max_dirs);
+
errout:
ext2fs_free_extent_table(rfs->imap);
rfs->imap = 0;
old_blk = rfs->old_fs->group_desc[i].bg_inode_table;
new_blk = fs->group_desc[i].bg_inode_table;
diff = new_blk - old_blk;
-
+
#ifdef RESIZE2FS_DEBUG
- if (rfs->flags & RESIZE_DEBUG_ITABLEMOVE)
+ if (rfs->flags & RESIZE_DEBUG_ITABLEMOVE)
printf("Itable move group %d block %u->%u (diff %d)\n",
i, old_blk, new_blk, diff);
#endif
-
+
if (!diff)
continue;
retval = io_channel_read_blk(fs->io, old_blk,
fs->inode_blocks_per_group,
rfs->itable_buf);
- if (retval)
+ if (retval)
goto errout;
/*
* The end of the inode table segment often contains
break;
n = n >> EXT2_BLOCK_SIZE_BITS(fs->super);
#ifdef RESIZE2FS_DEBUG
- if (rfs->flags & RESIZE_DEBUG_ITABLEMOVE)
+ if (rfs->flags & RESIZE_DEBUG_ITABLEMOVE)
printf("%d blocks of zeros...\n", n);
#endif
num = fs->inode_blocks_per_group;
mark_table_blocks(fs, fs->block_map);
ext2fs_flush(fs);
#ifdef RESIZE2FS_DEBUG
- if (rfs->flags & RESIZE_DEBUG_ITABLEMOVE)
+ if (rfs->flags & RESIZE_DEBUG_ITABLEMOVE)
printf("Inode table move finished.\n");
#endif
return 0;
-
+
errout:
return retval;
}
/*
- * Fix the resize inode
+ * Fix the resize inode
*/
static errcode_t fix_resize_inode(ext2_filsys fs)
{
struct ext2_inode inode;
errcode_t retval;
char * block_buf;
+ blk_t blk;
- if (!(fs->super->s_feature_compat &
+ if (!(fs->super->s_feature_compat &
EXT2_FEATURE_COMPAT_RESIZE_INODE))
return 0;
retval = ext2fs_read_inode(fs, EXT2_RESIZE_INO, &inode);
if (retval) goto errout;
+ if (fs->super->s_reserved_gdt_blocks == 0) {
+ fs->super->s_feature_compat &=
+ ~EXT2_FEATURE_COMPAT_RESIZE_INODE;
+ ext2fs_mark_super_dirty(fs);
+
+ if ((blk = inode.i_block[EXT2_DIND_BLOCK]) != 0)
+ ext2fs_block_alloc_stats(fs, blk, -1);
+
+ memset(&inode, 0, sizeof(inode));
+
+ retval = ext2fs_write_inode(fs, EXT2_RESIZE_INO, &inode);
+ goto errout;
+ }
+
ext2fs_iblk_set(fs, &inode, 1);
retval = ext2fs_write_inode(fs, EXT2_RESIZE_INO, &inode);
if (retval) goto errout;
if (!inode.i_block[EXT2_DIND_BLOCK]) {
- /*
+ /*
* Avoid zeroing out block #0; that's rude. This
* should never happen anyway since the filesystem
* should be fsck'ed and we assume it is consistent.
*/
- fprintf(stderr,
+ fprintf(stderr,
_("Should never happen: resize inode corrupt!\n"));
exit(1);
}
retval = io_channel_write_blk(fs->io, inode.i_block[EXT2_DIND_BLOCK],
1, block_buf);
if (retval) goto errout;
-
+
retval = ext2fs_create_resize_inode(fs);
if (retval)
goto errout;
}
}
fs->super->s_free_blocks_count = total_free;
-
+
/*
* Next, calculate the inode statistics
*/
data_needed -= SUPER_OVERHEAD(fs) * num_of_superblocks;
data_needed -= META_OVERHEAD(fs) * fs->group_desc_count;
+ if (fs->super->s_feature_incompat & EXT4_FEATURE_INCOMPAT_FLEX_BG) {
+ /*
+ * For ext4 we need to allow for up to a flex_bg worth
+ * of inode tables of slack space so the resize
+ * operation can be guaranteed to finish.
+ */
+ int flexbg_size = 1 << fs->super->s_log_groups_per_flex;
+ int extra_groups;
+
+ extra_groups = flexbg_size - (groups & (flexbg_size - 1));
+ data_needed += META_OVERHEAD(fs) * extra_groups;
+ }
+
/*
* figure out how many data blocks we have given the number of groups
* we need for our inodes
blks_needed = (groups-1) * EXT2_BLOCKS_PER_GROUP(fs->super);
blks_needed += overhead;
+ /*
+ * We need to reserve a few extra blocks if extents are
+ * enabled, in case we need to grow the extent tree. The more
+ * we shrink the file system, the more space we need.
+ */
+ if (fs->super->s_feature_incompat & EXT3_FEATURE_INCOMPAT_EXTENTS)
+ blks_needed += (fs->super->s_blocks_count - blks_needed)/500;
+
return blks_needed;
}