#define _INLINE_ inline
#endif
+#undef DEBUG
+
static int process_block(ext2_filsys fs, blk64_t *blocknr,
e2_blkcnt_t blockcnt, blk64_t ref_blk,
int ref_offset, void *priv_data);
ext2fs_block_bitmap fs_meta_blocks;
e2fsck_t ctx;
region_t region;
+ struct extent_tree_info eti;
};
struct process_inode_block {
ext2_ino_t ino;
- struct ext2_inode inode;
+ struct ext2_inode_large inode;
};
struct scan_callback_struct {
if (io_channel_read_blk64(fs->io, inode->i_block[0], 1, buf))
return 0;
- len = strnlen(buf, fs->blocksize);
+ if (inode->i_flags & EXT4_ENCRYPT_FL) {
+ len = ext2fs_le32_to_cpu(*((__u32 *)buf)) + 4;
+ } else {
+ len = strnlen(buf, fs->blocksize);
+ }
if (len == fs->blocksize)
return 0;
} else if (inode->i_flags & EXT4_INLINE_DATA_FL) {
return 0;
}
if (len != inode->i_size)
- return 0;
+ if ((inode->i_flags & EXT4_ENCRYPT_FL) == 0)
+ return 0;
return 1;
}
return;
}
+ /* check if there is no place for an EA header */
+ if (inode->i_extra_isize >= max - sizeof(__u32))
+ return;
+
eamagic = (__u32 *) (((char *) inode) + EXT2_GOOD_OLD_INODE_SIZE +
inode->i_extra_isize);
if (*eamagic == EXT2_EXT_ATTR_MAGIC) {
* data. If it's true, we will treat it as a directory.
*/
- extent_fs = (ctx->fs->super->s_feature_incompat &
- EXT3_FEATURE_INCOMPAT_EXTENTS);
- inlinedata_fs = (ctx->fs->super->s_feature_incompat &
- EXT4_FEATURE_INCOMPAT_INLINE_DATA);
+ extent_fs = ext2fs_has_feature_extents(ctx->fs->super);
+ inlinedata_fs = ext2fs_has_feature_inline_data(ctx->fs->super);
if (inlinedata_fs && (inode->i_flags & EXT4_INLINE_DATA_FL)) {
size_t size;
__u32 dotdot;
int dirty = 0;
/* Both feature flags not set? Just run the regular checks */
- if (!EXT2_HAS_INCOMPAT_FEATURE(fs->super,
- EXT3_FEATURE_INCOMPAT_EXTENTS) &&
- !EXT2_HAS_INCOMPAT_FEATURE(fs->super,
- EXT4_FEATURE_INCOMPAT_INLINE_DATA))
+ if (!ext2fs_has_feature_extents(fs->super) &&
+ !ext2fs_has_feature_inline_data(fs->super))
return 0;
/* Clear both flags if it's a special file */
}
/* If it looks short enough to be inline data, try to clear extents */
- if (EXT2_INODE_SIZE(fs->super) > EXT2_GOOD_OLD_INODE_SIZE)
- max_inline_ea_size = EXT2_INODE_SIZE(fs->super) -
+ if (inode_size > EXT2_GOOD_OLD_INODE_SIZE)
+ max_inline_ea_size = inode_size -
(EXT2_GOOD_OLD_INODE_SIZE +
((struct ext2_inode_large *)inode)->i_extra_isize);
else
return 0;
}
+/*
+ * Issue readahead for the inode table blocks of the groups following
+ * *group, so the sequential inode scan finds them already cached.
+ *
+ * On success, *group and *next_ino are advanced to the point at which
+ * the next readahead should be triggered.  On any failure both are
+ * pushed past the end of the filesystem, which disables further itable
+ * readahead for this pass.
+ */
+static void pass1_readahead(e2fsck_t ctx, dgrp_t *group, ext2_ino_t *next_ino)
+{
+	ext2_ino_t inodes_in_group = 0, inodes_per_block, inodes_per_buffer;
+	dgrp_t start = *group, grp;
+	blk64_t blocks_to_read = 0;
+	/*
+	 * Preset err to a nonzero value so that the readahead_kb == 0
+	 * short-circuit below takes the "disable readahead" path at out:.
+	 */
+	errcode_t err = EXT2_ET_INVALID_ARGUMENT;
+
+	if (ctx->readahead_kb == 0)
+		goto out;
+
+	/* Keep iterating groups until we have enough to readahead */
+	inodes_per_block = EXT2_INODES_PER_BLOCK(ctx->fs->super);
+	for (grp = start; grp < ctx->fs->group_desc_count; grp++) {
+		/* Uninitialized inode tables need not be read at all */
+		if (ext2fs_bg_flags_test(ctx->fs, grp, EXT2_BG_INODE_UNINIT))
+			continue;
+		inodes_in_group = ctx->fs->super->s_inodes_per_group -
+					ext2fs_bg_itable_unused(ctx->fs, grp);
+		/* Round up to whole itable blocks that contain live inodes */
+		blocks_to_read += (inodes_in_group + inodes_per_block - 1) /
+					inodes_per_block;
+		if (blocks_to_read * ctx->fs->blocksize >
+		    ctx->readahead_kb * 1024)
+			break;
+	}
+
+	err = e2fsck_readahead(ctx->fs, E2FSCK_READA_ITABLE, start,
+			       grp - start + 1);
+	/*
+	 * EAGAIN signals resource pressure; halve the readahead budget
+	 * for next time and treat this round as a (harmless) success.
+	 */
+	if (err == EAGAIN) {
+		ctx->readahead_kb /= 2;
+		err = 0;
+	}
+
+out:
+	if (err) {
+		/* Error; disable itable readahead */
+		*group = ctx->fs->group_desc_count;
+		*next_ino = ctx->fs->super->s_inodes_count;
+	} else {
+		/*
+		 * Don't do more readahead until we've reached the first inode
+		 * of the last inode scan buffer block for the last group.
+		 */
+		*group = grp + 1;
+		inodes_per_buffer = (ctx->inode_buffer_blocks ?
+				     ctx->inode_buffer_blocks :
+				     EXT2_INODE_SCAN_DEFAULT_BUFFER_BLOCKS) *
+				    ctx->fs->blocksize /
+				    EXT2_INODE_SIZE(ctx->fs->super);
+		inodes_in_group--;
+		*next_ino = inodes_in_group -
+			    (inodes_in_group % inodes_per_buffer) + 1 +
+			    (grp * ctx->fs->super->s_inodes_per_group);
+	}
+}
+
+/*
+ * Check if the passed ino is one of the used superblock quota inodes.
+ *
+ * Before the quota inodes were journaled, older superblock quota inodes
+ * were just regular files in the filesystem and not reserved inodes. This
+ * checks if the passed ino is one of the s_*_quota_inum superblock fields,
+ * which may not always be the same as the EXT4_*_QUOTA_INO fields.
+ */
+static int quota_inum_is_super(struct ext2_super_block *sb, ext2_ino_t ino)
+{
+	enum quota_type qtype;
+
+	/* Compare ino against every per-type s_*_quota_inum field */
+	for (qtype = 0; qtype < MAXQUOTAS; qtype++)
+		if (*quota_sb_inump(sb, qtype) == ino)
+			return 1;
+
+	return 0;
+}
+
+/*
+ * Check if the passed ino is one of the reserved quota inodes.
+ * This checks if the inode number is one of the reserved EXT4_*_QUOTA_INO
+ * inodes. These inodes may or may not be in use by the quota feature.
+ */
+static int quota_inum_is_reserved(ext2_filsys fs, ext2_ino_t ino)
+{
+	enum quota_type qtype;
+
+	/* Compare ino against each reserved EXT4_*_QUOTA_INO number */
+	for (qtype = 0; qtype < MAXQUOTAS; qtype++)
+		if (quota_type2inum(qtype, fs->super) == ino)
+			return 1;
+
+	return 0;
+}
+
void e2fsck_pass1(e2fsck_t ctx)
{
int i;
unsigned int save_type;
int imagic_fs, extent_fs, inlinedata_fs;
int low_dtime_check = 1;
- int inode_size;
+ int inode_size = EXT2_INODE_SIZE(fs->super);
int failed_csum = 0;
+ ext2_ino_t ino_threshold = 0;
+ dgrp_t ra_group = 0;
init_resource_track(&rtrack, ctx->fs->io);
clear_problem_context(&pctx);
+ /* If we can do readahead, figure out how many groups to pull in. */
+ if (!e2fsck_can_readahead(ctx->fs))
+ ctx->readahead_kb = 0;
+ else if (ctx->readahead_kb == ~0ULL)
+ ctx->readahead_kb = e2fsck_guess_readahead(ctx->fs);
+ pass1_readahead(ctx, &ra_group, &ino_threshold);
+
if (!(ctx->options & E2F_OPT_PREEN))
fix_problem(ctx, PR_1_PASS_HEADER, &pctx);
- if ((fs->super->s_feature_compat & EXT2_FEATURE_COMPAT_DIR_INDEX) &&
+ if (ext2fs_has_feature_dir_index(fs->super) &&
!(ctx->options & E2F_OPT_NO)) {
if (ext2fs_u32_list_create(&ctx->dirs_to_hash, 50))
ctx->dirs_to_hash = 0;
}
#undef EXT2_BPP
- imagic_fs = (sb->s_feature_compat & EXT2_FEATURE_COMPAT_IMAGIC_INODES);
- extent_fs = (sb->s_feature_incompat & EXT3_FEATURE_INCOMPAT_EXTENTS);
- inlinedata_fs = (sb->s_feature_incompat &
- EXT4_FEATURE_INCOMPAT_INLINE_DATA);
+ imagic_fs = ext2fs_has_feature_imagic_inodes(sb);
+ extent_fs = ext2fs_has_feature_extents(sb);
+ inlinedata_fs = ext2fs_has_feature_inline_data(sb);
/*
* Allocate bitmaps structures
ctx->flags |= E2F_FLAG_ABORT;
return;
}
- inode_size = EXT2_INODE_SIZE(fs->super);
inode = (struct ext2_inode *)
e2fsck_allocate_memory(ctx, inode_size, "scratch inode");
fs->super->s_mkfs_time < fs->super->s_inodes_count))
low_dtime_check = 0;
- if ((fs->super->s_feature_incompat & EXT4_FEATURE_INCOMPAT_MMP) &&
+ if (ext2fs_has_feature_mmp(fs->super) &&
fs->super->s_mmp_block > fs->super->s_first_data_block &&
fs->super->s_mmp_block < ext2fs_blocks_count(fs->super))
ext2fs_mark_block_bitmap2(ctx->block_found_map,
old_op = ehandler_operation(_("getting next inode from scan"));
pctx.errcode = ext2fs_get_next_inode_full(scan, &ino,
inode, inode_size);
+ if (ino > ino_threshold)
+ pass1_readahead(ctx, &ra_group, &ino_threshold);
ehandler_operation(old_op);
if (ctx->flags & E2F_FLAG_SIGNAL_MASK)
- return;
+ goto endit;
if (pctx.errcode == EXT2_ET_BAD_BLOCK_IN_INODE_TABLE) {
/*
* If badblocks says badblocks is bad, offer to clear
pctx.errcode = ext2fs_inline_data_size(fs, ino, &size);
if (!pctx.errcode && size &&
- !fix_problem(ctx, PR_1_INLINE_DATA_FEATURE, &pctx)) {
- sb->s_feature_incompat |=
- EXT4_FEATURE_INCOMPAT_INLINE_DATA;
+ fix_problem(ctx, PR_1_INLINE_DATA_FEATURE, &pctx)) {
+ ext2fs_set_feature_inline_data(sb);
ext2fs_mark_super_dirty(fs);
inlinedata_fs = 1;
- } else if (!fix_problem(ctx, PR_1_INLINE_DATA_SET, &pctx)) {
+ } else if (fix_problem(ctx, PR_1_INLINE_DATA_SET, &pctx)) {
e2fsck_clear_inode(ctx, ino, inode, 0, "pass1");
/* skip FINISH_INODE_LOOP */
continue;
ctx->flags |= E2F_FLAG_ABORT;
goto endit;
}
+ if (LINUX_S_ISLNK(inode->i_mode))
+ inode->i_flags &= ~EXT4_INLINE_DATA_FL;
e2fsck_write_inode(ctx, ino, inode,
"pass1");
failed_csum = 0;
if ((ext2fs_extent_header_verify(inode->i_block,
sizeof(inode->i_block)) == 0) &&
fix_problem(ctx, PR_1_EXTENT_FEATURE, &pctx)) {
- sb->s_feature_incompat |= EXT3_FEATURE_INCOMPAT_EXTENTS;
+ ext2fs_set_feature_extents(sb);
ext2fs_mark_super_dirty(fs);
extent_fs = 1;
} else if (fix_problem(ctx, PR_1_EXTENTS_SET, &pctx)) {
inode_size, "pass1");
failed_csum = 0;
}
- } else if ((ino == EXT4_USR_QUOTA_INO) ||
- (ino == EXT4_GRP_QUOTA_INO)) {
+ } else if (quota_inum_is_reserved(fs, ino)) {
ext2fs_mark_inode_bitmap2(ctx->inode_used_map, ino);
- if ((fs->super->s_feature_ro_compat &
- EXT4_FEATURE_RO_COMPAT_QUOTA) &&
- ((fs->super->s_usr_quota_inum == ino) ||
- (fs->super->s_grp_quota_inum == ino))) {
+ if (ext2fs_has_feature_quota(fs->super) &&
+ quota_inum_is_super(fs->super, ino)) {
if (!LINUX_S_ISREG(inode->i_mode) &&
fix_problem(ctx, PR_1_QUOTA_BAD_MODE,
&pctx)) {
(LINUX_S_ISDIR(inode->i_mode) && inode->i_dir_acl))
mark_inode_bad(ctx, ino);
if ((fs->super->s_creator_os == EXT2_OS_LINUX) &&
- !(fs->super->s_feature_incompat &
- EXT4_FEATURE_INCOMPAT_64BIT) &&
+ !ext2fs_has_feature_64bit(fs->super) &&
inode->osd2.linux2.l_i_file_acl_high != 0)
mark_inode_bad(ctx, ino);
if ((fs->super->s_creator_os == EXT2_OS_LINUX) &&
- !(fs->super->s_feature_ro_compat &
- EXT4_FEATURE_RO_COMPAT_HUGE_FILE) &&
+ !ext2fs_has_feature_huge_file(fs->super) &&
(inode->osd2.linux2.l_i_blocks_hi != 0))
mark_inode_bad(ctx, ino);
if (inode->i_flags & EXT2_IMAGIC_FL) {
inode->i_block[EXT2_TIND_BLOCK] ||
ext2fs_file_acl_block(fs, inode))) {
inodes_to_process[process_inode_count].ino = ino;
- inodes_to_process[process_inode_count].inode = *inode;
+ inodes_to_process[process_inode_count].inode =
+ *(struct ext2_inode_large *)inode;
process_inode_count++;
} else
check_blocks(ctx, &pctx, block_buf);
}
e2fsck_pass1_dupblocks(ctx, block_buf);
}
+ ctx->flags |= E2F_FLAG_ALLOC_OK;
ext2fs_free_mem(&inodes_to_process);
endit:
e2fsck_use_inode_shortcuts(ctx, 0);
if ((ctx->flags & E2F_FLAG_SIGNAL_MASK) == 0)
print_resource_track(ctx, _("Pass 1"), &rtrack, ctx->fs->io);
+ else
+ ctx->invalid_bitmaps++;
}
#undef FINISH_INODE_LOOP
* Or if the extended attribute block is an invalid block,
* then the inode is also corrupted.
*/
- if (!(fs->super->s_feature_compat & EXT2_FEATURE_COMPAT_EXT_ATTR) ||
+ if (!ext2fs_has_feature_xattr(fs->super) ||
(blk < fs->super->s_first_data_block) ||
(blk >= ext2fs_blocks_count(fs->super))) {
mark_inode_bad(ctx, ino);
if ((!LINUX_S_ISDIR(inode->i_mode) &&
fix_problem(ctx, PR_1_HTREE_NODIR, pctx)) ||
- (!(fs->super->s_feature_compat & EXT2_FEATURE_COMPAT_DIR_INDEX) &&
+ (!ext2fs_has_feature_dir_index(fs->super) &&
fix_problem(ctx, PR_1_HTREE_SET, pctx)))
return 1;
pctx->errcode = ext2fs_extent_get_info(ehandle, &info);
if (pctx->errcode)
return;
+ if (!(ctx->options & E2F_OPT_FIXES_ONLY) &&
+ !pb->eti.force_rebuild) {
+ struct extent_tree_level *etl;
+
+ etl = pb->eti.ext_info + info.curr_level;
+ etl->num_extents += info.num_entries;
+ etl->max_extents += info.max_entries;
+ /*
+ * Implementation wart: Splitting extent blocks when appending
+ * will leave the old block with one free entry. Therefore
+ * unless the node is totally full, pretend that a non-root
+ * extent block can hold one fewer entry than it actually does,
+ * so that we don't repeatedly rebuild the extent tree.
+ */
+ if (info.curr_level && info.num_entries < info.max_entries)
+ etl->max_extents--;
+ }
pctx->errcode = ext2fs_extent_get(ehandle, EXT2_EXTENT_FIRST_SIB,
&extent);
ext2_ino_t ino = pctx->ino;
errcode_t retval;
blk64_t eof_lblk;
+ struct ext3_extent_header *eh;
+
+ /* Check for a proper extent header... */
+ eh = (struct ext3_extent_header *) &inode->i_block[0];
+ retval = ext2fs_extent_header_verify(eh, sizeof(inode->i_block));
+ if (retval) {
+ if (fix_problem(ctx, PR_1_MISSING_EXTENT_HEADER, pctx))
+ e2fsck_clear_inode(ctx, ino, inode, 0,
+ "check_blocks_extents");
+ pctx->errcode = 0;
+ return;
+ }
+ /* ...since this function doesn't fail if i_block is zeroed. */
pctx->errcode = ext2fs_extent_open2(fs, ino, inode, &ehandle);
if (pctx->errcode) {
if (fix_problem(ctx, PR_1_READ_EXTENT, pctx))
retval = ext2fs_extent_get_info(ehandle, &info);
if (retval == 0) {
- if (info.max_depth >= MAX_EXTENT_DEPTH_COUNT)
- info.max_depth = MAX_EXTENT_DEPTH_COUNT-1;
- ctx->extent_depth_count[info.max_depth]++;
+ int max_depth = info.max_depth;
+
+ if (max_depth >= MAX_EXTENT_DEPTH_COUNT)
+ max_depth = MAX_EXTENT_DEPTH_COUNT-1;
+ ctx->extent_depth_count[max_depth]++;
}
+ /* Check maximum extent depth */
+ pctx->blk = info.max_depth;
+ pctx->blk2 = ext2fs_max_extent_depth(ehandle);
+ if (pctx->blk2 < pctx->blk &&
+ fix_problem(ctx, PR_1_EXTENT_BAD_MAX_DEPTH, pctx))
+ pb->eti.force_rebuild = 1;
+
+ /* Can we collect extent tree level stats? */
+ pctx->blk = MAX_EXTENT_DEPTH_COUNT;
+ if (pctx->blk2 > pctx->blk)
+ fix_problem(ctx, PR_1E_MAX_EXTENT_TREE_DEPTH, pctx);
+ memset(pb->eti.ext_info, 0, sizeof(pb->eti.ext_info));
+ pb->eti.ino = pb->ino;
+
pb->region = region_create(0, info.max_lblk);
if (!pb->region) {
ext2fs_extent_free(ehandle);
region_free(pb->region);
pb->region = NULL;
ext2fs_extent_free(ehandle);
+
+ /* Rebuild unless it's a dir and we're rehashing it */
+ if (LINUX_S_ISDIR(inode->i_mode) &&
+ e2fsck_dir_will_be_rehashed(ctx, ino))
+ return;
+
+ if (ctx->options & E2F_OPT_CONVERT_BMAP)
+ e2fsck_rebuild_extents_later(ctx, ino);
+ else
+ e2fsck_should_rebuild_extents(ctx, pctx, &pb->eti, &info);
}
/*
pb.pctx = pctx;
pb.ctx = ctx;
pb.inode_modified = 0;
+ pb.eti.force_rebuild = 0;
pctx->ino = ino;
pctx->errcode = 0;
- extent_fs = (ctx->fs->super->s_feature_incompat &
- EXT3_FEATURE_INCOMPAT_EXTENTS);
- inlinedata_fs = (ctx->fs->super->s_feature_incompat &
- EXT4_FEATURE_INCOMPAT_INLINE_DATA);
-
- if (inode->i_flags & EXT2_COMPRBLK_FL) {
- if (fs->super->s_feature_incompat &
- EXT2_FEATURE_INCOMPAT_COMPRESSION)
- pb.compressed = 1;
- else {
- if (fix_problem(ctx, PR_1_COMPR_SET, pctx)) {
- inode->i_flags &= ~EXT2_COMPRBLK_FL;
- dirty_inode++;
- }
- }
- }
+ extent_fs = ext2fs_has_feature_extents(ctx->fs->super);
+ inlinedata_fs = ext2fs_has_feature_inline_data(ctx->fs->super);
if (check_ext_attr(ctx, pctx, block_buf)) {
if (ctx->flags & E2F_FLAG_SIGNAL_MASK)
"check_blocks");
fs->flags = (flags & EXT2_FLAG_IGNORE_CSUM_ERRORS) |
(fs->flags & ~EXT2_FLAG_IGNORE_CSUM_ERRORS);
+
+ if (ctx->options & E2F_OPT_CONVERT_BMAP) {
+#ifdef DEBUG
+ printf("bmap rebuild ino=%d\n", ino);
+#endif
+ if (!LINUX_S_ISDIR(inode->i_mode) ||
+ !e2fsck_dir_will_be_rehashed(ctx, ino))
+ e2fsck_rebuild_extents_later(ctx, ino);
+ }
}
}
end_problem_latch(ctx, PR_LATCH_BLOCK);
inode->i_flags &= ~EXT2_INDEX_FL;
dirty_inode++;
} else {
-#ifdef ENABLE_HTREE
e2fsck_add_dx_dir(ctx, ino, pb.last_block+1);
-#endif
}
}
}
}
- if (ino == EXT2_ROOT_INO || ino >= EXT2_FIRST_INODE(ctx->fs->super)) {
+ if (ino != quota_type2inum(PRJQUOTA, fs->super) &&
+ (ino == EXT2_ROOT_INO || ino >= EXT2_FIRST_INODE(ctx->fs->super))) {
quota_data_add(ctx->qctx, inode, ino,
pb.num_blocks * fs->blocksize);
quota_data_inodes(ctx->qctx, inode, ino, +1);
}
- if (!(fs->super->s_feature_ro_compat &
- EXT4_FEATURE_RO_COMPAT_HUGE_FILE) ||
+ if (!ext2fs_has_feature_huge_file(fs->super) ||
!(inode->i_flags & EXT4_HUGE_FILE_FL))
pb.num_blocks *= (fs->blocksize / 512);
pb.num_blocks *= EXT2FS_CLUSTER_RATIO(fs);
ctx->large_files++;
if ((fs->super->s_creator_os == EXT2_OS_LINUX) &&
((pb.num_blocks != ext2fs_inode_i_blocks(fs, inode)) ||
- ((fs->super->s_feature_ro_compat &
- EXT4_FEATURE_RO_COMPAT_HUGE_FILE) &&
+ (ext2fs_has_feature_huge_file(fs->super) &&
(inode->i_flags & EXT4_HUGE_FILE_FL) &&
(inode->osd2.linux2.l_i_blocks_hi != 0)))) {
pctx->num = pb.num_blocks;
pctx->num = 0;
}
+ /*
+ * The kernel gets mad if we ask it to allocate bigalloc clusters to
+ * a block mapped file, so rebuild it as an extent file. We can skip
+ * symlinks because they're never rewritten.
+ */
+ if (ext2fs_has_feature_bigalloc(fs->super) &&
+ (LINUX_S_ISREG(inode->i_mode) || LINUX_S_ISDIR(inode->i_mode)) &&
+ ext2fs_inode_data_blocks2(fs, inode) > 0 &&
+ (ino == EXT2_ROOT_INO || ino >= EXT2_FIRST_INO(fs->super)) &&
+ !(inode->i_flags & (EXT4_EXTENTS_FL | EXT4_INLINE_DATA_FL)) &&
+ fix_problem(ctx, PR_1_NO_BIGALLOC_BLOCKMAP_FILES, pctx)) {
+ pctx->errcode = e2fsck_rebuild_extents_later(ctx, ino);
+ if (pctx->errcode)
+ goto out;
+ }
+
if (ctx->dirs_to_hash && pb.is_dir &&
!(ctx->lost_and_found && ctx->lost_and_found == ino) &&
!(inode->i_flags & EXT2_INDEX_FL) &&
pctx = p->pctx;
ctx = p->ctx;
- if (p->compressed && (blk == EXT2FS_COMPRESSED_BLKADDR)) {
- /* todo: Check that the comprblk_fl is high, that the
- blkaddr pattern looks right (all non-holes up to
- first EXT2FS_COMPRESSED_BLKADDR, then all
- EXT2FS_COMPRESSED_BLKADDR up to end of cluster),
- that the feature_incompat bit is high, and that the
- inode is a regular file. If we're doing a "full
- check" (a concept introduced to e2fsck by e2compr,
- meaning that we look at data blocks as well as
- metadata) then call some library routine that
- checks the compressed data. I'll have to think
- about this, because one particularly important
- problem to be able to fix is to recalculate the
- cluster size if necessary. I think that perhaps
- we'd better do most/all e2compr-specific checks
- separately, after the non-e2compr checks. If not
- doing a full check, it may be useful to test that
- the personality is linux; e.g. if it isn't then
- perhaps this really is just an illegal block. */
- return 0;
- }
-
/*
* For a directory, add logical block zero for processing even if it's
* not mapped or we'll be perennially stuck with broken "." and ".."
* file be contiguous. (Which can never be true for really
* big files that are greater than a block group.)
*/
- if (!HOLE_BLKADDR(p->previous_block) && p->ino != EXT2_RESIZE_INO) {
+ if (p->previous_block && p->ino != EXT2_RESIZE_INO) {
if (p->previous_block+1 != blk) {
if (ctx->options & E2F_OPT_FRAGCHECK) {
char type = '?';
struct problem_context *pctx;
e2fsck_t ctx;
- /*
- * Note: This function processes blocks for the bad blocks
- * inode, which is never compressed. So we don't use HOLE_BLKADDR().
- */
-
if (!blk)
return 0;
* within the flex_bg, and if that fails then try finding the
* space anywhere in the filesystem.
*/
- is_flexbg = EXT2_HAS_INCOMPAT_FEATURE(fs->super,
- EXT4_FEATURE_INCOMPAT_FLEX_BG);
+ is_flexbg = ext2fs_has_feature_flex_bg(fs->super);
if (is_flexbg) {
flexbg_size = 1 << fs->super->s_log_groups_per_flex;
flexbg = group / flexbg_size;
return retval;
}
- retval = ext2fs_new_block2(fs, goal, 0, &new_block);
+ retval = ext2fs_new_block2(fs, goal, fs->block_map, &new_block);
if (retval)
return retval;
}
return (0);
}
+/*
+ * Allocate a run of up to len blocks starting near goal.
+ *
+ * Prefer e2fsck's own map of in-use blocks (block_found_map) when it
+ * exists, since during a check it is more accurate than the on-disk
+ * bitmap; otherwise fall back to fs->block_map, loading it on demand.
+ * The resulting start block and length are returned in *pblk / *plen.
+ */
+static errcode_t e2fsck_new_range(ext2_filsys fs, int flags, blk64_t goal,
+				  blk64_t len, blk64_t *pblk, blk64_t *plen)
+{
+	e2fsck_t ctx = (e2fsck_t) fs->priv_data;
+	errcode_t retval;
+
+	if (ctx->block_found_map)
+		return ext2fs_new_range(fs, flags, goal, len,
+					ctx->block_found_map, pblk, plen);
+
+	/* No e2fsck bitmap; make sure the on-disk bitmap is loaded */
+	if (!fs->block_map) {
+		retval = ext2fs_read_block_bitmap(fs);
+		if (retval)
+			return retval;
+	}
+
+	return ext2fs_new_range(fs, flags, goal, len, fs->block_map,
+				pblk, plen);
+}
+
static void e2fsck_block_alloc_stats(ext2_filsys fs, blk64_t blk, int inuse)
{
e2fsck_t ctx = (e2fsck_t) fs->priv_data;
}
}
+/*
+ * Record an allocation (inuse > 0) or free (inuse < 0) of the block
+ * range [blk, blk+num) in e2fsck's in-memory block_found_map.  A free
+ * that would touch any block marked as critical filesystem metadata is
+ * silently refused.  A no-op when block_found_map does not exist.
+ */
+static void e2fsck_block_alloc_stats_range(ext2_filsys fs, blk64_t blk,
+					   blk_t num, int inuse)
+{
+	e2fsck_t ctx = (e2fsck_t) fs->priv_data;
+
+	/* Never free a critical metadata block */
+	if (ctx->block_found_map &&
+	    ctx->block_metadata_map &&
+	    inuse < 0 &&
+	    ext2fs_test_block_bitmap_range2(ctx->block_metadata_map, blk, num))
+		return;
+
+	if (ctx->block_found_map) {
+		if (inuse > 0)
+			ext2fs_mark_block_bitmap_range2(ctx->block_found_map,
+							blk, num);
+		else
+			ext2fs_unmark_block_bitmap_range2(ctx->block_found_map,
+							blk, num);
+	}
+}
+
void e2fsck_use_inode_shortcuts(e2fsck_t ctx, int use_shortcuts)
{
ext2_filsys fs = ctx->fs;
ext2fs_set_alloc_block_callback(ctx->fs, e2fsck_get_alloc_block, 0);
ext2fs_set_block_alloc_stats_callback(ctx->fs,
e2fsck_block_alloc_stats, 0);
+ ext2fs_set_new_range_callback(ctx->fs, e2fsck_new_range, NULL);
+ ext2fs_set_block_alloc_stats_range_callback(ctx->fs,
+ e2fsck_block_alloc_stats_range, NULL);
}