static void mark_table_blocks(e2fsck_t ctx);
static void alloc_bb_map(e2fsck_t ctx);
static void alloc_imagic_map(e2fsck_t ctx);
-static void mark_inode_bad(e2fsck_t ctx, ino_t ino);
-static void add_casefolded_dir(e2fsck_t ctx, ino_t ino);
+static void mark_inode_bad(e2fsck_t ctx, ext2_ino_t ino);
+static void add_casefolded_dir(e2fsck_t ctx, ext2_ino_t ino);
static void handle_fs_bad_blocks(e2fsck_t ctx);
static void process_inodes(e2fsck_t ctx, char *block_buf);
static EXT2_QSORT_TYPE process_inode_cmp(const void *a, const void *b);
blk64_t *quota_blocks)
{
struct ext2_inode inode;
- __u32 hash;
+ __u32 hash, signed_hash;
errcode_t retval;
/* Check if inode is within valid range */
e2fsck_read_inode(ctx, entry->e_value_inum, &inode, "pass1");
- retval = ext2fs_ext_attr_hash_entry2(ctx->fs, entry, NULL, &hash);
+ retval = ext2fs_ext_attr_hash_entry3(ctx->fs, entry, NULL, &hash,
+ &signed_hash);
if (retval) {
com_err("check_large_ea_inode", retval,
_("while hashing entry with e_value_inum = %u"),
fatal_error(ctx, 0);
}
- if (hash == entry->e_hash) {
+ if ((hash == entry->e_hash) || (signed_hash == entry->e_hash)) {
*quota_blocks = size_to_quota_blocks(ctx->fs,
entry->e_value_size);
} else {
static void inc_ea_inode_refs(e2fsck_t ctx, struct problem_context *pctx,
struct ext2_ext_attr_entry *first, void *end)
{
- struct ext2_ext_attr_entry *entry;
+ struct ext2_ext_attr_entry *entry = first;
+ struct ext2_ext_attr_entry *np = EXT2_EXT_ATTR_NEXT(entry);
- for (entry = first;
- (void *)entry < end && !EXT2_EXT_IS_LAST_ENTRY(entry);
- entry = EXT2_EXT_ATTR_NEXT(entry)) {
+ while ((void *) entry < end && (void *) np < end &&
+ !EXT2_EXT_IS_LAST_ENTRY(entry)) {
if (!entry->e_value_inum)
- continue;
+ goto next;
if (!ctx->ea_inode_refs) {
pctx->errcode = ea_refcount_create(0,
&ctx->ea_inode_refs);
}
ea_refcount_increment(ctx->ea_inode_refs, entry->e_value_inum,
0);
+ next:
+ entry = np;
+ np = EXT2_EXT_ATTR_NEXT(entry);
}
}
}
hash = ext2fs_ext_attr_hash_entry(entry,
- start + entry->e_value_offs);
+ start + entry->e_value_offs);
+ if (entry->e_hash != 0 && entry->e_hash != hash)
+ hash = ext2fs_ext_attr_hash_entry_signed(entry,
+ start + entry->e_value_offs);
/* e_hash may be 0 in an older inode's EA */
if (entry->e_hash != 0 && entry->e_hash != hash) {
}
static errcode_t get_inline_data_ea_size(ext2_filsys fs, ext2_ino_t ino,
+ struct ext2_inode *inode,
size_t *sz)
{
void *p;
if (retval)
return retval;
- retval = ext2fs_xattrs_read(handle);
+ retval = ext2fs_xattrs_read_inode(handle,
+ (struct ext2_inode_large *)inode);
if (retval)
goto err;
ext2_ino_t ino_threshold = 0;
dgrp_t ra_group = 0;
struct ea_quota ea_ibody_quota;
+ time_t tm;
init_resource_track(&rtrack, ctx->fs->io);
clear_problem_context(&pctx);
goto endit;
}
block_buf = (char *) e2fsck_allocate_memory(ctx, fs->blocksize * 3,
- "block interate buffer");
+ "block iterate buffer");
if (EXT2_INODE_SIZE(fs->super) == EXT2_GOOD_OLD_INODE_SIZE)
e2fsck_use_inode_shortcuts(ctx, 1);
e2fsck_intercept_block_allocations(ctx);
if (ctx->progress && ((ctx->progress)(ctx, 1, 0,
ctx->fs->group_desc_count)))
goto endit;
- if ((fs->super->s_wtime &&
- fs->super->s_wtime < fs->super->s_inodes_count) ||
- (fs->super->s_mtime &&
- fs->super->s_mtime < fs->super->s_inodes_count) ||
- (fs->super->s_mkfs_time &&
- fs->super->s_mkfs_time < fs->super->s_inodes_count))
+
+ if (((tm = ext2fs_get_tstamp(fs->super, s_wtime)) &&
+ tm < fs->super->s_inodes_count) ||
+ ((tm = ext2fs_get_tstamp(fs->super, s_mtime)) &&
+ tm < fs->super->s_inodes_count) ||
+ ((tm = ext2fs_get_tstamp(fs->super, s_mkfs_time)) &&
+ tm < fs->super->s_inodes_count))
low_dtime_check = 0;
if (ext2fs_has_feature_mmp(fs->super) &&
(ino >= EXT2_FIRST_INODE(fs->super))) {
size_t size = 0;
- pctx.errcode = get_inline_data_ea_size(fs, ino, &size);
+ pctx.errcode = get_inline_data_ea_size(fs, ino, inode,
+ &size);
if (!pctx.errcode &&
fix_problem(ctx, PR_1_INLINE_DATA_FEATURE, &pctx)) {
ext2fs_set_feature_inline_data(sb);
flags = fs->flags;
if (failed_csum)
fs->flags |= EXT2_FLAG_IGNORE_CSUM_ERRORS;
- err = get_inline_data_ea_size(fs, ino, &size);
+ err = get_inline_data_ea_size(fs, ino, inode, &size);
fs->flags = (flags & EXT2_FLAG_IGNORE_CSUM_ERRORS) |
(fs->flags & ~EXT2_FLAG_IGNORE_CSUM_ERRORS);
inode_size, "pass1");
failed_csum = 0;
}
+ } else if (ino == fs->super->s_orphan_file_inum) {
+ ext2fs_mark_inode_bitmap2(ctx->inode_used_map, ino);
+ if (ext2fs_has_feature_orphan_file(fs->super)) {
+ if (!LINUX_S_ISREG(inode->i_mode) &&
+ fix_problem(ctx, PR_1_ORPHAN_FILE_BAD_MODE,
+ &pctx)) {
+ inode->i_mode = LINUX_S_IFREG;
+ e2fsck_write_inode(ctx, ino, inode,
+ "pass1");
+ failed_csum = 0;
+ }
+ check_blocks(ctx, &pctx, block_buf, NULL);
+ FINISH_INODE_LOOP(ctx, ino, &pctx, failed_csum);
+ continue;
+ }
+ if ((inode->i_links_count ||
+ inode->i_blocks || inode->i_block[0]) &&
+ fix_problem(ctx, PR_1_ORPHAN_FILE_NOT_CLEAR,
+ &pctx)) {
+ memset(inode, 0, inode_size);
+ ext2fs_icount_store(ctx->inode_link_info, ino,
+ 0);
+ e2fsck_write_inode_full(ctx, ino, inode,
+ inode_size, "pass1");
+ failed_csum = 0;
+ }
} else if (ino < EXT2_FIRST_INODE(fs->super)) {
problem_t problem = 0;
if (!pctx.errcode) {
e2fsck_read_inode(ctx, EXT2_RESIZE_INO, inode,
"recreate inode");
- inode->i_mtime = ctx->now;
+ ext2fs_inode_xtime_set(inode, i_mtime, ctx->now);
e2fsck_write_inode(ctx, EXT2_RESIZE_INO, inode,
"recreate inode");
}
goto endit;
}
+ if (ctx->large_dirs && !ext2fs_has_feature_largedir(fs->super)) {
+ if (fix_problem(ctx, PR_2_FEATURE_LARGE_DIRS, &pctx)) {
+ ext2fs_set_feature_largedir(fs->super);
+ fs->flags &= ~EXT2_FLAG_MASTER_SB_ONLY;
+ ext2fs_mark_super_dirty(fs);
+ }
+ if (fs->super->s_rev_level == EXT2_GOOD_OLD_REV &&
+ fix_problem(ctx, PR_1_FS_REV_LEVEL, &pctx)) {
+ ext2fs_update_dynamic_rev(fs);
+ ext2fs_mark_super_dirty(fs);
+ }
+ }
+
if (ctx->block_dup_map) {
if (ctx->options & E2F_OPT_PREEN) {
clear_problem_context(&pctx);
/*
 * Mark an inode as being bad in some way
*/
-static void mark_inode_bad(e2fsck_t ctx, ino_t ino)
+static void mark_inode_bad(e2fsck_t ctx, ext2_ino_t ino)
{
struct problem_context pctx;
ext2fs_mark_inode_bitmap2(ctx->inode_bad_map, ino);
}
-static void add_casefolded_dir(e2fsck_t ctx, ino_t ino)
+static void add_casefolded_dir(e2fsck_t ctx, ext2_ino_t ino)
{
struct problem_context pctx;
break;
}
if (entry->e_value_inum == 0) {
- if (entry->e_value_offs + entry->e_value_size >
- fs->blocksize) {
+ if (entry->e_value_size > EXT2_XATTR_SIZE_MAX ||
+ (entry->e_value_offs + entry->e_value_size >
+ fs->blocksize)) {
if (fix_problem(ctx, PR_1_EA_BAD_VALUE, pctx))
goto clear_extattr;
break;
hash = ext2fs_ext_attr_hash_entry(entry, block_buf +
entry->e_value_offs);
+ if (entry->e_hash != hash)
+ hash = ext2fs_ext_attr_hash_entry_signed(entry,
+ block_buf + entry->e_value_offs);
if (entry->e_hash != hash) {
pctx->num = entry->e_hash;
return 1;
pctx->num = root->indirect_levels;
- if ((root->indirect_levels >= ext2_dir_htree_level(fs)) &&
+ /* if htree level is clearly too high, consider it to be broken */
+ if (root->indirect_levels > EXT4_HTREE_LEVEL &&
fix_problem(ctx, PR_1_HTREE_DEPTH, pctx))
return 1;
+ /* if level is only maybe too high, LARGE_DIR feature could be unset */
+ if (root->indirect_levels > ext2_dir_htree_level(fs) &&
+ !ext2fs_has_feature_largedir(fs->super)) {
+ int blockbits = EXT2_BLOCK_SIZE_BITS(fs->super) + 10;
+ unsigned idx_pb = 1 << (blockbits - 3);
+
+ /* compare inode size/blocks vs. max-sized 2-level htree */
+ if (EXT2_I_SIZE(pctx->inode) <
+ (idx_pb - 1) * (idx_pb - 2) << blockbits &&
+ pctx->inode->i_blocks <
+ (idx_pb - 1) * (idx_pb - 2) << (blockbits - 9) &&
+ fix_problem(ctx, PR_1_HTREE_DEPTH, pctx))
+ return 1;
+ }
+
+ if (root->indirect_levels > EXT4_HTREE_LEVEL_COMPAT ||
+ ext2fs_needs_large_file_feature(EXT2_I_SIZE(inode)))
+ ctx->large_dirs++;
+
return 0;
}
if (pctx->errcode)
return;
if (!(ctx->options & E2F_OPT_FIXES_ONLY) &&
- !pb->eti.force_rebuild) {
+ !pb->eti.force_rebuild &&
+ info.curr_level < MAX_EXTENT_DEPTH_COUNT) {
struct extent_tree_level *etl;
etl = pb->eti.ext_info + info.curr_level;
(extent.e_pblk + extent.e_len) >
ext2fs_blocks_count(ctx->fs->super))
problem = PR_1_EXTENT_ENDS_BEYOND;
- else if (is_leaf && is_dir &&
+ else if (is_leaf && is_dir && !pctx->inode->i_size_high &&
+ !ext2fs_has_feature_largedir(ctx->fs->super) &&
((extent.e_lblk + extent.e_len) >
(1U << (21 - ctx->fs->super->s_log_block_size))))
problem = PR_1_TOOBIG_DIR;
if (is_leaf && problem == 0 && extent.e_len > 0) {
#if 0
printf("extent_region(ino=%u, expect=%llu, "
- "lblk=%llu, len=%u)\n",
- pb->ino, pb->next_lblock,
- extent.e_lblk, extent.e_len);
+ "lblk=%llu, len=%u)\n", pb->ino,
+ (unsigned long long) pb->next_lblock,
+ (unsigned long long) extent.e_lblk,
+ extent.e_len);
#endif
if (extent.e_lblk < pb->next_lblock)
problem = PR_1_EXTENT_COLLISION;
if (extent.e_lblk != lblk) {
struct ext2_extent_info e_info;
- ext2fs_extent_get_info(ehandle, &e_info);
+ pctx->errcode = ext2fs_extent_get_info(ehandle,
+ &e_info);
+ if (pctx->errcode) {
+ pctx->str = "ext2fs_extent_get_info";
+ return;
+ }
pctx->blk = lblk;
pctx->blk2 = extent.e_lblk;
pctx->num = e_info.curr_level - 1;
}
if (ino != quota_type2inum(PRJQUOTA, fs->super) &&
+ ino != fs->super->s_orphan_file_inum &&
(ino == EXT2_ROOT_INO || ino >= EXT2_FIRST_INODE(ctx->fs->super)) &&
!(inode->i_flags & EXT4_EA_INODE_FL)) {
quota_data_add(ctx->qctx, (struct ext2_inode_large *) inode,
pb.num_blocks *= EXT2FS_CLUSTER_RATIO(fs);
#if 0
printf("inode %u, i_size = %u, last_block = %llu, i_blocks=%llu, num_blocks = %llu\n",
- ino, inode->i_size, pb.last_block, ext2fs_inode_i_blocks(fs, inode),
- pb.num_blocks);
+ ino, inode->i_size, (unsigned long long) pb.last_block,
+ (unsigned long long) ext2fs_inode_i_blocks(fs, inode),
+ (unsigned long long) pb.num_blocks);
#endif
+ size = EXT2_I_SIZE(inode);
if (pb.is_dir) {
- unsigned nblock = inode->i_size >> EXT2_BLOCK_SIZE_BITS(fs->super);
+ unsigned nblock = size >> EXT2_BLOCK_SIZE_BITS(fs->super);
if (inode->i_flags & EXT4_INLINE_DATA_FL) {
int flags;
size_t sz = 0;
EXT2_FLAG_IGNORE_CSUM_ERRORS) |
(ctx->fs->flags &
~EXT2_FLAG_IGNORE_CSUM_ERRORS);
- if (err || sz != inode->i_size) {
+ if (err || sz != size) {
bad_size = 7;
pctx->num = sz;
}
- } else if (inode->i_size & (fs->blocksize - 1))
+ } else if (size & (fs->blocksize - 1))
bad_size = 5;
else if (nblock > (pb.last_block + 1))
bad_size = 1;
bad_size = 2;
}
} else {
- size = EXT2_I_SIZE(inode);
if ((pb.last_init_lblock >= 0) &&
/* Do not allow initialized allocated blocks past i_size*/
(size < (__u64)pb.last_init_lblock * fs->blocksize) &&
pctx->num = (pb.last_block + 1) * fs->blocksize;
pctx->group = bad_size;
if (fix_problem(ctx, PR_1_BAD_I_SIZE, pctx)) {
- if (LINUX_S_ISDIR(inode->i_mode))
- pctx->num &= 0xFFFFFFFFULL;
ext2fs_inode_size_set(fs, inode, pctx->num);
if (EXT2_I_SIZE(inode) == 0 &&
(inode->i_flags & EXT4_INLINE_DATA_FL)) {
(unsigned long) pctx->ino, type,
(unsigned long) p->previous_block+1,
(unsigned long) blk,
- blockcnt);
+ (long long) blockcnt);
}
p->fragmented = 1;
}
}
if (p->is_dir && !ext2fs_has_feature_largedir(fs->super) &&
+ !pctx->inode->i_size_high &&
blockcnt > (1 << (21 - fs->super->s_log_block_size)))
problem = PR_1_TOOBIG_DIR;
if (p->is_dir && p->num_blocks + 1 >= p->max_blocks)
*/
is_flexbg = ext2fs_has_feature_flex_bg(fs->super);
if (is_flexbg) {
- flexbg_size = 1 << fs->super->s_log_groups_per_flex;
+ flexbg_size = 1U << fs->super->s_log_groups_per_flex;
flexbg = group / flexbg_size;
first_block = ext2fs_group_first_block2(fs,
flexbg_size * flexbg);