*
* Pass 1 is designed to stash away enough information so that the
* other passes should not need to read in the inode information
- * during the normal course of a filesystem check. (Althogh if an
+ * during the normal course of a filesystem check. (Although if an
* inconsistency is detected, other passes may need to read in an
* inode to fix it.)
*
#include "e2fsck.h"
#include <ext2fs/ext2_ext_attr.h>
+#include <e2p/e2p.h>
#include "problem.h"
static void alloc_imagic_map(e2fsck_t ctx);
static void mark_inode_bad(e2fsck_t ctx, ino_t ino);
static void add_encrypted_dir(e2fsck_t ctx, ino_t ino);
+static void add_casefolded_dir(e2fsck_t ctx, ino_t ino);
static void handle_fs_bad_blocks(e2fsck_t ctx);
static void process_inodes(e2fsck_t ctx, char *block_buf);
static EXT2_QSORT_TYPE process_inode_cmp(const void *a, const void *b);
int i;
/*
- * If the index flag is set, then this is a bogus
+ * If the index or extents flag is set, then this is a bogus
* device/fifo/socket
*/
- if (inode->i_flags & EXT2_INDEX_FL)
+ if (inode->i_flags & (EXT2_INDEX_FL | EXT4_EXTENTS_FL))
return 0;
/*
int e2fsck_pass1_check_symlink(ext2_filsys fs, ext2_ino_t ino,
struct ext2_inode *inode, char *buf)
{
+ unsigned int buflen;
unsigned int len;
- int i;
- ext2_extent_handle_t handle;
- struct ext2_extent_info info;
- struct ext2fs_extent extent;
if ((inode->i_size_high || inode->i_size == 0) ||
(inode->i_flags & EXT2_INDEX_FL))
return 0;
- if (inode->i_flags & EXT4_EXTENTS_FL) {
- if (inode->i_flags & EXT4_INLINE_DATA_FL)
- return 0;
- if (inode->i_size > fs->blocksize)
- return 0;
- if (ext2fs_extent_open2(fs, ino, inode, &handle))
- return 0;
- i = 0;
- if (ext2fs_extent_get_info(handle, &info) ||
- (info.num_entries != 1) ||
- (info.max_depth != 0))
- goto exit_extent;
- if (ext2fs_extent_get(handle, EXT2_EXTENT_ROOT, &extent) ||
- (extent.e_lblk != 0) ||
- (extent.e_len != 1) ||
- (extent.e_pblk < fs->super->s_first_data_block) ||
- (extent.e_pblk >= ext2fs_blocks_count(fs->super)))
- goto exit_extent;
- i = 1;
- exit_extent:
- ext2fs_extent_free(handle);
- return i;
- }
-
if (inode->i_flags & EXT4_INLINE_DATA_FL) {
size_t inline_size;
+ if (inode->i_flags & EXT4_EXTENTS_FL)
+ return 0;
if (ext2fs_inline_data_size(fs, ino, &inline_size))
return 0;
if (inode->i_size != inline_size)
}
if (ext2fs_is_fast_symlink(inode)) {
- if (inode->i_size >= sizeof(inode->i_block))
- return 0;
-
- len = strnlen((char *)inode->i_block, sizeof(inode->i_block));
- if (len == sizeof(inode->i_block))
+ if (inode->i_flags & EXT4_EXTENTS_FL)
return 0;
+ buf = (char *)inode->i_block;
+ buflen = sizeof(inode->i_block);
} else {
- if ((inode->i_size >= fs->blocksize) ||
- (inode->i_block[0] < fs->super->s_first_data_block) ||
- (inode->i_block[0] >= ext2fs_blocks_count(fs->super)))
- return 0;
+ ext2_extent_handle_t handle;
+ struct ext2_extent_info info;
+ struct ext2fs_extent extent;
+ blk64_t blk;
+ int i;
- for (i = 1; i < EXT2_N_BLOCKS; i++)
- if (inode->i_block[i])
+ if (inode->i_flags & EXT4_EXTENTS_FL) {
+ if (ext2fs_extent_open2(fs, ino, inode, &handle))
+ return 0;
+ if (ext2fs_extent_get_info(handle, &info) ||
+ (info.num_entries != 1) ||
+ (info.max_depth != 0)) {
+ ext2fs_extent_free(handle);
return 0;
+ }
+ if (ext2fs_extent_get(handle, EXT2_EXTENT_ROOT,
+ &extent) ||
+ (extent.e_lblk != 0) ||
+ (extent.e_len != 1)) {
+ ext2fs_extent_free(handle);
+ return 0;
+ }
+ blk = extent.e_pblk;
+ ext2fs_extent_free(handle);
+ } else {
+ blk = inode->i_block[0];
+
+ for (i = 1; i < EXT2_N_BLOCKS; i++)
+ if (inode->i_block[i])
+ return 0;
+ }
- if (io_channel_read_blk64(fs->io, inode->i_block[0], 1, buf))
+ if (blk < fs->super->s_first_data_block ||
+ blk >= ext2fs_blocks_count(fs->super))
return 0;
- if (inode->i_flags & EXT4_ENCRYPT_FL) {
- len = ext2fs_le32_to_cpu(*((__u32 *)buf)) + 4;
- } else {
- len = strnlen(buf, fs->blocksize);
- }
- if (len == fs->blocksize)
+ if (io_channel_read_blk64(fs->io, blk, 1, buf))
return 0;
+
+ buflen = fs->blocksize;
}
+
+ if (inode->i_flags & EXT4_ENCRYPT_FL)
+ len = ext2fs_le16_to_cpu(*(__u16 *)buf) + 2;
+ else
+ len = strnlen(buf, buflen);
+
+ if (len >= buflen)
+ return 0;
+
if (len != inode->i_size)
- if ((inode->i_flags & EXT4_ENCRYPT_FL) == 0)
- return 0;
+ return 0;
return 1;
}
errcode_t retval;
char *tdb_dir;
int enable;
- int full_map;
*ret = 0;
struct scan_callback_struct scan_struct;
struct ext2_super_block *sb = ctx->fs->super;
const char *old_op;
- int imagic_fs, extent_fs, inlinedata_fs;
+ const char *eop_next_inode = _("getting next inode from scan");
+ int imagic_fs, extent_fs, inlinedata_fs, casefold_fs;
int low_dtime_check = 1;
- int inode_size = EXT2_INODE_SIZE(fs->super);
- int bufsize;
+ unsigned int inode_size = EXT2_INODE_SIZE(fs->super);
+ unsigned int bufsize;
int failed_csum = 0;
ext2_ino_t ino_threshold = 0;
dgrp_t ra_group = 0;
imagic_fs = ext2fs_has_feature_imagic_inodes(sb);
extent_fs = ext2fs_has_feature_extents(sb);
inlinedata_fs = ext2fs_has_feature_inline_data(sb);
+ casefold_fs = ext2fs_has_feature_casefold(sb);
/*
* Allocate bitmaps structures
if (ctx->progress && ((ctx->progress)(ctx, 1, 0,
ctx->fs->group_desc_count)))
goto endit;
- if ((fs->super->s_wtime < fs->super->s_inodes_count) ||
- (fs->super->s_mtime < fs->super->s_inodes_count) ||
+ if ((fs->super->s_wtime &&
+ fs->super->s_wtime < fs->super->s_inodes_count) ||
+ (fs->super->s_mtime &&
+ fs->super->s_mtime < fs->super->s_inodes_count) ||
(fs->super->s_mkfs_time &&
fs->super->s_mkfs_time < fs->super->s_inodes_count))
low_dtime_check = 0;
if (e2fsck_mmp_update(fs))
fatal_error(ctx, 0);
}
- old_op = ehandler_operation(_("getting next inode from scan"));
+ old_op = ehandler_operation(eop_next_inode);
pctx.errcode = ext2fs_get_next_inode_full(scan, &ino,
inode, inode_size);
if (ino > ino_threshold)
continue;
}
+ if ((inode->i_flags & EXT4_CASEFOLD_FL) &&
+ ((!LINUX_S_ISDIR(inode->i_mode) &&
+ fix_problem(ctx, PR_1_CASEFOLD_NONDIR, &pctx)) ||
+ (!casefold_fs &&
+ fix_problem(ctx, PR_1_CASEFOLD_FEATURE, &pctx)))) {
+ inode->i_flags &= ~EXT4_CASEFOLD_FL;
+ e2fsck_write_inode(ctx, ino, inode, "pass1");
+ }
+
/* Conflicting inlinedata/extents inode flags? */
if ((inode->i_flags & EXT4_INLINE_DATA_FL) &&
(inode->i_flags & EXT4_EXTENTS_FL)) {
(ino >= EXT2_FIRST_INODE(fs->super))) {
size_t size = 0;
- pctx.errcode = ext2fs_inline_data_size(fs, ino, &size);
- if (!pctx.errcode && size &&
+ pctx.errcode = get_inline_data_ea_size(fs, ino, &size);
+ if (!pctx.errcode &&
fix_problem(ctx, PR_1_INLINE_DATA_FEATURE, &pctx)) {
ext2fs_set_feature_inline_data(sb);
ext2fs_mark_super_dirty(fs);
case EXT2_ET_NO_INLINE_DATA:
case EXT2_ET_EXT_ATTR_CSUM_INVALID:
case EXT2_ET_EA_BAD_VALUE_OFFSET:
+ case EXT2_ET_EA_INODE_CORRUPTED:
/* broken EA or no system.data EA; truncate */
if (fix_problem(ctx, PR_1_INLINE_DATA_NO_ATTR,
&pctx)) {
/*
* Make sure the root inode is a directory; if
* not, offer to clear it. It will be
- * regnerated in pass #3.
+ * regenerated in pass #3.
*/
if (!LINUX_S_ISDIR(inode->i_mode)) {
if (fix_problem(ctx, PR_1_ROOT_NO_DIR, &pctx))
ctx->fs_directory_count++;
if (inode->i_flags & EXT4_ENCRYPT_FL)
add_encrypted_dir(ctx, ino);
+ if (inode->i_flags & EXT4_CASEFOLD_FL)
+ add_casefolded_dir(ctx, ino);
} else if (LINUX_S_ISREG (inode->i_mode)) {
ext2fs_mark_inode_bitmap2(ctx->inode_reg_map, ino);
ctx->fs_regular_count++;
ctx->flags |= E2F_FLAG_ABORT;
}
+/*
+ * Remember that inode 'ino' is a casefolded directory by appending it
+ * to ctx->casefolded_dirs (presumably consumed by a later pass --
+ * confirm against the rest of the patch).  The u32 list is created
+ * lazily on first use.  On allocation failure, report
+ * PR_1_ALLOCATE_CASEFOLDED_DIRLIST and abort the fsck run; this
+ * mirrors add_encrypted_dir() above.
+ */
+static void add_casefolded_dir(e2fsck_t ctx, ino_t ino)
+{
+	struct problem_context pctx;
+
+	/* Create the list on first use. */
+	if (!ctx->casefolded_dirs) {
+		pctx.errcode = ext2fs_u32_list_create(&ctx->casefolded_dirs, 0);
+		if (pctx.errcode)
+			goto error;
+	}
+	pctx.errcode = ext2fs_u32_list_add(ctx->casefolded_dirs, ino);
+	if (pctx.errcode == 0)
+		return;
+error:
+	fix_problem(ctx, PR_1_ALLOCATE_CASEFOLDED_DIRLIST, &pctx);
+	/* Should never get here */
+	ctx->flags |= E2F_FLAG_ABORT;
+}
+
/*
* This procedure will allocate the inode "bb" (badblock) map table
*/
clear_problem_context(&pctx);
if (ext2fs_fast_test_block_bitmap2(ctx->block_found_map, block)) {
+ if (ext2fs_has_feature_shared_blocks(ctx->fs->super) &&
+ !(ctx->options & E2F_OPT_UNSHARE_BLOCKS)) {
+ return;
+ }
if (!ctx->block_dup_map) {
pctx.errcode = e2fsck_allocate_block_bitmap(ctx->fs,
_("multiply claimed block map"),
if (ext2fs_test_block_bitmap_range2(ctx->block_found_map, block, num))
ext2fs_mark_block_bitmap_range2(ctx->block_found_map, block, num);
else {
- int i;
+ unsigned int i;
+
for (i = 0; i < num; i += EXT2FS_CLUSTER_RATIO(ctx->fs))
mark_block_used(ctx, block + i);
}
return 0;
}
- if (quota_blocks != EXT2FS_C2B(fs, 1)) {
+ if (quota_blocks != EXT2FS_C2B(fs, 1U)) {
if (!ctx->ea_block_quota_blocks) {
pctx->errcode = ea_refcount_create(0,
&ctx->ea_block_quota_blocks);
if ((root->hash_version != EXT2_HASH_LEGACY) &&
(root->hash_version != EXT2_HASH_HALF_MD4) &&
(root->hash_version != EXT2_HASH_TEA) &&
+ (root->hash_version != EXT2_HASH_SIPHASH) &&
fix_problem(ctx, PR_1_HTREE_HASHV, pctx))
return 1;
+ if (ext4_hash_in_dirent(inode)) {
+ if (root->hash_version != EXT2_HASH_SIPHASH &&
+ fix_problem(ctx, PR_1_HTREE_NEEDS_SIPHASH, pctx))
+ return 1;
+ } else {
+ if (root->hash_version == EXT2_HASH_SIPHASH &&
+ fix_problem(ctx, PR_1_HTREE_CANNOT_SIPHASH, pctx))
+ return 1;
+ }
+
if ((root->unused_flags & EXT2_HASH_FLAG_INCOMPAT) &&
fix_problem(ctx, PR_1_HTREE_INCOMPAT, pctx))
return 1;
pctx->num = root->indirect_levels;
- if ((root->indirect_levels > ext2_dir_htree_level(fs)) &&
+ if ((root->indirect_levels >= ext2_dir_htree_level(fs)) &&
fix_problem(ctx, PR_1_HTREE_DEPTH, pctx))
return 1;
else if (extent.e_lblk < start_block)
problem = PR_1_OUT_OF_ORDER_EXTENTS;
else if ((end_block && last_lblk > end_block) &&
- (!(extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT &&
- last_lblk > eof_block)))
+ !(last_lblk > eof_block &&
+ ((extent.e_flags & EXT2_EXTENT_FLAGS_UNINIT) ||
+ (pctx->inode->i_flags & EXT4_VERITY_FL))))
problem = PR_1_EXTENT_END_OUT_OF_BOUNDS;
else if (is_leaf && extent.e_len == 0)
problem = PR_1_EXTENT_LENGTH_ZERO;
inode->i_flags &= ~EXT2_INDEX_FL;
dirty_inode++;
} else {
- e2fsck_add_dx_dir(ctx, ino, pb.last_block+1);
+ e2fsck_add_dx_dir(ctx, ino, inode, pb.last_block+1);
}
}
bad_size = 2;
}
} else {
- e2_blkcnt_t blkpg = ctx->blocks_per_page;
-
size = EXT2_I_SIZE(inode);
if ((pb.last_init_lblock >= 0) &&
- /* allow allocated blocks to end of PAGE_SIZE */
+ /* Do not allow initialized allocated blocks past i_size*/
(size < (__u64)pb.last_init_lblock * fs->blocksize) &&
- (pb.last_init_lblock / blkpg * blkpg != pb.last_init_lblock ||
- size < (__u64)(pb.last_init_lblock & ~(blkpg-1)) *
- fs->blocksize))
+ !(inode->i_flags & EXT4_VERITY_FL))
bad_size = 3;
else if (!(extent_fs && (inode->i_flags & EXT4_EXTENTS_FL)) &&
size > ext2_max_sizes[fs->super->s_log_block_size])
}
}
- if (p->is_dir && blockcnt > (1 << (21 - fs->super->s_log_block_size)))
+ if (p->is_dir && !ext2fs_has_feature_largedir(fs->super) &&
+ blockcnt > (1 << (21 - fs->super->s_log_block_size)))
+ problem = PR_1_TOOBIG_DIR;
+ if (p->is_dir && p->num_blocks + 1 >= p->max_blocks)
problem = PR_1_TOOBIG_DIR;
- if (p->is_reg && p->num_blocks+1 >= p->max_blocks)
+ if (p->is_reg && p->num_blocks + 1 >= p->max_blocks)
problem = PR_1_TOOBIG_REG;
if (!p->is_dir && !p->is_reg && blockcnt > 0)
problem = PR_1_TOOBIG_SYMLINK;
}
/*
- * Thes subroutines short circuits ext2fs_get_blocks and
+ * These subroutines short circuit ext2fs_get_blocks and
* ext2fs_check_directory; we use them since we already have the inode
* structure, so there's no point in letting the ext2fs library read
* the inode again.