--- linux-4.18/fs/ext4/mballoc.c	2019-11-28 14:55:26.500545920 +0300
+++ linux-4.18/fs/ext4/mballoc.c	2019-11-28 14:53:18.600086008 +0300
@@ -1885,6 +1885,21 @@ void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
 	return 0;
 }
+
+static inline int ext4_mb_uninit_on_disk(struct super_block *sb,
+					 ext4_group_t group)
+{
+	struct ext4_group_desc *desc;
+
+	if (!ext4_has_group_desc_csum(sb))
+		return 0;
+
+	desc = ext4_get_group_desc(sb, group, NULL);
+	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
+		return 1;
+
+	return 0;
+}
 
 /*
  * The routine scans buddy structures (not bitmap!) from given order
@@ -2060,7 +2075,15 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
 	/* We only do this if the grp has never been initialized */
 	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
-		int ret = ext4_mb_init_group(ac->ac_sb, group);
+		int ret;
+
+		/* cr=0/1 is a very optimistic search to find large
+		 * good chunks almost for free. if buddy data is
+		 * not ready, then this optimization makes no sense */
+
+		if (cr < 2 && !ext4_mb_uninit_on_disk(ac->ac_sb, group))
+			return 0;
+		ret = ext4_mb_init_group(ac->ac_sb, group);
 		if (ret)
 			return ret;
 	}
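
Not part of the patch: below is a minimal, standalone user-space sketch of the decision the second hunk adds to ext4_mb_good_group(), under the assumption (consistent with the patch comment) that a group flagged EXT4_BG_BLOCK_UNINIT on disk has never had its block bitmap written, so its buddy data can be built cheaply, whereas any other uninitialized group would need a bitmap read that the optimistic cr=0/1 passes want to avoid. struct group_model, uninit_on_disk() and worth_scanning() are hypothetical stand-ins, not kernel APIs; only the branch logic mirrors the patch.

/*
 * Standalone model of the "cr < 2" skip added in ext4_mb_good_group().
 * Illustrative user-space code, not kernel code: the structures are
 * simplified stand-ins and only the branch logic follows the patch.
 */
#include <stdio.h>
#include <stdbool.h>

#define BG_BLOCK_UNINIT	0x0002	/* same bit value as EXT4_BG_BLOCK_UNINIT */

struct group_model {
	bool need_init;			/* in-memory buddy data not built yet */
	unsigned short bg_flags;	/* on-disk group descriptor flags */
};

/* mirrors ext4_mb_uninit_on_disk(): block bitmap never initialized on disk */
static bool uninit_on_disk(const struct group_model *g)
{
	return g->bg_flags & BG_BLOCK_UNINIT;
}

/* mirrors the new early return in ext4_mb_good_group() */
static bool worth_scanning(const struct group_model *g, int cr)
{
	if (!g->need_init)
		return true;	/* buddy data already cached, nothing to pay */
	/*
	 * cr=0/1 is an optimistic search for large free chunks; only pay
	 * the buddy-initialization cost when the group is trivially free
	 * on disk (BLOCK_UNINIT), otherwise skip it for now.
	 */
	if (cr < 2 && !uninit_on_disk(g))
		return false;
	return true;
}

int main(void)
{
	struct group_model used  = { .need_init = true, .bg_flags = 0 };
	struct group_model fresh = { .need_init = true, .bg_flags = BG_BLOCK_UNINIT };
	int cr;

	for (cr = 0; cr < 4; cr++)
		printf("cr=%d: used group -> %s, BLOCK_UNINIT group -> %s\n", cr,
		       worth_scanning(&used, cr)  ? "scan" : "skip",
		       worth_scanning(&fresh, cr) ? "scan" : "skip");
	return 0;
}

Running the sketch shows the intended effect: at cr=0 and cr=1 an uninitialized group is only scanned when its descriptor is marked BLOCK_UNINIT, while from cr=2 onward every group is considered, which is the behaviour the patched ext4_mb_good_group() aims for.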