X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;ds=sidebyside;f=lib%2Fext2fs%2Falloc_tables.c;h=971a6ceb58306fb11a913a264c8afaf70c384fe2;hb=336c440ccea8f94b0728f881cddee84f730e7cc7;hp=9b4c05727af4bf27f8d4d9e1afacc36c76b6fc5e;hpb=03fa6f8ae28a87018325c892f731097cc97d9eac;p=tools%2Fe2fsprogs.git

diff --git a/lib/ext2fs/alloc_tables.c b/lib/ext2fs/alloc_tables.c
index 9b4c057..971a6ce 100644
--- a/lib/ext2fs/alloc_tables.c
+++ b/lib/ext2fs/alloc_tables.c
@@ -5,11 +5,12 @@
  * Copyright (C) 1996 Theodore Ts'o.
  *
  * %Begin-Header%
- * This file may be redistributed under the terms of the GNU Public
- * License.
+ * This file may be redistributed under the terms of the GNU Library
+ * General Public License, version 2.
  * %End-Header%
  */
 
+#include "config.h"
 #include <stdio.h>
 #include <string.h>
 #if HAVE_UNISTD_H
@@ -26,53 +27,55 @@
 
 #include "ext2_fs.h"
 #include "ext2fs.h"
+#include "ext2fsP.h"
 
 /*
  * This routine searches for free blocks that can allocate a full
  * group of bitmaps or inode tables for a flexbg group.  Returns the
  * block number with a correct offset were the bitmaps and inode
- * tables can be allocated continously and in order.
+ * tables can be allocated continuously and in order.
  */
-static blk_t flexbg_offset(ext2_filsys fs, dgrp_t group, blk_t start_blk,
-			   ext2fs_block_bitmap bmap, int offset, int size,
-			   int elem_size)
+static blk64_t flexbg_offset(ext2_filsys fs, dgrp_t group, blk64_t start_blk,
+			     ext2fs_block_bitmap bmap, int rem_grp,
+			     int elem_size)
 {
-	int		flexbg, flexbg_size;
-	blk_t		last_blk, first_free = 0;
+	int		flexbg, flexbg_size, size;
+	blk64_t		last_blk, first_free = 0;
 	dgrp_t		last_grp;
 
 	flexbg_size = 1 << fs->super->s_log_groups_per_flex;
 	flexbg = group / flexbg_size;
+	size = rem_grp * elem_size;
 
-	if (size > (int) (fs->super->s_blocks_per_group / 8))
-		size = (int) fs->super->s_blocks_per_group / 8;
-
-	if (offset)
-		offset -= 1;
+	if (size > (int) (fs->super->s_blocks_per_group / 4))
+		size = (int) fs->super->s_blocks_per_group / 4;
 
 	/*
-	 * Don't do a long search if the previous block
-	 * search is still valid.
+	 * Don't do a long search if the previous block search is still valid,
+	 * but skip minor obstructions such as group descriptor backups.
 	 */
-	if (start_blk && group % flexbg_size) {
-		if (ext2fs_test_block_bitmap_range(bmap, start_blk + elem_size,
-						   size))
-			return start_blk + elem_size;
-	}
+	if (start_blk && start_blk < ext2fs_blocks_count(fs->super) &&
+	    ext2fs_get_free_blocks2(fs, start_blk, start_blk + size, elem_size,
+				    bmap, &first_free) == 0)
+		return first_free;
 
-	start_blk = ext2fs_group_first_block(fs, flexbg_size * flexbg);
+	start_blk = ext2fs_group_first_block2(fs, flexbg_size * flexbg);
 	last_grp = group | (flexbg_size - 1);
-	if (last_grp > fs->group_desc_count)
-		last_grp = fs->group_desc_count;
-	last_blk = ext2fs_group_last_block(fs, last_grp);
+	if (last_grp > fs->group_desc_count-1)
+		last_grp = fs->group_desc_count-1;
+	last_blk = ext2fs_group_last_block2(fs, last_grp);
 
 	/* Find the first available block */
-	if (ext2fs_get_free_blocks(fs, start_blk, last_blk, 1, bmap,
-				   &first_free))
+	if (ext2fs_get_free_blocks2(fs, start_blk, last_blk, size,
+				    bmap, &first_free) == 0)
+		return first_free;
+
+	if (ext2fs_get_free_blocks2(fs, start_blk, last_blk, elem_size,
+				    bmap, &first_free) == 0)
 		return first_free;
 
-	if (ext2fs_get_free_blocks(fs, first_free + offset, last_blk, size,
-				   bmap, &first_free))
+	if (ext2fs_get_free_blocks2(fs, 0, last_blk, elem_size, bmap,
+				    &first_free) == 0)
 		return first_free;
 
 	return first_free;
@@ -82,32 +85,31 @@ errcode_t ext2fs_allocate_group_table(ext2_filsys fs, dgrp_t group,
 				      ext2fs_block_bitmap bmap)
 {
 	errcode_t	retval;
-	blk_t		group_blk, start_blk, last_blk, new_blk, blk;
+	blk64_t		group_blk, start_blk, last_blk, new_blk;
 	dgrp_t		last_grp = 0;
-	int		j, rem_grps = 0, flexbg_size = 0;
+	int		rem_grps = 0, flexbg_size = 0, table_offset = 0;
 
-	group_blk = ext2fs_group_first_block(fs, group);
-	last_blk = ext2fs_group_last_block(fs, group);
+	group_blk = ext2fs_group_first_block2(fs, group);
+	last_blk = ext2fs_group_last_block2(fs, group);
 
 	if (!bmap)
 		bmap = fs->block_map;
 
-	if (EXT2_HAS_INCOMPAT_FEATURE(fs->super,
-				      EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
+	if (ext2fs_has_feature_flex_bg(fs->super) &&
 	    fs->super->s_log_groups_per_flex) {
 		flexbg_size = 1 << fs->super->s_log_groups_per_flex;
 		last_grp = group | (flexbg_size - 1);
-		rem_grps = last_grp - group;
-		if (last_grp > fs->group_desc_count)
-			last_grp = fs->group_desc_count;
+		if (last_grp > fs->group_desc_count-1)
+			last_grp = fs->group_desc_count-1;
+		rem_grps = last_grp - group + 1;
 	}
 
 	/*
 	 * Allocate the block and inode bitmaps, if necessary
 	 */
-	if (fs->stride) {
-		retval = ext2fs_get_free_blocks(fs, group_blk, last_blk,
-						1, bmap, &start_blk);
+	if (fs->stride && !flexbg_size) {
+		retval = ext2fs_get_free_blocks2(fs, group_blk, last_blk,
+						 1, bmap, &start_blk);
 		if (retval)
 			return retval;
 		start_blk += fs->inode_blocks_per_group;
@@ -119,57 +121,77 @@ errcode_t ext2fs_allocate_group_table(ext2_filsys fs, dgrp_t group,
 		start_blk = group_blk;
 
 	if (flexbg_size) {
-		blk_t prev_block = 0;
-		if (group && fs->group_desc[group-1].bg_block_bitmap)
-			prev_block = fs->group_desc[group-1].bg_block_bitmap;
+		blk64_t prev_block = 0;
+
+		table_offset = flexbg_size;
+		if (group % flexbg_size)
+			prev_block = ext2fs_block_bitmap_loc(fs, group - 1) + 1;
+		else if (last_grp == fs->group_desc_count-1) {
+			/*
+			 * If we are allocating for the last flex_bg
+			 * keep the metadata tables contiguous
+			 */
+			table_offset = last_grp & (flexbg_size - 1);
+			if (table_offset == 0)
+				table_offset = flexbg_size;
+			else
+				table_offset++;
+		}
+		/* FIXME: Take backup group descriptor blocks into account
+		 * if the flexbg allocations will grow to overlap them...
+		 */
 		start_blk = flexbg_offset(fs, group, prev_block, bmap,
-						 0, rem_grps, 1);
-		last_blk = ext2fs_group_last_block(fs, last_grp);
+					  rem_grps, 1);
+		last_blk = ext2fs_group_last_block2(fs, last_grp);
 	}
 
-	if (!fs->group_desc[group].bg_block_bitmap) {
-		retval = ext2fs_get_free_blocks(fs, start_blk, last_blk,
-						1, bmap, &new_blk);
+	if (!ext2fs_block_bitmap_loc(fs, group)) {
+		retval = ext2fs_get_free_blocks2(fs, start_blk, last_blk,
+						 1, bmap, &new_blk);
 		if (retval == EXT2_ET_BLOCK_ALLOC_FAIL)
-			retval = ext2fs_get_free_blocks(fs, group_blk,
+			retval = ext2fs_get_free_blocks2(fs, group_blk,
 					last_blk, 1, bmap, &new_blk);
 		if (retval)
 			return retval;
-		ext2fs_mark_block_bitmap(bmap, new_blk);
-		fs->group_desc[group].bg_block_bitmap = new_blk;
+		ext2fs_mark_block_bitmap2(bmap, new_blk);
+		ext2fs_block_bitmap_loc_set(fs, group, new_blk);
 		if (flexbg_size) {
-			dgrp_t gr = ext2fs_group_of_blk(fs, new_blk);
-			fs->group_desc[gr].bg_free_blocks_count--;
-			fs->super->s_free_blocks_count--;
-			fs->group_desc[gr].bg_flags &= ~EXT2_BG_BLOCK_UNINIT;
+			dgrp_t gr = ext2fs_group_of_blk2(fs, new_blk);
+			ext2fs_bg_free_blocks_count_set(fs, gr, ext2fs_bg_free_blocks_count(fs, gr) - 1);
+			ext2fs_free_blocks_count_add(fs->super, -1);
+			ext2fs_bg_flags_clear(fs, gr, EXT2_BG_BLOCK_UNINIT);
 			ext2fs_group_desc_csum_set(fs, gr);
 		}
 	}
 
 	if (flexbg_size) {
-		blk_t prev_block = 0;
-		if (group && fs->group_desc[group-1].bg_inode_bitmap)
-			prev_block = fs->group_desc[group-1].bg_inode_bitmap;
+		blk64_t prev_block = 0;
+		if (group % flexbg_size)
+			prev_block = ext2fs_inode_bitmap_loc(fs, group - 1) + 1;
+		else
+			prev_block = ext2fs_block_bitmap_loc(fs, group) +
+				table_offset;
+		/* FIXME: Take backup group descriptor blocks into account
+		 * if the flexbg allocations will grow to overlap them...
+		 */
 		start_blk = flexbg_offset(fs, group, prev_block, bmap,
-						 flexbg_size, rem_grps, 1);
-		last_blk = ext2fs_group_last_block(fs, last_grp);
+					  rem_grps, 1);
+		last_blk = ext2fs_group_last_block2(fs, last_grp);
 	}
 
-	if (!fs->group_desc[group].bg_inode_bitmap) {
-		retval = ext2fs_get_free_blocks(fs, start_blk, last_blk,
-						1, bmap, &new_blk);
+	if (!ext2fs_inode_bitmap_loc(fs, group)) {
+		retval = ext2fs_get_free_blocks2(fs, start_blk, last_blk,
+						 1, bmap, &new_blk);
 		if (retval == EXT2_ET_BLOCK_ALLOC_FAIL)
-			retval = ext2fs_get_free_blocks(fs, group_blk,
-					last_blk, 1, bmap, &new_blk);
+			retval = ext2fs_get_free_blocks2(fs, group_blk,
+					 last_blk, 1, bmap, &new_blk);
 		if (retval)
 			return retval;
-		ext2fs_mark_block_bitmap(bmap, new_blk);
-		fs->group_desc[group].bg_inode_bitmap = new_blk;
+		ext2fs_mark_block_bitmap2(bmap, new_blk);
+		ext2fs_inode_bitmap_loc_set(fs, group, new_blk);
 		if (flexbg_size) {
-			dgrp_t gr = ext2fs_group_of_blk(fs, new_blk);
-			fs->group_desc[gr].bg_free_blocks_count--;
-			fs->super->s_free_blocks_count--;
-			fs->group_desc[gr].bg_flags &= ~EXT2_BG_BLOCK_UNINIT;
+			dgrp_t gr = ext2fs_group_of_blk2(fs, new_blk);
+			ext2fs_bg_free_blocks_count_set(fs, gr, ext2fs_bg_free_blocks_count(fs, gr) - 1);
+			ext2fs_free_blocks_count_add(fs->super, -1);
+			ext2fs_bg_flags_clear(fs, gr, EXT2_BG_BLOCK_UNINIT);
 			ext2fs_group_desc_csum_set(fs, gr);
 		}
 	}
@@ -178,36 +200,55 @@ errcode_t ext2fs_allocate_group_table(ext2_filsys fs, dgrp_t group,
 	 * Allocate the inode table
 	 */
 	if (flexbg_size) {
-		int prev_block = 0;
-		if (group && fs->group_desc[group-1].bg_inode_table)
-			prev_block = fs->group_desc[group-1].bg_inode_table;
+		blk64_t prev_block = 0;
+
+		if (group % flexbg_size)
+			prev_block = ext2fs_inode_table_loc(fs, group - 1) +
+				fs->inode_blocks_per_group;
+		else
+			prev_block = ext2fs_inode_bitmap_loc(fs, group) +
+				table_offset;
+
+		/* FIXME: Take backup group descriptor blocks into account
+		 * if the flexbg allocations will grow to overlap them...
+		 */
 		group_blk = flexbg_offset(fs, group, prev_block, bmap,
-						 flexbg_size * 2,
-						 fs->inode_blocks_per_group *
-						 rem_grps,
-						 fs->inode_blocks_per_group);
-		last_blk = ext2fs_group_last_block(fs, last_grp);
+					  rem_grps, fs->inode_blocks_per_group);
+		last_blk = ext2fs_group_last_block2(fs, last_grp);
 	}
 
-	if (!fs->group_desc[group].bg_inode_table) {
-		retval = ext2fs_get_free_blocks(fs, group_blk, last_blk,
+	if (!ext2fs_inode_table_loc(fs, group)) {
+		retval = ext2fs_get_free_blocks2(fs, group_blk, last_blk,
 						fs->inode_blocks_per_group,
 						bmap, &new_blk);
 		if (retval)
 			return retval;
-		for (j=0, blk = new_blk;
-		     j < fs->inode_blocks_per_group;
-		     j++, blk++) {
-			ext2fs_mark_block_bitmap(bmap, blk);
-			if (flexbg_size) {
-				dgrp_t gr = ext2fs_group_of_blk(fs, blk);
-				fs->group_desc[gr].bg_free_blocks_count--;
-				fs->super->s_free_blocks_count--;
-				fs->group_desc[gr].bg_flags &= ~EXT2_BG_BLOCK_UNINIT;
+
+		ext2fs_mark_block_bitmap_range2(bmap,
+				new_blk, fs->inode_blocks_per_group);
+		if (flexbg_size) {
+			blk64_t num, blk;
+			num = fs->inode_blocks_per_group;
+			blk = new_blk;
+			while (num) {
+				int gr = ext2fs_group_of_blk2(fs, blk);
+				last_blk = ext2fs_group_last_block2(fs, gr);
+				blk64_t n = num;
+
+				if (blk + num > last_blk)
+					n = last_blk - blk + 1;
+
+				ext2fs_bg_free_blocks_count_set(fs, gr,
+					ext2fs_bg_free_blocks_count(fs, gr) -
+					n/EXT2FS_CLUSTER_RATIO(fs));
+				ext2fs_bg_flags_clear(fs, gr,
+					EXT2_BG_BLOCK_UNINIT);
 				ext2fs_group_desc_csum_set(fs, gr);
+				ext2fs_free_blocks_count_add(fs->super, -n);
+				blk += n;
+				num -= n;
 			}
 		}
-		fs->group_desc[group].bg_inode_table = new_blk;
+		ext2fs_inode_table_loc_set(fs, group, new_blk);
 	}
 	ext2fs_group_desc_csum_set(fs, group);
 	return 0;
@@ -217,12 +258,21 @@ errcode_t ext2fs_allocate_tables(ext2_filsys fs)
 {
 	errcode_t retval;
 	dgrp_t	i;
+	struct ext2fs_numeric_progress_struct progress;
+
+	if (fs->progress_ops && fs->progress_ops->init)
+		(fs->progress_ops->init)(fs, &progress, NULL,
+					 fs->group_desc_count);
 
 	for (i = 0; i < fs->group_desc_count; i++) {
+		if (fs->progress_ops && fs->progress_ops->update)
+			(fs->progress_ops->update)(fs, &progress, i);
 		retval = ext2fs_allocate_group_table(fs, i, fs->block_map);
 		if (retval)
 			return retval;
 	}
 
+	if (fs->progress_ops && fs->progress_ops->close)
+		(fs->progress_ops->close)(fs, &progress, NULL);
 	return 0;
 }
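Editor's note: the patch above repeatedly derives the same flex_bg geometry from the superblock (the flex group size, the last group of the flex group, and the number of groups whose metadata still has to be placed). The sketch below restates that arithmetic for reference only; it is not part of the commit, and the struct and helper name are hypothetical.

/*
 * Illustrative sketch -- not part of the patch above.  Restates the
 * flex_bg geometry computed in flexbg_offset() and
 * ext2fs_allocate_group_table(); struct and helper name are made up.
 */
#include <ext2fs/ext2fs.h>

struct flexbg_geom {
	int	flexbg_size;	/* block groups per flex group */
	dgrp_t	flexbg;		/* flex group that contains "group" */
	dgrp_t	last_grp;	/* last group in that flex group */
	int	rem_grps;	/* groups left in the flex group, incl. "group" */
};

static void flexbg_geometry(ext2_filsys fs, dgrp_t group,
			    struct flexbg_geom *geom)
{
	/* s_log_groups_per_flex is the log2 of the flex group size */
	geom->flexbg_size = 1 << fs->super->s_log_groups_per_flex;
	geom->flexbg = group / geom->flexbg_size;

	/* Last group of this flex group, clamped to the end of the fs */
	geom->last_grp = group | (geom->flexbg_size - 1);
	if (geom->last_grp > fs->group_desc_count - 1)
		geom->last_grp = fs->group_desc_count - 1;

	/* Groups whose bitmaps and inode tables remain to be placed */
	geom->rem_grps = geom->last_grp - group + 1;
}

In the patched flexbg_offset(), rem_grps * elem_size then bounds the search window (capped at s_blocks_per_group / 4) before the function falls back to progressively wider searches with ext2fs_get_free_blocks2().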