			      (EXT2FS_B2C(ctx->fs, blk) ==
			       EXT2FS_B2C(ctx->fs, pb->previous_block)) &&
			      (blk & EXT2FS_CLUSTER_MASK(ctx->fs)) ==
-			      (blockcnt & EXT2FS_CLUSTER_MASK(ctx->fs))))
+			      (blockcnt & EXT2FS_CLUSTER_MASK(ctx->fs)))) {
				mark_block_used(ctx, blk);
+				pb->num_blocks++;
+			}
			pb->previous_block = blk;
		}
		if (is_dir && extent.e_len > 0)
			pb->last_db_block = blockcnt - 1;
-		pb->num_blocks += extent.e_len;
		pb->previous_block = extent.e_pblk + extent.e_len - 1;
		start_block = pb->last_block = extent.e_lblk + extent.e_len - 1;
	next:
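
The hunk above now charges pb->num_blocks, like the block bitmap update, only once per cluster: a block is skipped when it lies in the same physical cluster as the previously visited block and sits at the same offset within the cluster as its logical block number. The fragment below is a standalone sketch of that test, not part of the patch; CLUSTER_RATIO_BITS, b2c() and cluster_mask() are stand-ins for the filesystem's cluster_ratio_bits field and the EXT2FS_B2C()/EXT2FS_CLUSTER_MASK() macros, which in the real code take an ext2_filsys handle.

/*
 * Standalone illustration (not part of the patch): once-per-cluster
 * accounting modelled with plain integers.
 */
#include <stdio.h>

#define CLUSTER_RATIO_BITS 4			/* 16 blocks per cluster */

static unsigned long long b2c(unsigned long long blk)
{
	return blk >> CLUSTER_RATIO_BITS;	/* block number -> cluster number */
}

static unsigned long long cluster_mask(void)
{
	return (1ULL << CLUSTER_RATIO_BITS) - 1; /* offset of a block inside its cluster */
}

int main(void)
{
	/* (logical, physical) block pairs as an extent scan might visit them */
	struct { unsigned long long lblk, pblk; } ext[] = {
		{ 0, 1600 }, { 1, 1601 }, { 2, 1602 },	/* all in physical cluster 100 */
		{ 16, 1616 },				/* first block of cluster 101 */
	};
	unsigned long long prev_pblk = 0;
	unsigned long long clusters = 0;
	unsigned int i;

	for (i = 0; i < sizeof(ext) / sizeof(ext[0]); i++) {
		unsigned long long blk = ext[i].pblk, blockcnt = ext[i].lblk;

		/* Same shape of test as the patch: skip the charge when this
		 * block shares a cluster with the previous one at a
		 * consistent in-cluster offset. */
		if (!(CLUSTER_RATIO_BITS && prev_pblk &&
		      b2c(blk) == b2c(prev_pblk) &&
		      (blk & cluster_mask()) == (blockcnt & cluster_mask())))
			clusters++;
		prev_pblk = blk;
	}
	printf("clusters charged: %llu\n", clusters);	/* prints 2 */
	return 0;
}
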
	      EXT4_FEATURE_RO_COMPAT_HUGE_FILE) ||
	    !(inode->i_flags & EXT4_HUGE_FILE_FL))
		pb.num_blocks *= (fs->blocksize / 512);
+	pb.num_blocks *= EXT2FS_CLUSTER_RATIO(fs);
#if 0
-	printf("inode %u, i_size = %lu, last_block = %lld, i_blocks=%lu, num_blocks = %lu\n",
+	printf("inode %u, i_size = %u, last_block = %lld, i_blocks=%llu, num_blocks = %llu\n",
	       ino, inode->i_size, pb.last_block, ext2fs_inode_i_blocks(fs, inode),
	       pb.num_blocks);
#endif
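
For a feel for the units (illustrative numbers, not taken from the patch): with 4 KiB blocks and a cluster ratio of 16, one cluster covers 64 KiB. If pb.num_blocks counted 3 clusters, the non-huge-file path above yields 3 * (4096 / 512) * 16 = 384, which is the 196608 allocated bytes expressed in 512-byte sectors, the same units as the ext2fs_inode_i_blocks() value printed in the debug block.
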
			es->err = retval;
			return BLOCK_ABORT;
		}
+		es->newblocks++;
	}
	if (blockcnt > 0) {
		retval = ext2fs_new_dir_block(fs, 0, 0, &block);
	ext2fs_free_mem(&block);
	*blocknr = new_blk;
	ext2fs_block_alloc_stats2(fs, new_blk, +1);
-	es->newblocks++;
	if (es->done)
		return (BLOCK_CHANGED | BLOCK_ABORT);
	      EXT4_FEATURE_RO_COMPAT_HUGE_FILE) ||
	    !(inode->i_flags & EXT4_HUGE_FILE_FL))
		num_blocks *= fs->blocksize / 512;
+	num_blocks *= EXT2FS_CLUSTER_RATIO(fs);
	b += num_blocks;
	      EXT4_FEATURE_RO_COMPAT_HUGE_FILE) ||
	    !(inode->i_flags & EXT4_HUGE_FILE_FL))
		num_blocks *= fs->blocksize / 512;
+	num_blocks *= EXT2FS_CLUSTER_RATIO(fs);
	if (num_blocks > b)
		return EOVERFLOW;
	      EXT4_FEATURE_RO_COMPAT_HUGE_FILE) ||
	    !(inode->i_flags & EXT4_HUGE_FILE_FL))
		b *= fs->blocksize / 512;
+	b *= EXT2FS_CLUSTER_RATIO(fs);
	inode->i_blocks = b & 0xFFFFFFFF;
	if (fs->super->s_feature_ro_compat & EXT4_FEATURE_RO_COMPAT_HUGE_FILE)
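
All three i_blocks hunks above apply the same unit conversion: the incoming count is, judging by the pass-1 hunk, now kept in clusters, so it is scaled to 512-byte sectors unless the huge_file feature together with EXT4_HUGE_FILE_FL keeps i_blocks in filesystem-block units, and in either case multiplied by the cluster ratio. The sketch below is not the library code; toy_inode, toy_iblk_set and their parameters are invented for illustration.

/*
 * Standalone illustration (not part of the patch): the unit conversion
 * performed before i_blocks is stored.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_inode {
	uint32_t i_blocks;	/* low 32 bits of the block count */
	uint16_t i_blocks_hi;	/* high bits, used only with huge_file */
};

static void toy_iblk_set(struct toy_inode *inode, uint64_t clusters,
			 unsigned int blocksize, unsigned int cluster_ratio,
			 int huge_file, int huge_file_fl)
{
	uint64_t b = clusters;

	/* i_blocks is in 512-byte sectors unless huge_file plus
	 * EXT4_HUGE_FILE_FL switch it to filesystem-block units. */
	if (!huge_file || !huge_file_fl)
		b *= blocksize / 512;
	/* The caller passed clusters, so scale by blocks per cluster. */
	b *= cluster_ratio;

	inode->i_blocks = b & 0xFFFFFFFF;
	if (huge_file)
		inode->i_blocks_hi = (b >> 32) & 0xFFFF;
}

int main(void)
{
	struct toy_inode ino = { 0, 0 };

	/* 3 clusters, 4 KiB blocks, 16 blocks per cluster:
	 * 3 * (4096 / 512) * 16 = 384 sectors. */
	toy_iblk_set(&ino, 3, 4096, 16, 0, 0);
	printf("i_blocks = %u\n", (unsigned int) ino.i_blocks);
	return 0;
}
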