===================================================================
--- linux-2.6.18-128.1.6.orig/fs/ext4/balloc.c
+++ linux-2.6.18-128.1.6/fs/ext4/balloc.c
-@@ -321,15 +321,15 @@ ext4_read_block_bitmap(struct super_bloc
- if (bh_uptodate_or_lock(bh))
- return bh;
-
+@@ -321,16 +321,16 @@ ext4_read_block_bitmap(struct super_bloc
+ unlock_buffer(bh);
+ return bh;
+ }
- spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
ext4_init_block_bitmap(sb, bh, block_group, desc);
+ set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
- unlock_buffer(bh);
- spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
return bh;
}
- spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ ext4_unlock_group(sb, block_group);
- if (bh_submit_read(bh) < 0) {
- put_bh(bh);
- ext4_error(sb, __func__,
-@@ -778,8 +778,9 @@ do_more:
- BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
- J_ASSERT_BH(bitmap_bh,
- bh2jh(bitmap_bh)->b_committed_data != NULL);
-- ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
-- bh2jh(bitmap_bh)->b_committed_data);
-+ ext4_set_bit_atomic(ext4_group_lock_ptr(sb, block_group),
-+ bit + i,
-+ bh2jh(bitmap_bh)->b_committed_data);
-
- /*
- * We clear the bit in the bitmap after setting the committed
+ if (buffer_uptodate(bh)) {
+ /*
+ * if not uninit if bh is uptodate,
@@ -787,7 +788,7 @@ do_more:
* the allocator uses.
*/
bit + i, bitmap_bh->b_data)) {
jbd_unlock_bh_state(bitmap_bh);
ext4_error(sb, __func__,
-@@ -801,17 +802,17 @@ do_more:
- }
- jbd_unlock_bh_state(bitmap_bh);
-
+@@ -801,18 +802,18 @@ do_more:
+ blocks_freed++;
+ }
+ }
- spin_lock(sb_bgl_lock(sbi, block_group));
+ ext4_lock_group(sb, block_group);
- le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
+ blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
+ ext4_free_blks_set(sb, desc, blk_free_count);
desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
- spin_unlock(sb_bgl_lock(sbi, block_group));
+ ext4_unlock_group(sb, block_group);
- percpu_counter_add(&sbi->s_freeblocks_counter, count);
+ percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
- 		spin_lock(sb_bgl_lock(sbi, flex_group));
+ 		ext4_lock_group(sb, flex_group);
- 		sbi->s_flex_groups[flex_group].free_blocks += count;
+ 		sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
- 		spin_unlock(sb_bgl_lock(sbi, flex_group));
+ 		ext4_unlock_group(sb, flex_group);
}
/* We dirtied the bitmap block */
-@@ -1104,7 +1105,7 @@ repeat:
- }
- start = grp_goal;
-
-- if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
-+ if (!claim_block(ext4_group_lock_ptr(sb, group),
- grp_goal, bitmap_bh)) {
- /*
- * The block was allocated by another thread, or it was
-@@ -1120,7 +1121,7 @@ repeat:
- grp_goal++;
- while (num < *count && grp_goal < end
- && ext4_test_allocatable(grp_goal, bitmap_bh)
-- && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
-+ && claim_block(ext4_group_lock_ptr(sb, group),
- grp_goal, bitmap_bh)) {
- num++;
- grp_goal++;
-@@ -1872,7 +1873,7 @@ allocated:
- }
- }
- jbd_lock_bh_state(bitmap_bh);
-- spin_lock(sb_bgl_lock(sbi, group_no));
-+ ext4_lock_group(sb, group_no);
- if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
- int i;
-
-@@ -1885,7 +1886,7 @@ allocated:
- }
- }
- ext4_debug("found bit %d\n", grp_alloc_blk);
-- spin_unlock(sb_bgl_lock(sbi, group_no));
-+ ext4_unlock_group(sb, group_no);
- jbd_unlock_bh_state(bitmap_bh);
- #endif
-
-@@ -1902,19 +1903,19 @@ allocated:
- * list of some description. We don't know in advance whether
- * the caller wants to use it as metadata or data.
- */
-- spin_lock(sb_bgl_lock(sbi, group_no));
-+ ext4_lock_group(sb, group_no);
- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
- gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
- le16_add_cpu(&gdp->bg_free_blocks_count, -num);
- gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
-- spin_unlock(sb_bgl_lock(sbi, group_no));
-+ ext4_unlock_group(sb, group_no);
- percpu_counter_sub(&sbi->s_freeblocks_counter, num);
-
- if (sbi->s_log_groups_per_flex) {
- ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
-- spin_lock(sb_bgl_lock(sbi, flex_group));
-+ ext4_lock_group(sb, flex_group);
- sbi->s_flex_groups[flex_group].free_blocks -= num;
-- spin_unlock(sb_bgl_lock(sbi, flex_group));
-+ ext4_unlock_group(sb, flex_group);
- }
-
- BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
Index: linux-2.6.18-128.1.6/fs/ext4/ialloc.c
===================================================================
--- linux-2.6.18-128.1.6.orig/fs/ext4/ialloc.c
+++ linux-2.6.18-128.1.6/fs/ext4/ialloc.c
-@@ -118,15 +118,15 @@ ext4_read_inode_bitmap(struct super_bloc
- if (bh_uptodate_or_lock(bh))
- return bh;
-
+@@ -118,16 +118,16 @@ ext4_read_inode_bitmap(struct super_bloc
+ unlock_buffer(bh);
+ return bh;
+ }
- spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
ext4_init_inode_bitmap(sb, bh, block_group, desc);
+ set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
- unlock_buffer(bh);
- spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
return bh;
}
- spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+ ext4_unlock_group(sb, block_group);
- if (bh_submit_read(bh) < 0) {
- put_bh(bh);
- ext4_error(sb, __func__,
-@@ -221,8 +221,8 @@ void ext4_free_inode (handle_t *handle,
+ if (buffer_uptodate(bh)) {
+ /*
+ * if not uninit if bh is uptodate,
+@@ -221,9 +221,9 @@ void ext4_free_inode (handle_t *handle,
goto error_return;
/* Ok, now we can actually update the inode bitmaps.. */
-- if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-- bit, bitmap_bh->b_data))
-+ if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
-+ bit, bitmap_bh->b_data))
- ext4_error (sb, "ext4_free_inode",
- "bit already cleared for inode %lu", ino);
- else {
-@@ -233,22 +233,22 @@ void ext4_free_inode (handle_t *handle,
+- spin_lock(sb_bgl_lock(sbi, block_group));
++ ext4_lock_group(sb, block_group);
+ cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
+- spin_unlock(sb_bgl_lock(sbi, block_group));
++ ext4_unlock_group(sb, block_group);
+ if (!cleared)
+ ext4_error(sb, "ext4_free_inode",
+ "bit already cleared for inode %lu", ino);
+@@ -233,7 +233,7 @@ void ext4_free_inode (handle_t *handle,
if (fatal) goto error_return;
if (gdp) {
- spin_lock(sb_bgl_lock(sbi, block_group));
+ ext4_lock_group(sb, block_group);
- le16_add_cpu(&gdp->bg_free_inodes_count, 1);
- if (is_directory)
- le16_add_cpu(&gdp->bg_used_dirs_count, -1);
+ count = ext4_free_inodes_count(sb, gdp) + 1;
+ ext4_free_inodes_set(sb, gdp, count);
+ if (is_directory) {
+@@ -233,16 +233,16 @@ void ext4_free_inode (handle_t *handle,
+ }
gdp->bg_checksum = ext4_group_desc_csum(sbi,
block_group, gdp);
- spin_unlock(sb_bgl_lock(sbi, block_group));
}
}
BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
-@@ -630,7 +630,7 @@ struct inode *ext4_new_inode(handle_t *h
- if (err)
- goto fail;
-
-- if (ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
-+ if (ext4_set_bit_atomic(ext4_group_lock_ptr(sb, group),
- ino, bitmap_bh->b_data)) {
- printk(KERN_ERR "goal inode %lu unavailable\n", goal);
- /* Oh well, we tried. */
-@@ -691,7 +691,7 @@ repeat_in_this_group:
- if (err)
- goto fail;
+@@ -630,7 +630,7 @@
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
+
+- spin_lock(sb_bgl_lock(sbi, group));
++ ext4_lock_group(sb, group);
+ if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
+ /* not a free inode */
+ retval = 1;
+@@ -691,7 +691,7 @@
+ ino++;
+ if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
+ ino > EXT4_INODES_PER_GROUP(sb)) {
+- spin_unlock(sb_bgl_lock(sbi, group));
++ ext4_unlock_group(sb, group);
+ ext4_error(sb, __func__,
+ "reserved inode or inode > inodes count - "
+ "block_group = %u, inode=%lu", group,
+@@ -692,7 +692,7 @@
+ }
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+ err_ret:
+- spin_unlock(sb_bgl_lock(sbi, group));
++ ext4_unlock_group(sb, group);
+ return retval;
+ }
-- if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
-+ if (!ext4_set_bit_atomic(ext4_group_lock_ptr(sb, group),
- ino, bitmap_bh->b_data)) {
- /* we won it */
- BUFFER_TRACE(bitmap_bh,
-@@ -751,14 +751,14 @@ got:
+@@ -751,16 +751,16 @@ got:
}
free = 0;
+ ext4_lock_group(sb, group);
/* recheck and clear flag under lock if we still need to */
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
- gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
free = ext4_free_blocks_after_init(sb, group, gdp);
- gdp->bg_free_blocks_count = cpu_to_le16(free);
+ gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+ ext4_free_blks_set(sb, gdp, free);
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
+ gdp);
}
- spin_unlock(sb_bgl_lock(sbi, group));
+ ext4_unlock_group(sb, group);
/* Don't need to dirty bitmap block if we didn't change it */
if (free) {
-@@ -771,7 +771,7 @@ got:
- goto fail;
- }
-
-- spin_lock(sb_bgl_lock(sbi, group));
-+ ext4_lock_group(sb, group);
- /* If we didn't allocate from within the initialized part of the inode
- * table then we need to initialize up to this inode. */
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
-@@ -807,7 +807,7 @@ got:
- le16_add_cpu(&gdp->bg_used_dirs_count, 1);
- }
- gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
-- spin_unlock(sb_bgl_lock(sbi, group));
-+ ext4_unlock_group(sb, group);
- BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
- err = ext4_journal_dirty_metadata(handle, bh2);
- if (err) goto fail;
@@ -819,9 +819,9 @@ got:
if (sbi->s_log_groups_per_flex) {
static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
int fix = 0, ret, tmpmax;
-@@ -789,16 +777,16 @@ static int ext4_mb_init_cache(struct pag
- if (bh_uptodate_or_lock(bh[i]))
- continue;
-
+@@ -789,17 +777,17 @@ static int ext4_mb_init_cache(struct pag
+ unlock_buffer(bh[i]);
+ continue;
+ }
- spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+ ext4_lock_group(sb, first_group + i);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
ext4_init_block_bitmap(sb, bh[i],
first_group + i, desc);
+ set_bitmap_uptodate(bh[i]);
set_buffer_uptodate(bh[i]);
- unlock_buffer(bh[i]);
- spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+ ext4_unlock_group(sb, first_group + i);
+ unlock_buffer(bh[i]);
continue;
}
- spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+ ext4_unlock_group(sb, first_group + i);
- get_bh(bh[i]);
- bh[i]->b_end_io = end_buffer_read_sync;
- submit_bh(READ, bh[i]);
+ if (buffer_uptodate(bh[i])) {
+ /*
+ * if not uninit if bh is uptodate,
@@ -1021,7 +1009,7 @@ static int mb_find_order_for_block(struc
return 0;
}
{
__u32 *addr;
-@@ -1034,12 +1022,12 @@ static void mb_clear_bits(spinlock_t *lo
+@@ -1034,15 +1022,12 @@ static void mb_clear_bits(spinlock_t *lo
cur += 32;
continue;
}
-- mb_clear_bit_atomic(lock, cur, bm);
+- if (lock)
+- mb_clear_bit_atomic(lock, cur, bm);
+- else
+- mb_clear_bit(cur, bm);
+ mb_clear_bit(cur, bm);
cur++;
}
{
__u32 *addr;
-@@ -1052,7 +1040,7 @@ static void mb_set_bits(spinlock_t *lock
+@@ -1052,10 +1040,7 @@ static void mb_set_bits(spinlock_t *lock
cur += 32;
continue;
}
-- mb_set_bit_atomic(lock, cur, bm);
+- if (lock)
+- mb_set_bit_atomic(lock, cur, bm);
+- else
+- mb_set_bit(cur, bm);
+ mb_set_bit(cur, bm);
cur++;
}
+ mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+ ac->ac_b_ex.fe_len);
+ ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
- err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+ err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!err)
err = -EAGAIN;
goto out_err;
#ifdef AGGRESSIVE_CHECK
{
int i;
-@@ -3147,10 +3137,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
+@@ -3147,9 +3137,7 @@ ext4_mb_mark_diskspace_used(struct ext4_
}
}
#endif
-- mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
+- spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+- mb_set_bits(NULL, bitmap_bh->b_data,
- ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+	mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
-
-- spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
gdp->bg_free_blocks_count =
-@@ -3160,15 +3148,16 @@ ext4_mb_mark_diskspace_used(struct ext4_
- }
- le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
- gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
+@@ -3160,7 +3148,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
+ len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
+ ext4_free_blks_set(sb, gdp, len);
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
- spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+
+ ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
-
+ /*
+ * Now reduce the dirty block count also. Should not go negative
+@@ -3161,9 +3148,9 @@ ext4_mb_mark_diskspace_used(struct ext4_
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
ac->ac_b_ex.fe_group);
}
err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+@@ -3500,9 +3589,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
+
+ while (n) {
+ entry = rb_entry(n, struct ext4_free_data, node);
+- mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
+- bitmap, entry->start_blk,
+- entry->count);
++ mb_set_bits(bitmap, entry->start_blk, entry->count);
+ n = rb_next(n);
+ }
+ return;
@@ -3600,7 +3589,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
/*
* the function goes through all preallocation in this group and marks them
static noinline_for_stack int
ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
ext4_group_t group, ext4_grpblk_t block, int count)
-@@ -4755,7 +4744,6 @@ ext4_mb_free_metadata(handle_t *handle,
- BUG_ON(e4b->bd_bitmap_page == NULL);
- BUG_ON(e4b->bd_buddy_page == NULL);
-
-- ext4_lock_group(sb, group);
- for (i = 0; i < count; i++) {
- md = db->bb_md_cur;
- if (md && db->bb_tid != handle->h_transaction->t_tid) {
-@@ -4766,8 +4754,10 @@ ext4_mb_free_metadata(handle_t *handle,
- if (md == NULL) {
- ext4_unlock_group(sb, group);
- md = kmalloc(sizeof(*md), GFP_NOFS);
-- if (md == NULL)
-+ if (md == NULL) {
-+ ext4_lock_group(sb, group);
- return -ENOMEM;
-+ }
- md->num = 0;
- md->group = group;
-
-@@ -4800,7 +4790,6 @@ ext4_mb_free_metadata(handle_t *handle,
- db->bb_md_cur = NULL;
- }
- }
-- ext4_unlock_group(sb, group);
- return 0;
- }
-
-@@ -4901,6 +4890,13 @@ do_more:
- if (err)
- goto error_return;
-
-+ if (ac) {
-+ ac->ac_b_ex.fe_group = block_group;
-+ ac->ac_b_ex.fe_start = bit;
-+ ac->ac_b_ex.fe_len = count;
-+ ext4_mb_store_history(ac);
-+ }
-+
- err = ext4_mb_load_buddy(sb, block_group, &e4b);
- if (err)
- goto error_return;
-@@ -4912,42 +4908,31 @@ do_more:
- BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
- }
- #endif
-- mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-- bit, count);
--
-+ ext4_lock_group(sb, block_group);
-+ mb_clear_bits(bitmap_bh->b_data, bit, count);
- /* We dirtied the bitmap block */
- BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
- err = ext4_journal_dirty_metadata(handle, bitmap_bh);
-
-- if (ac) {
-- ac->ac_b_ex.fe_group = block_group;
-- ac->ac_b_ex.fe_start = bit;
-- ac->ac_b_ex.fe_len = count;
-- ext4_mb_store_history(ac);
-- }
--
- if (metadata) {
- /* blocks being freed are metadata. these blocks shouldn't
- * be used until this transaction is committed */
- ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
- } else {
-- ext4_lock_group(sb, block_group);
- mb_free_blocks(inode, &e4b, bit, count);
- ext4_mb_return_to_preallocation(inode, &e4b, block, count);
+@@ -4912,35 +4908,30 @@ do_more:
+ new_entry->count = count;
+ new_entry->t_tid = handle->h_transaction->t_tid;
+ ext4_lock_group(sb, block_group);
+- mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
+- bit, count);
++ mb_clear_bits(bitmap_bh->b_data, bit, count);
+ ext4_mb_free_metadata(handle, &e4b, new_entry);
+- ext4_unlock_group(sb, block_group);
+ } else {
+ ext4_lock_group(sb, block_group);
+ /* need to update group_info->bb_free and bitmap
+ * with group lock held. generate_buddy look at
+ * them with group lock_held
+ */
+- mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
+- bit, count);
++ mb_clear_bits(bitmap_bh->b_data, bit, count);
+ mb_free_blocks(inode, &e4b, bit, count);
+ ext4_mb_return_to_preallocation(inode, &e4b, block, count);
- ext4_unlock_group(sb, block_group);
}
- spin_lock(sb_bgl_lock(sbi, block_group));
- le16_add_cpu(&gdp->bg_free_blocks_count, count);
- gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+ ret = ext4_free_blks_count(sb, gdp) + count;
+ ext4_free_blks_set(sb, gdp, ret);
+ gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
- spin_unlock(sb_bgl_lock(sbi, block_group));
+ ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeblocks_counter, count);
===================================================================
--- linux-2.6.18-128.1.6.orig/fs/ext4/super.c
+++ linux-2.6.18-128.1.6/fs/ext4/super.c
-@@ -1934,16 +1934,18 @@ static int ext4_check_descriptors(struct
- "(block %llu)!", i, inode_table);
- return 0;
- }
+@@ -1934,18 +1934,18 @@ static int ext4_check_descriptors(struct
+ "(block %llu)!\n", i, inode_table);
+ return 0;
+ }
- spin_lock(sb_bgl_lock(sbi, i));
+ ext4_lock_group(sb, i);
- if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
- printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
- "Checksum for group %lu failed (%u!=%u)\n",
- i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
- gdp)), le16_to_cpu(gdp->bg_checksum));
-- if (!(sb->s_flags & MS_RDONLY))
-+ if (!(sb->s_flags & MS_RDONLY)) {
+ if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
+ printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+ "Checksum for group %u failed (%u!=%u)\n",
+ i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
+ gdp)), le16_to_cpu(gdp->bg_checksum));
+ if (!(sb->s_flags & MS_RDONLY)) {
+- spin_unlock(sb_bgl_lock(sbi, i));
+ ext4_unlock_group(sb, i);
return 0;
-+ }
+ }
}
- spin_unlock(sb_bgl_lock(sbi, i));
+ ext4_unlock_group(sb, i);
===================================================================
--- linux-2.6.18-128.1.6.orig/fs/ext4/ext4.h
+++ linux-2.6.18-128.1.6/fs/ext4/ext4.h
-@@ -1303,6 +1303,33 @@ extern int ext4_get_blocks_wrap(handle_t
- sector_t block, unsigned long max_blocks,
- struct buffer_head *bh, int create,
- int extend_disksize);
-+
-+static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
-+ ext4_group_t group)
-+{
-+ struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
-+ return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
-+}
-+
-+static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
-+{
-+ spin_lock(ext4_group_lock_ptr(sb, group));
-+}
-+
-+static inline void ext4_unlock_group(struct super_block *sb,
-+ ext4_group_t group)
-+{
-+ spin_unlock(ext4_group_lock_ptr(sb, group));
-+}
-+
-+static inline int ext4_is_group_locked(struct super_block *sb,
-+ ext4_group_t group)
-+{
-+ return spin_is_locked(ext4_group_lock_ptr(sb, group));
-+}
-+
-+
-+
- #endif /* __KERNEL__ */
-
- #endif /* _EXT4_H */
-Index: linux-2.6.18-128.1.6/fs/ext4/mballoc.h
-===================================================================
---- linux-2.6.18-128.1.6.orig/fs/ext4/mballoc.h
-+++ linux-2.6.18-128.1.6/fs/ext4/mballoc.h
-@@ -127,7 +127,6 @@ struct ext4_group_info {
+@@ -127,35 +127,9 @@ struct ext4_group_info {
};
#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
#define EXT4_MB_GRP_NEED_INIT(grp) \
(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
-@@ -272,31 +271,6 @@ static void ext4_mb_put_pa(struct ext4_a
- static int ext4_mb_init_per_dev_proc(struct super_block *sb);
- static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
-
-
-static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
-{
- &(grinfo->bb_state));
-}
-
- static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
- struct ext4_free_extent *fex)
- {
+ /*
+ * Inodes and files operations
+ */
+@@ -1303,6 +1303,32 @@ extern int ext4_get_blocks_wrap(handle_t
+ set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
+ }
+
++static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
++ ext4_group_t group)
++{
++ struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
++ return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
++}
++
++static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
++{
++ spin_lock(ext4_group_lock_ptr(sb, group));
++}
++
++static inline void ext4_unlock_group(struct super_block *sb,
++ ext4_group_t group)
++{
++ spin_unlock(ext4_group_lock_ptr(sb, group));
++}
++
++static inline int ext4_is_group_locked(struct super_block *sb,
++ ext4_group_t group)
++{
++ return spin_is_locked(ext4_group_lock_ptr(sb, group));
++}
++
++
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _EXT4_H */