Branch: b1_8
author    johann <johann>
          Wed, 24 Jun 2009 15:39:18 +0000 (15:39 +0000)
committer johann <johann>
          Wed, 24 Jun 2009 15:39:18 +0000 (15:39 +0000)
b=19875
i=adilger
i=yangsheng

- use mb_find_next_bit() instead of find_next_bit() in
  ext4_mb_check_ondisk_bitmap(), as done in mainline ext4
- convert ext4_lock_group() to use sb_bgl_lock(); this patch
  has already been merged upstream.
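
The conversion replaces the per-group bit spinlock in
ext4_group_info->bb_state with the spinlocks of the superblock's shared
blockgroup_lock array, so sb_bgl_lock() callers and ext4_lock_group()
callers now serialize on the same lock; that is also why the patch can
delete the mb_set_bit_atomic()/mb_clear_bit_atomic() helpers. As a
minimal userspace sketch of the hashed-lock pattern that
ext4_group_lock_ptr() implements (pthread mutexes standing in for kernel
spinlocks; NR_BG_LOCKS and the helper name merely mirror the kernel's,
which scales NR_BG_LOCKS with NR_CPUS):

#include <pthread.h>
#include <stdio.h>

#define NR_BG_LOCKS 128         /* power of two, so the mask below works */

static pthread_mutex_t bg_locks[NR_BG_LOCKS];

/* analogue of ext4_group_lock_ptr(): hash a group number to a shared lock */
static pthread_mutex_t *bgl_lock_ptr(unsigned int group)
{
        return &bg_locks[group & (NR_BG_LOCKS - 1)];
}

int main(void)
{
        int i;

        for (i = 0; i < NR_BG_LOCKS; i++)
                pthread_mutex_init(&bg_locks[i], NULL);

        /* groups 5 and 133 collide: 133 & 127 == 5, same lock slot */
        pthread_mutex_lock(bgl_lock_ptr(5));
        printf("group 5 lock %p, group 133 lock %p\n",
               (void *)bgl_lock_ptr(5), (void *)bgl_lock_ptr(133));
        pthread_mutex_unlock(bgl_lock_ptr(5));
        return 0;
}

The memory cost stays fixed at NR_BG_LOCKS spinlocks no matter how many
block groups the filesystem has, at the price of occasional false
contention between groups that hash to the same slot.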

ldiskfs/kernel_patches/patches/ext4-convert-group-lock-sles11.patch [new file with mode: 0644]
ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-sles11.patch
ldiskfs/kernel_patches/series/ldiskfs-2.6-sles11.series

diff --git a/ldiskfs/kernel_patches/patches/ext4-convert-group-lock-sles11.patch b/ldiskfs/kernel_patches/patches/ext4-convert-group-lock-sles11.patch
new file mode 100644 (file)
index 0000000..b7d08bf
--- /dev/null
@@ -0,0 +1,509 @@
+Index: linux-stage/fs/ext4/balloc.c
+===================================================================
+--- linux-stage.orig/fs/ext4/balloc.c
++++ linux-stage/fs/ext4/balloc.c
+@@ -329,16 +329,16 @@ ext4_read_block_bitmap(struct super_bloc
+               unlock_buffer(bh);
+               return bh;
+       }
+-      spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
++      ext4_lock_group(sb, block_group);
+       if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+               ext4_init_block_bitmap(sb, bh, block_group, desc);
+               set_bitmap_uptodate(bh);
+               set_buffer_uptodate(bh);
+-              spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
++              ext4_unlock_group(sb, block_group);
+               unlock_buffer(bh);
+               return bh;
+       }
+-      spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
++      ext4_unlock_group(sb, block_group);
+       if (buffer_uptodate(bh)) {
+               /*
+                * if not uninit if bh is uptodate,
+@@ -454,7 +454,7 @@ void ext4_add_groupblocks(handle_t *hand
+       down_write(&grp->alloc_sem);
+       for (i = 0, blocks_freed = 0; i < count; i++) {
+               BUFFER_TRACE(bitmap_bh, "clear bit");
+-              if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
++              if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
+                                               bit + i, bitmap_bh->b_data)) {
+                       ext4_error(sb, __func__,
+                                  "bit already cleared for block %llu",
+@@ -464,18 +464,18 @@ void ext4_add_groupblocks(handle_t *hand
+                       blocks_freed++;
+               }
+       }
+-      spin_lock(sb_bgl_lock(sbi, block_group));
++      ext4_lock_group(sb, block_group);
+       blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
+       ext4_free_blks_set(sb, desc, blk_free_count);
+       desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
+-      spin_unlock(sb_bgl_lock(sbi, block_group));
++      ext4_unlock_group(sb, block_group);
+       percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
+       if (sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+-              spin_lock(sb_bgl_lock(sbi, flex_group));
++              ext4_lock_group(sb, flex_group);
+               sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
+-              spin_unlock(sb_bgl_lock(sbi, flex_group));
++              ext4_unlock_group(sb, flex_group);
+       }
+       /*
+        * request to reload the buddy with the
+Index: linux-stage/fs/ext4/ext4.h
+===================================================================
+--- linux-stage.orig/fs/ext4/ext4.h
++++ linux-stage/fs/ext4/ext4.h
+@@ -1342,33 +1342,32 @@ struct ext4_group_info {
+ };
+ #define EXT4_GROUP_INFO_NEED_INIT_BIT 0
+-#define EXT4_GROUP_INFO_LOCKED_BIT    1
+ #define EXT4_MB_GRP_NEED_INIT(grp)    \
+       (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+-static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
++static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
++                                            ext4_group_t group)
+ {
+-      struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
++      struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
++      return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
++}
+-      bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
++static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
++{
++      spin_lock(ext4_group_lock_ptr(sb, group));
+ }
+ static inline void ext4_unlock_group(struct super_block *sb,
+                                       ext4_group_t group)
+ {
+-      struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+-
+-      bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
++      spin_unlock(ext4_group_lock_ptr(sb, group));
+ }
+ static inline int ext4_is_group_locked(struct super_block *sb,
+                                       ext4_group_t group)
+ {
+-      struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+-
+-      return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
+-                                              &(grinfo->bb_state));
++      return spin_is_locked(ext4_group_lock_ptr(sb, group));
+ }
+ /*
+Index: linux-stage/fs/ext4/ialloc.c
+===================================================================
+--- linux-stage.orig/fs/ext4/ialloc.c
++++ linux-stage/fs/ext4/ialloc.c
+@@ -123,16 +123,16 @@ ext4_read_inode_bitmap(struct super_bloc
+               unlock_buffer(bh);
+               return bh;
+       }
+-      spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
++      ext4_lock_group(sb, block_group);
+       if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+               ext4_init_inode_bitmap(sb, bh, block_group, desc);
+               set_bitmap_uptodate(bh);
+               set_buffer_uptodate(bh);
+-              spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
++              ext4_unlock_group(sb, block_group);
+               unlock_buffer(bh);
+               return bh;
+       }
+-      spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
++      ext4_unlock_group(sb, block_group);
+       if (buffer_uptodate(bh)) {
+               /*
+                * if not uninit if bh is uptodate,
+@@ -244,8 +244,8 @@ void ext4_free_inode(handle_t *handle, s
+               goto error_return;
+       /* Ok, now we can actually update the inode bitmaps.. */
+-      if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+-                                      bit, bitmap_bh->b_data))
++      if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
++                                 bit, bitmap_bh->b_data))
+               ext4_error(sb, "ext4_free_inode",
+                          "bit already cleared for inode %lu", ino);
+       else {
+@@ -256,7 +256,7 @@ void ext4_free_inode(handle_t *handle, s
+               if (fatal) goto error_return;
+               if (gdp) {
+-                      spin_lock(sb_bgl_lock(sbi, block_group));
++                      ext4_lock_group(sb, block_group);
+                       count = ext4_free_inodes_count(sb, gdp) + 1;
+                       ext4_free_inodes_set(sb, gdp, count);
+                       if (is_directory) {
+@@ -265,16 +265,16 @@ void ext4_free_inode(handle_t *handle, s
+                       }
+                       gdp->bg_checksum = ext4_group_desc_csum(sbi,
+                                                       block_group, gdp);
+-                      spin_unlock(sb_bgl_lock(sbi, block_group));
++                      ext4_unlock_group(sb, block_group);
+                       percpu_counter_inc(&sbi->s_freeinodes_counter);
+                       if (is_directory)
+                               percpu_counter_dec(&sbi->s_dirs_counter);
+                       if (sbi->s_log_groups_per_flex) {
+                               flex_group = ext4_flex_group(sbi, block_group);
+-                              spin_lock(sb_bgl_lock(sbi, flex_group));
++                              ext4_lock_group(sb, flex_group);
+                               sbi->s_flex_groups[flex_group].free_inodes++;
+-                              spin_unlock(sb_bgl_lock(sbi, flex_group));
++                              ext4_unlock_group(sb, flex_group);
+                       }
+               }
+               BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
+@@ -595,10 +595,10 @@ static int find_group_other(struct super
+ /*
+  * claim the inode from the inode bitmap. If the group
+- * is uninit we need to take the groups's sb_bgl_lock
++ * is uninit we need to take the group's ext4_group_lock
+  * and clear the uninit flag. The inode bitmap update
+  * and group desc uninit flag clear should be done
+- * after holding sb_bgl_lock so that ext4_read_inode_bitmap
++ * after holding ext4_group_lock so that ext4_read_inode_bitmap
+  * doesn't race with the ext4_claim_inode
+  */
+ static int ext4_claim_inode(struct super_block *sb,
+@@ -609,7 +609,7 @@ static int ext4_claim_inode(struct super
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+       struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
+-      spin_lock(sb_bgl_lock(sbi, group));
++      ext4_lock_group(sb, group);
+       if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
+               /* not a free inode */
+               retval = 1;
+@@ -618,7 +618,7 @@ static int ext4_claim_inode(struct super
+       ino++;
+       if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
+                       ino > EXT4_INODES_PER_GROUP(sb)) {
+-              spin_unlock(sb_bgl_lock(sbi, group));
++              ext4_unlock_group(sb, group);
+               ext4_error(sb, __func__,
+                          "reserved inode or inode > inodes count - "
+                          "block_group = %u, inode=%lu", group,
+@@ -662,7 +662,7 @@ static int ext4_claim_inode(struct super
+       }
+       gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
+ err_ret:
+-      spin_unlock(sb_bgl_lock(sbi, group));
++      ext4_unlock_group(sb, group);
+       return retval;
+ }
+@@ -854,7 +854,7 @@ got:
+               }
+               free = 0;
+-              spin_lock(sb_bgl_lock(sbi, group));
++              ext4_lock_group(sb, group);
+               /* recheck and clear flag under lock if we still need to */
+               if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+                       free = ext4_free_blocks_after_init(sb, group, gdp);
+@@ -863,7 +863,7 @@ got:
+                       gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
+                                                               gdp);
+               }
+-              spin_unlock(sb_bgl_lock(sbi, group));
++              ext4_unlock_group(sb, group);
+               /* Don't need to dirty bitmap block if we didn't change it */
+               if (free) {
+@@ -888,9 +888,9 @@ got:
+       if (sbi->s_log_groups_per_flex) {
+               flex_group = ext4_flex_group(sbi, group);
+-              spin_lock(sb_bgl_lock(sbi, flex_group));
++              ext4_lock_group(sb, flex_group);
+               sbi->s_flex_groups[flex_group].free_inodes--;
+-              spin_unlock(sb_bgl_lock(sbi, flex_group));
++              ext4_unlock_group(sb, flex_group);
+       }
+       inode->i_uid = current_fsuid();
+Index: linux-stage/fs/ext4/mballoc.c
+===================================================================
+--- linux-stage.orig/fs/ext4/mballoc.c
++++ linux-stage/fs/ext4/mballoc.c
+@@ -375,24 +375,12 @@ static inline void mb_set_bit(int bit, v
+       ext4_set_bit(bit, addr);
+ }
+-static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
+-{
+-      addr = mb_correct_addr_and_bit(&bit, addr);
+-      ext4_set_bit_atomic(lock, bit, addr);
+-}
+-
+ static inline void mb_clear_bit(int bit, void *addr)
+ {
+       addr = mb_correct_addr_and_bit(&bit, addr);
+       ext4_clear_bit(bit, addr);
+ }
+-static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
+-{
+-      addr = mb_correct_addr_and_bit(&bit, addr);
+-      ext4_clear_bit_atomic(lock, bit, addr);
+-}
+-
+ static inline int mb_find_next_zero_bit(void *addr, int max, int start)
+ {
+       int fix = 0, ret, tmpmax;
+@@ -805,17 +793,17 @@ static int ext4_mb_init_cache(struct pag
+                       unlock_buffer(bh[i]);
+                       continue;
+               }
+-              spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
++              ext4_lock_group(sb, first_group + i);
+               if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+                       ext4_init_block_bitmap(sb, bh[i],
+                                               first_group + i, desc);
+                       set_bitmap_uptodate(bh[i]);
+                       set_buffer_uptodate(bh[i]);
+-                      spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
++                      ext4_unlock_group(sb, first_group + i);
+                       unlock_buffer(bh[i]);
+                       continue;
+               }
+-              spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
++              ext4_unlock_group(sb, first_group + i);
+               if (buffer_uptodate(bh[i])) {
+                       /*
+                        * if not uninit if bh is uptodate,
+@@ -1087,7 +1075,7 @@ static int mb_find_order_for_block(struc
+       return 0;
+ }
+-static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
++static void mb_clear_bits(void *bm, int cur, int len)
+ {
+       __u32 *addr;
+@@ -1100,15 +1088,12 @@ static void mb_clear_bits(spinlock_t *lo
+                       cur += 32;
+                       continue;
+               }
+-              if (lock)
+-                      mb_clear_bit_atomic(lock, cur, bm);
+-              else
+-                      mb_clear_bit(cur, bm);
++              mb_clear_bit(cur, bm);
+               cur++;
+       }
+ }
+-static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
++static void mb_set_bits(void *bm, int cur, int len)
+ {
+       __u32 *addr;
+@@ -1121,10 +1106,7 @@ static void mb_set_bits(spinlock_t *lock
+                       cur += 32;
+                       continue;
+               }
+-              if (lock)
+-                      mb_set_bit_atomic(lock, cur, bm);
+-              else
+-                      mb_set_bit(cur, bm);
++              mb_set_bit(cur, bm);
+               cur++;
+       }
+ }
+@@ -1339,8 +1321,7 @@ static int mb_mark_used(struct ext4_budd
+               e4b->bd_info->bb_counters[ord]++;
+       }
+-      mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
+-                      EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
++      mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
+       mb_check_buddy(e4b);
+       return ret;
+@@ -2841,7 +2822,7 @@ int ext4_mb_init(struct super_block *sb,
+       return 0;
+ }
+-/* need to called with ext4 group lock (ext4_lock_group) */
++/* needs to be called with the ext4 group lock held */
+ static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
+ {
+       struct ext4_prealloc_space *pa;
+@@ -3240,14 +3221,17 @@ ext4_mb_mark_diskspace_used(struct ext4_
+                * Fix the bitmap and repeat the block allocation
+                * We leak some of the blocks here.
+                */
+-              mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
+-                              bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+-                              ac->ac_b_ex.fe_len);
++              ext4_lock_group(sb, ac->ac_b_ex.fe_group);
++              mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
++                          ac->ac_b_ex.fe_len);
++              ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+               err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+               if (!err)
+                       err = -EAGAIN;
+               goto out_err;
+       }
++
++      ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+ #ifdef AGGRESSIVE_CHECK
+       {
+               int i;
+@@ -3257,9 +3241,7 @@ ext4_mb_mark_diskspace_used(struct ext4_
+               }
+       }
+ #endif
+-      spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+-      mb_set_bits(NULL, bitmap_bh->b_data,
+-                              ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
++      mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+       if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+               gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
+               ext4_free_blks_set(sb, gdp,
+@@ -3269,7 +3251,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
+       len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
+       ext4_free_blks_set(sb, gdp, len);
+       gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
+-      spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
++
++      ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
+       percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
+       /*
+        * Now reduce the dirty block count also. Should not go negative
+@@ -3284,9 +3267,9 @@ ext4_mb_mark_diskspace_used(struct ext4_
+       if (sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group = ext4_flex_group(sbi,
+                                                         ac->ac_b_ex.fe_group);
+-              spin_lock(sb_bgl_lock(sbi, flex_group));
++              ext4_lock_group(sb, flex_group);
+               sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
+-              spin_unlock(sb_bgl_lock(sbi, flex_group));
++              ext4_unlock_group(sb, flex_group);
+       }
+       err = ext4_journal_dirty_metadata(handle, bitmap_bh);
+@@ -3686,7 +3669,7 @@ ext4_mb_use_preallocated(struct ext4_all
+  * the function goes through all block freed in the group
+  * but not yet committed and marks them used in in-core bitmap.
+  * buddy must be generated from this bitmap
+- * Need to be called with ext4 group lock (ext4_lock_group)
++ * Need to be called with the ext4 group lock held
+  */
+ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
+                                               ext4_group_t group)
+@@ -3700,9 +3683,7 @@ static void ext4_mb_generate_from_freeli
+       while (n) {
+               entry = rb_entry(n, struct ext4_free_data, node);
+-              mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
+-                              bitmap, entry->start_blk,
+-                              entry->count);
++              mb_set_bits(bitmap, entry->start_blk, entry->count);
+               n = rb_next(n);
+       }
+       return;
+@@ -3744,7 +3725,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
+ /*
+  * the function goes through all preallocation in this group and marks them
+  * used in in-core bitmap. buddy must be generated from this bitmap
+- * Need to be called with ext4 group lock (ext4_lock_group)
++ * Need to be called with the ext4 group lock held.
+  */
+ static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+                                       ext4_group_t group)
+@@ -3790,8 +3771,7 @@ static int ext4_mb_generate_from_pa(stru
+                       continue;
+               }
+               BUG_ON(groupnr != group);
+-              mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
+-                                              bitmap, start, len);
++              mb_set_bits(bitmap, start, len);
+               preallocated += len;
+               count++;
+       }
+@@ -5124,36 +5104,32 @@ do_more:
+               new_entry->group  = block_group;
+               new_entry->count = count;
+               new_entry->t_tid = handle->h_transaction->t_tid;
++
+               ext4_lock_group(sb, block_group);
+-              mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
+-                              bit, count);
++              mb_clear_bits(bitmap_bh->b_data, bit, count);
+               ext4_mb_free_metadata(handle, &e4b, new_entry);
+-              ext4_unlock_group(sb, block_group);
+       } else {
+-              ext4_lock_group(sb, block_group);
+               /* need to update group_info->bb_free and bitmap
+                * with group lock held. generate_buddy look at
+                * them with group lock_held
+                */
+-              mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
+-                              bit, count);
++              ext4_lock_group(sb, block_group);
++              mb_clear_bits(bitmap_bh->b_data, bit, count);
+               mb_free_blocks(inode, &e4b, bit, count);
+               ext4_mb_return_to_preallocation(inode, &e4b, block, count);
+-              ext4_unlock_group(sb, block_group);
+       }
+-      spin_lock(sb_bgl_lock(sbi, block_group));
+       ret = ext4_free_blks_count(sb, gdp) + count;
+       ext4_free_blks_set(sb, gdp, ret);
+       gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
+-      spin_unlock(sb_bgl_lock(sbi, block_group));
++      ext4_unlock_group(sb, block_group);
+       percpu_counter_add(&sbi->s_freeblocks_counter, count);
+       if (sbi->s_log_groups_per_flex) {
+               ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
+-              spin_lock(sb_bgl_lock(sbi, flex_group));
++              ext4_lock_group(sb, flex_group);
+               sbi->s_flex_groups[flex_group].free_blocks += count;
+-              spin_unlock(sb_bgl_lock(sbi, flex_group));
++              ext4_unlock_group(sb, flex_group);
+       }
+       ext4_mb_release_desc(&e4b);
+Index: linux-stage/fs/ext4/super.c
+===================================================================
+--- linux-stage.orig/fs/ext4/super.c
++++ linux-stage/fs/ext4/super.c
+@@ -2020,18 +2020,18 @@ static int ext4_check_descriptors(struct
+                              "(block %llu)!\n", i, inode_table);
+                       return 0;
+               }
+-              spin_lock(sb_bgl_lock(sbi, i));
++              ext4_lock_group(sb, i);
+               if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
+                       printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+                              "Checksum for group %u failed (%u!=%u)\n",
+                              i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
+                              gdp)), le16_to_cpu(gdp->bg_checksum));
+                       if (!(sb->s_flags & MS_RDONLY)) {
+-                              spin_unlock(sb_bgl_lock(sbi, i));
++                              ext4_unlock_group(sb, i);
+                               return 0;
+                       }
+               }
+-              spin_unlock(sb_bgl_lock(sbi, i));
++              ext4_unlock_group(sb, i);
+               if (!flexbg_flag)
+                       first_block += EXT4_BLOCKS_PER_GROUP(sb);
+       }
diff --git a/ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-sles11.patch b/ldiskfs/kernel_patches/patches/ext4-mballoc-extra-checks-sles11.patch
index be8d9f3..6fc1683 100644 (file)
@@ -150,13 +150,14 @@ Index: linux-2.6.27.21-0.1/fs/ext4/mballoc.c
        if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
                if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
                                ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
-@@ -3689,22 +3703,66 @@ static void ext4_mb_generate_from_freeli
+@@ -3695,22 +3709,67 @@ static void ext4_mb_generate_from_freeli
  }
  
  /*
 + * check free blocks in bitmap match free block in group descriptor
 + * do this before taking preallocated blocks into account to be able
-+ * to detect on-disk corruptions
++ * to detect on-disk corruptions. The group lock should be held by the
++ * caller.
 + */
 +int ext4_mb_check_ondisk_bitmap(struct super_block *sb, void *bitmap,
 +                              struct ext4_group_desc *gdp, int group)
@@ -168,7 +169,7 @@ Index: linux-2.6.27.21-0.1/fs/ext4/mballoc.c
 +
 +      while (i < max) {
 +              first = i;
-+              i = find_next_bit(bitmap, max, i);
++              i = mb_find_next_bit(bitmap, max, i);
 +              if (i > max)
 +                      i = max;
 +              free += i - first;
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-2.6-sles11.series b/ldiskfs/kernel_patches/series/ldiskfs-2.6-sles11.series
index f3f5596..fdce94b 100644 (file)
@@ -23,3 +23,4 @@ ext4-alloc-policy-2.6-sles11.patch
 ext4-disable-delalloc-sles11.patch
 ext4-lustre-i_version.patch
 ext4-lock-cached_extent.patch
+ext4-convert-group-lock-sles11.patch