Whamcloud - gitweb
b=20668
[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / ext4-convert-group-lock-rhel5.patch
1 Index: linux-2.6.18-128.1.6/fs/ext4/balloc.c
2 ===================================================================
3 --- linux-2.6.18-128.1.6.orig/fs/ext4/balloc.c
4 +++ linux-2.6.18-128.1.6/fs/ext4/balloc.c
5 @@ -321,15 +321,15 @@ ext4_read_block_bitmap(struct super_bloc
6         if (bh_uptodate_or_lock(bh))
7                 return bh;
8  
9 -       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
10 +       ext4_lock_group(sb, block_group);
11         if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
12                 ext4_init_block_bitmap(sb, bh, block_group, desc);
13                 set_buffer_uptodate(bh);
14                 unlock_buffer(bh);
15 -               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
16 +               ext4_unlock_group(sb, block_group);
17                 return bh;
18         }
19 -       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
20 +       ext4_unlock_group(sb, block_group);
21         if (bh_submit_read(bh) < 0) {
22                 put_bh(bh);
23                 ext4_error(sb, __func__,
24 @@ -778,8 +778,9 @@ do_more:
25                 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
26                 J_ASSERT_BH(bitmap_bh,
27                                 bh2jh(bitmap_bh)->b_committed_data != NULL);
28 -               ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
29 -                               bh2jh(bitmap_bh)->b_committed_data);
30 +               ext4_set_bit_atomic(ext4_group_lock_ptr(sb, block_group),
31 +                                   bit + i,
32 +                                   bh2jh(bitmap_bh)->b_committed_data);
33  
34                 /*
35                  * We clear the bit in the bitmap after setting the committed
36 @@ -787,7 +788,7 @@ do_more:
37                  * the allocator uses.
38                  */
39                 BUFFER_TRACE(bitmap_bh, "clear bit");
40 -               if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
41 +               if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
42                                                 bit + i, bitmap_bh->b_data)) {
43                         jbd_unlock_bh_state(bitmap_bh);
44                         ext4_error(sb, __func__,
45 @@ -801,17 +802,17 @@ do_more:
46         }
47         jbd_unlock_bh_state(bitmap_bh);
48  
49 -       spin_lock(sb_bgl_lock(sbi, block_group));
50 +       ext4_lock_group(sb, block_group);
51         le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
52         desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
53 -       spin_unlock(sb_bgl_lock(sbi, block_group));
54 +       ext4_unlock_group(sb, block_group);
55         percpu_counter_add(&sbi->s_freeblocks_counter, count);
56  
57         if (sbi->s_log_groups_per_flex) {
58                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
59 -               spin_lock(sb_bgl_lock(sbi, flex_group));
60 +               ext4_lock_group(sb, flex_group);
61                 sbi->s_flex_groups[flex_group].free_blocks += count;
62 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
63 +               ext4_unlock_group(sb, flex_group);
64         }
65  
66         /* We dirtied the bitmap block */
67 @@ -1104,7 +1105,7 @@ repeat:
68         }
69         start = grp_goal;
70  
71 -       if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
72 +       if (!claim_block(ext4_group_lock_ptr(sb, group),
73                 grp_goal, bitmap_bh)) {
74                 /*
75                  * The block was allocated by another thread, or it was
76 @@ -1120,7 +1121,7 @@ repeat:
77         grp_goal++;
78         while (num < *count && grp_goal < end
79                 && ext4_test_allocatable(grp_goal, bitmap_bh)
80 -               && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
81 +               && claim_block(ext4_group_lock_ptr(sb, group),
82                                 grp_goal, bitmap_bh)) {
83                 num++;
84                 grp_goal++;
85 @@ -1872,7 +1873,7 @@ allocated:
86                 }
87         }
88         jbd_lock_bh_state(bitmap_bh);
89 -       spin_lock(sb_bgl_lock(sbi, group_no));
90 +       ext4_lock_group(sb, group_no);
91         if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
92                 int i;
93  
94 @@ -1885,7 +1886,7 @@ allocated:
95                 }
96         }
97         ext4_debug("found bit %d\n", grp_alloc_blk);
98 -       spin_unlock(sb_bgl_lock(sbi, group_no));
99 +       ext4_unlock_group(sb, group_no);
100         jbd_unlock_bh_state(bitmap_bh);
101  #endif
102  
103 @@ -1902,19 +1903,19 @@ allocated:
104          * list of some description.  We don't know in advance whether
105          * the caller wants to use it as metadata or data.
106          */
107 -       spin_lock(sb_bgl_lock(sbi, group_no));
108 +       ext4_lock_group(sb, group_no);
109         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
110                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
111         le16_add_cpu(&gdp->bg_free_blocks_count, -num);
112         gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
113 -       spin_unlock(sb_bgl_lock(sbi, group_no));
114 +       ext4_unlock_group(sb, group_no);
115         percpu_counter_sub(&sbi->s_freeblocks_counter, num);
116  
117         if (sbi->s_log_groups_per_flex) {
118                 ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
119 -               spin_lock(sb_bgl_lock(sbi, flex_group));
120 +               ext4_lock_group(sb, flex_group);
121                 sbi->s_flex_groups[flex_group].free_blocks -= num;
122 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
123 +               ext4_unlock_group(sb, flex_group);
124         }
125  
126         BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
127 Index: linux-2.6.18-128.1.6/fs/ext4/ialloc.c
128 ===================================================================
129 --- linux-2.6.18-128.1.6.orig/fs/ext4/ialloc.c
130 +++ linux-2.6.18-128.1.6/fs/ext4/ialloc.c
131 @@ -118,15 +118,15 @@ ext4_read_inode_bitmap(struct super_bloc
132         if (bh_uptodate_or_lock(bh))
133                 return bh;
134  
135 -       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
136 +       ext4_lock_group(sb, block_group);
137         if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
138                 ext4_init_inode_bitmap(sb, bh, block_group, desc);
139                 set_buffer_uptodate(bh);
140                 unlock_buffer(bh);
141 -               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
142 +               ext4_unlock_group(sb, block_group);
143                 return bh;
144         }
145 -       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
146 +       ext4_unlock_group(sb, block_group);
147         if (bh_submit_read(bh) < 0) {
148                 put_bh(bh);
149                 ext4_error(sb, __func__,
150 @@ -221,8 +221,8 @@ void ext4_free_inode (handle_t *handle, 
151                 goto error_return;
152  
153         /* Ok, now we can actually update the inode bitmaps.. */
154 -       if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
155 -                                       bit, bitmap_bh->b_data))
156 +       if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
157 +                                  bit, bitmap_bh->b_data))
158                 ext4_error (sb, "ext4_free_inode",
159                               "bit already cleared for inode %lu", ino);
160         else {
161 @@ -233,22 +233,22 @@ void ext4_free_inode (handle_t *handle, 
162                 if (fatal) goto error_return;
163  
164                 if (gdp) {
165 -                       spin_lock(sb_bgl_lock(sbi, block_group));
166 +                       ext4_lock_group(sb, block_group);
167                         le16_add_cpu(&gdp->bg_free_inodes_count, 1);
168                         if (is_directory)
169                                 le16_add_cpu(&gdp->bg_used_dirs_count, -1);
170                         gdp->bg_checksum = ext4_group_desc_csum(sbi,
171                                                         block_group, gdp);
172 -                       spin_unlock(sb_bgl_lock(sbi, block_group));
173 +                       ext4_unlock_group(sb, block_group);
174                         percpu_counter_inc(&sbi->s_freeinodes_counter);
175                         if (is_directory)
176                                 percpu_counter_dec(&sbi->s_dirs_counter);
177  
178                         if (sbi->s_log_groups_per_flex) {
179                                 flex_group = ext4_flex_group(sbi, block_group);
180 -                               spin_lock(sb_bgl_lock(sbi, flex_group));
181 +                               ext4_lock_group(sb, flex_group);
182                                 sbi->s_flex_groups[flex_group].free_inodes++;
183 -                               spin_unlock(sb_bgl_lock(sbi, flex_group));
184 +                               ext4_unlock_group(sb, flex_group);
185                         }
186                 }
187                 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
188 @@ -630,7 +630,7 @@ struct inode *ext4_new_inode(handle_t *h
189                 if (err)
190                         goto fail;
191  
192 -               if (ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
193 +               if (ext4_set_bit_atomic(ext4_group_lock_ptr(sb, group),
194                                         ino, bitmap_bh->b_data)) {
195                         printk(KERN_ERR "goal inode %lu unavailable\n", goal);
196                         /* Oh well, we tried. */
197 @@ -691,7 +691,7 @@ repeat_in_this_group:
198                         if (err)
199                                 goto fail;
200  
201 -                       if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
202 +                       if (!ext4_set_bit_atomic(ext4_group_lock_ptr(sb, group),
203                                                 ino, bitmap_bh->b_data)) {
204                                 /* we won it */
205                                 BUFFER_TRACE(bitmap_bh,
206 @@ -751,14 +751,14 @@ got:
207                 }
208  
209                 free = 0;
210 -               spin_lock(sb_bgl_lock(sbi, group));
211 +               ext4_lock_group(sb, group);
212                 /* recheck and clear flag under lock if we still need to */
213                 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
214                         gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
215                         free = ext4_free_blocks_after_init(sb, group, gdp);
216                         gdp->bg_free_blocks_count = cpu_to_le16(free);
217                 }
218 -               spin_unlock(sb_bgl_lock(sbi, group));
219 +               ext4_unlock_group(sb, group);
220  
221                 /* Don't need to dirty bitmap block if we didn't change it */
222                 if (free) {
223 @@ -771,7 +771,7 @@ got:
224                         goto fail;
225         }
226  
227 -       spin_lock(sb_bgl_lock(sbi, group));
228 +       ext4_lock_group(sb, group);
229         /* If we didn't allocate from within the initialized part of the inode
230          * table then we need to initialize up to this inode. */
231         if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
232 @@ -807,7 +807,7 @@ got:
233                 le16_add_cpu(&gdp->bg_used_dirs_count, 1);
234         }
235         gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
236 -       spin_unlock(sb_bgl_lock(sbi, group));
237 +       ext4_unlock_group(sb, group);
238         BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
239         err = ext4_journal_dirty_metadata(handle, bh2);
240         if (err) goto fail;
241 @@ -819,9 +819,9 @@ got:
242  
243         if (sbi->s_log_groups_per_flex) {
244                 flex_group = ext4_flex_group(sbi, group);
245 -               spin_lock(sb_bgl_lock(sbi, flex_group));
246 +               ext4_lock_group(sb, flex_group);
247                 sbi->s_flex_groups[flex_group].free_inodes--;
248 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
249 +               ext4_unlock_group(sb, flex_group);
250         }
251  
252         inode->i_uid = current->fsuid;
253 Index: linux-2.6.18-128.1.6/fs/ext4/mballoc.c
254 ===================================================================
255 --- linux-2.6.18-128.1.6.orig/fs/ext4/mballoc.c
256 +++ linux-2.6.18-128.1.6/fs/ext4/mballoc.c
257 @@ -361,24 +361,12 @@ static inline void mb_set_bit(int bit, v
258         ext4_set_bit(bit, addr);
259  }
260  
261 -static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
262 -{
263 -       addr = mb_correct_addr_and_bit(&bit, addr);
264 -       ext4_set_bit_atomic(lock, bit, addr);
265 -}
266 -
267  static inline void mb_clear_bit(int bit, void *addr)
268  {
269         addr = mb_correct_addr_and_bit(&bit, addr);
270         ext4_clear_bit(bit, addr);
271  }
272  
273 -static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
274 -{
275 -       addr = mb_correct_addr_and_bit(&bit, addr);
276 -       ext4_clear_bit_atomic(lock, bit, addr);
277 -}
278 -
279  static inline int mb_find_next_zero_bit(void *addr, int max, int start)
280  {
281         int fix = 0, ret, tmpmax;
282 @@ -789,16 +777,16 @@ static int ext4_mb_init_cache(struct pag
283                 if (bh_uptodate_or_lock(bh[i]))
284                         continue;
285  
286 -               spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
287 +               ext4_lock_group(sb, first_group + i);
288                 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
289                         ext4_init_block_bitmap(sb, bh[i],
290                                                 first_group + i, desc);
291                         set_buffer_uptodate(bh[i]);
292                         unlock_buffer(bh[i]);
293 -                       spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
294 +                       ext4_unlock_group(sb, first_group + i);
295                         continue;
296                 }
297 -               spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
298 +               ext4_unlock_group(sb, first_group + i);
299                 get_bh(bh[i]);
300                 bh[i]->b_end_io = end_buffer_read_sync;
301                 submit_bh(READ, bh[i]);
302 @@ -1021,7 +1009,7 @@ static int mb_find_order_for_block(struc
303         return 0;
304  }
305  
306 -static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
307 +static void mb_clear_bits(void *bm, int cur, int len)
308  {
309         __u32 *addr;
310  
311 @@ -1034,12 +1022,12 @@ static void mb_clear_bits(spinlock_t *lo
312                         cur += 32;
313                         continue;
314                 }
315 -               mb_clear_bit_atomic(lock, cur, bm);
316 +               mb_clear_bit(cur, bm);
317                 cur++;
318         }
319  }
320  
321 -static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
322 +static void mb_set_bits(void *bm, int cur, int len)
323  {
324         __u32 *addr;
325  
326 @@ -1052,7 +1040,7 @@ static void mb_set_bits(spinlock_t *lock
327                         cur += 32;
328                         continue;
329                 }
330 -               mb_set_bit_atomic(lock, cur, bm);
331 +               mb_set_bit(cur, bm);
332                 cur++;
333         }
334  }
335 @@ -1268,8 +1256,7 @@ static int mb_mark_used(struct ext4_budd
336                 e4b->bd_info->bb_counters[ord]++;
337         }
338  
339 -       mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
340 -                       EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
341 +       mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
342         mb_check_buddy(e4b);
343  
344         return ret;
345 @@ -2651,7 +2638,7 @@ int ext4_mb_init(struct super_block *sb,
346         return 0;
347  }
348  
349 -/* need to called with ext4 group lock (ext4_lock_group) */
350 +/* needs to be called with the ext4 group lock held */
351  static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
352  {
353         struct ext4_prealloc_space *pa;
354 @@ -3130,14 +3117,17 @@ ext4_mb_mark_diskspace_used(struct ext4_
355                  * Fix the bitmap and repeat the block allocation
356                  * We leak some of the blocks here.
357                  */
358 -               mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
359 -                               bitmap_bh->b_data, ac->ac_b_ex.fe_start,
360 -                               ac->ac_b_ex.fe_len);
361 +               ext4_lock_group(sb, ac->ac_b_ex.fe_group);
362 +               mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
363 +                           ac->ac_b_ex.fe_len);
364 +               ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
365                 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
366                 if (!err)
367                         err = -EAGAIN;
368                 goto out_err;
369         }
370 +
371 +       ext4_lock_group(sb, ac->ac_b_ex.fe_group);
372  #ifdef AGGRESSIVE_CHECK
373         {
374                 int i;
375 @@ -3147,10 +3137,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
376                 }
377         }
378  #endif
379 -       mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
380 -                               ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
381 +       mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
382  
383 -       spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
384         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
385                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
386                 gdp->bg_free_blocks_count =
387 @@ -3160,15 +3148,16 @@ ext4_mb_mark_diskspace_used(struct ext4_
388         }
389         le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
390         gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
391 -       spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
392 +
393 +       ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
394         percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
395  
396         if (sbi->s_log_groups_per_flex) {
397                 ext4_group_t flex_group = ext4_flex_group(sbi,
398                                                           ac->ac_b_ex.fe_group);
399 -               spin_lock(sb_bgl_lock(sbi, flex_group));
400 +               ext4_lock_group(sb, flex_group);
401                 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
402 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
403 +               ext4_unlock_group(sb, flex_group);
404         }
405  
406         err = ext4_journal_dirty_metadata(handle, bitmap_bh);
407 @@ -3600,7 +3589,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
408  /*
409   * the function goes through all preallocation in this group and marks them
410   * used in in-core bitmap. buddy must be generated from this bitmap
411 - * Need to be called with ext4 group lock (ext4_lock_group)
412 + * Needs to be called with the ext4 group lock held.
413   */
414  static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
415                                         ext4_group_t group)
416 @@ -3646,8 +3635,7 @@ static int ext4_mb_generate_from_pa(stru
417                         continue;
418                 }
419                 BUG_ON(groupnr != group);
420 -               mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
421 -                                               bitmap, start, len);
422 +               mb_set_bits(bitmap, start, len);
423                 preallocated += len;
424                 count++;
425         }
426 @@ -4742,6 +4730,7 @@ static void ext4_mb_poll_new_transaction
427         ext4_mb_free_committed_blocks(sb);
428  }
429  
430 +/* needs to be called with the ext4 group lock held */
431  static noinline_for_stack int
432  ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
433                           ext4_group_t group, ext4_grpblk_t block, int count)
434 @@ -4755,7 +4744,6 @@ ext4_mb_free_metadata(handle_t *handle, 
435         BUG_ON(e4b->bd_bitmap_page == NULL);
436         BUG_ON(e4b->bd_buddy_page == NULL);
437  
438 -       ext4_lock_group(sb, group);
439         for (i = 0; i < count; i++) {
440                 md = db->bb_md_cur;
441                 if (md && db->bb_tid != handle->h_transaction->t_tid) {
442 @@ -4766,8 +4754,10 @@ ext4_mb_free_metadata(handle_t *handle, 
443                 if (md == NULL) {
444                         ext4_unlock_group(sb, group);
445                         md = kmalloc(sizeof(*md), GFP_NOFS);
446 -                       if (md == NULL)
447 +                       if (md == NULL) {
448 +                               ext4_lock_group(sb, group);
449                                 return -ENOMEM;
450 +                       }
451                         md->num = 0;
452                         md->group = group;
453  
454 @@ -4800,7 +4790,6 @@ ext4_mb_free_metadata(handle_t *handle, 
455                         db->bb_md_cur = NULL;
456                 }
457         }
458 -       ext4_unlock_group(sb, group);
459         return 0;
460  }
461  
462 @@ -4901,6 +4890,13 @@ do_more:
463         if (err)
464                 goto error_return;
465  
466 +       if (ac) {
467 +               ac->ac_b_ex.fe_group = block_group;
468 +               ac->ac_b_ex.fe_start = bit;
469 +               ac->ac_b_ex.fe_len = count;
470 +               ext4_mb_store_history(ac);
471 +       }
472 +
473         err = ext4_mb_load_buddy(sb, block_group, &e4b);
474         if (err)
475                 goto error_return;
476 @@ -4912,42 +4908,31 @@ do_more:
477                         BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
478         }
479  #endif
480 -       mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
481 -                       bit, count);
482 -
483 +       ext4_lock_group(sb, block_group);
484 +       mb_clear_bits(bitmap_bh->b_data, bit, count);
485         /* We dirtied the bitmap block */
486         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
487         err = ext4_journal_dirty_metadata(handle, bitmap_bh);
488  
489 -       if (ac) {
490 -               ac->ac_b_ex.fe_group = block_group;
491 -               ac->ac_b_ex.fe_start = bit;
492 -               ac->ac_b_ex.fe_len = count;
493 -               ext4_mb_store_history(ac);
494 -       }
495 -
496         if (metadata) {
497                 /* blocks being freed are metadata. these blocks shouldn't
498                  * be used until this transaction is committed */
499                 ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
500         } else {
501 -               ext4_lock_group(sb, block_group);
502                 mb_free_blocks(inode, &e4b, bit, count);
503                 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
504 -               ext4_unlock_group(sb, block_group);
505         }
506  
507 -       spin_lock(sb_bgl_lock(sbi, block_group));
508         le16_add_cpu(&gdp->bg_free_blocks_count, count);
509         gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
510 -       spin_unlock(sb_bgl_lock(sbi, block_group));
511 +       ext4_unlock_group(sb, block_group);
512         percpu_counter_add(&sbi->s_freeblocks_counter, count);
513  
514         if (sbi->s_log_groups_per_flex) {
515                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
516 -               spin_lock(sb_bgl_lock(sbi, flex_group));
517 +               ext4_lock_group(sb, flex_group);
518                 sbi->s_flex_groups[flex_group].free_blocks += count;
519 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
520 +               ext4_unlock_group(sb, flex_group);
521         }
522  
523         ext4_mb_release_desc(&e4b);
524 Index: linux-2.6.18-128.1.6/fs/ext4/super.c
525 ===================================================================
526 --- linux-2.6.18-128.1.6.orig/fs/ext4/super.c
527 +++ linux-2.6.18-128.1.6/fs/ext4/super.c
528 @@ -1934,16 +1934,18 @@ static int ext4_check_descriptors(struct
529                                "(block %llu)!", i, inode_table);
530                         return 0;
531                 }
532 -               spin_lock(sb_bgl_lock(sbi, i));
533 +               ext4_lock_group(sb, i);
534                 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
535                         printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
536                                "Checksum for group %lu failed (%u!=%u)\n",
537                                i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
538                                gdp)), le16_to_cpu(gdp->bg_checksum));
539 -                       if (!(sb->s_flags & MS_RDONLY))
540 +                       if (!(sb->s_flags & MS_RDONLY)) {
541 +                               ext4_unlock_group(sb, i);
542                                 return 0;
543 +                       }
544                 }
545 -               spin_unlock(sb_bgl_lock(sbi, i));
546 +               ext4_unlock_group(sb, i);
547                 if (!flexbg_flag)
548                         first_block += EXT4_BLOCKS_PER_GROUP(sb);
549         }
550 Index: linux-2.6.18-128.1.6/fs/ext4/ext4.h
551 ===================================================================
552 --- linux-2.6.18-128.1.6.orig/fs/ext4/ext4.h
553 +++ linux-2.6.18-128.1.6/fs/ext4/ext4.h
554 @@ -1303,6 +1303,33 @@ extern int ext4_get_blocks_wrap(handle_t
555                         sector_t block, unsigned long max_blocks,
556                         struct buffer_head *bh, int create,
557                         int extend_disksize);
558 +
559 +static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
560 +                                             ext4_group_t group)
561 +{
562 +       struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
563 +       return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
564 +}
565 +
566 +static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
567 +{
568 +       spin_lock(ext4_group_lock_ptr(sb, group));
569 +}
570 +
571 +static inline void ext4_unlock_group(struct super_block *sb,
572 +                                       ext4_group_t group)
573 +{
574 +       spin_unlock(ext4_group_lock_ptr(sb, group));
575 +}
576 +
577 +static inline int ext4_is_group_locked(struct super_block *sb,
578 +                                       ext4_group_t group)
579 +{
580 +       return spin_is_locked(ext4_group_lock_ptr(sb, group));
581 +}
582 +
583 +
584 +
585  #endif /* __KERNEL__ */
586  
587  #endif /* _EXT4_H */
588 Index: linux-2.6.18-128.1.6/fs/ext4/mballoc.h
589 ===================================================================
590 --- linux-2.6.18-128.1.6.orig/fs/ext4/mballoc.h
591 +++ linux-2.6.18-128.1.6/fs/ext4/mballoc.h
592 @@ -127,7 +127,6 @@ struct ext4_group_info {
593  };
594  
595  #define EXT4_GROUP_INFO_NEED_INIT_BIT  0
596 -#define EXT4_GROUP_INFO_LOCKED_BIT     1
597  
598  #define EXT4_MB_GRP_NEED_INIT(grp)     \
599         (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
600 @@ -272,31 +271,6 @@ static void ext4_mb_put_pa(struct ext4_a
601  static int ext4_mb_init_per_dev_proc(struct super_block *sb);
602  static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
603  
604 -
605 -static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
606 -{
607 -       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
608 -
609 -       bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
610 -}
611 -
612 -static inline void ext4_unlock_group(struct super_block *sb,
613 -                                       ext4_group_t group)
614 -{
615 -       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
616 -
617 -       bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
618 -}
619 -
620 -static inline int ext4_is_group_locked(struct super_block *sb,
621 -                                       ext4_group_t group)
622 -{
623 -       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
624 -
625 -       return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
626 -                                               &(grinfo->bb_state));
627 -}
628 -
629  static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
630                                         struct ext4_free_extent *fex)
631  {