1 Index: linux-2.6.18-128.1.6/fs/ext4/balloc.c
2 ===================================================================
3 --- linux-2.6.18-128.1.6.orig/fs/ext4/balloc.c
4 +++ linux-2.6.18-128.1.6/fs/ext4/balloc.c
5 @@ -321,15 +321,15 @@ ext4_read_block_bitmap(struct super_bloc
6 if (bh_uptodate_or_lock(bh))
9 - spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
10 + ext4_lock_group(sb, block_group);
11 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
12 ext4_init_block_bitmap(sb, bh, block_group, desc);
13 set_buffer_uptodate(bh);
15 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
16 + ext4_unlock_group(sb, block_group);
19 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
20 + ext4_unlock_group(sb, block_group);
21 if (bh_submit_read(bh) < 0) {
23 ext4_error(sb, __func__,
24 @@ -778,8 +778,9 @@ do_more:
25 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
26 J_ASSERT_BH(bitmap_bh,
27 bh2jh(bitmap_bh)->b_committed_data != NULL);
28 - ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
29 - bh2jh(bitmap_bh)->b_committed_data);
30 + ext4_set_bit_atomic(ext4_group_lock_ptr(sb, block_group),
32 + bh2jh(bitmap_bh)->b_committed_data);
35 * We clear the bit in the bitmap after setting the committed
36 @@ -787,7 +788,7 @@ do_more:
39 BUFFER_TRACE(bitmap_bh, "clear bit");
40 - if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
41 + if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
42 bit + i, bitmap_bh->b_data)) {
43 jbd_unlock_bh_state(bitmap_bh);
44 ext4_error(sb, __func__,
45 @@ -801,17 +802,17 @@ do_more:
47 jbd_unlock_bh_state(bitmap_bh);
49 - spin_lock(sb_bgl_lock(sbi, block_group));
50 + ext4_lock_group(sb, block_group);
51 le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
52 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
53 - spin_unlock(sb_bgl_lock(sbi, block_group));
54 + ext4_unlock_group(sb, block_group);
55 percpu_counter_add(&sbi->s_freeblocks_counter, count);
57 if (sbi->s_log_groups_per_flex) {
58 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
59 - spin_lock(sb_bgl_lock(sbi, flex_group));
60 + ext4_lock_group(sb, flex_group);
61 sbi->s_flex_groups[flex_group].free_blocks += count;
62 - spin_unlock(sb_bgl_lock(sbi, flex_group));
63 + ext4_unlock_group(sb, flex_group);
66 /* We dirtied the bitmap block */
67 @@ -1104,7 +1105,7 @@ repeat:
71 - if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
72 + if (!claim_block(ext4_group_lock_ptr(sb, group),
73 grp_goal, bitmap_bh)) {
75 * The block was allocated by another thread, or it was
76 @@ -1120,7 +1121,7 @@ repeat:
78 while (num < *count && grp_goal < end
79 && ext4_test_allocatable(grp_goal, bitmap_bh)
80 - && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
81 + && claim_block(ext4_group_lock_ptr(sb, group),
82 grp_goal, bitmap_bh)) {
85 @@ -1872,7 +1873,7 @@ allocated:
88 jbd_lock_bh_state(bitmap_bh);
89 - spin_lock(sb_bgl_lock(sbi, group_no));
90 + ext4_lock_group(sb, group_no);
91 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
94 @@ -1885,7 +1886,7 @@ allocated:
97 ext4_debug("found bit %d\n", grp_alloc_blk);
98 - spin_unlock(sb_bgl_lock(sbi, group_no));
99 + ext4_unlock_group(sb, group_no);
100 jbd_unlock_bh_state(bitmap_bh);
103 @@ -1902,19 +1903,19 @@ allocated:
104 * list of some description. We don't know in advance whether
105 * the caller wants to use it as metadata or data.
107 - spin_lock(sb_bgl_lock(sbi, group_no));
108 + ext4_lock_group(sb, group_no);
109 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
110 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
111 le16_add_cpu(&gdp->bg_free_blocks_count, -num);
112 gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
113 - spin_unlock(sb_bgl_lock(sbi, group_no));
114 + ext4_unlock_group(sb, group_no);
115 percpu_counter_sub(&sbi->s_freeblocks_counter, num);
117 if (sbi->s_log_groups_per_flex) {
118 ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
119 - spin_lock(sb_bgl_lock(sbi, flex_group));
120 + ext4_lock_group(sb, flex_group);
121 sbi->s_flex_groups[flex_group].free_blocks -= num;
122 - spin_unlock(sb_bgl_lock(sbi, flex_group));
123 + ext4_unlock_group(sb, flex_group);
126 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
127 Index: linux-2.6.18-128.1.6/fs/ext4/ialloc.c
128 ===================================================================
129 --- linux-2.6.18-128.1.6.orig/fs/ext4/ialloc.c
130 +++ linux-2.6.18-128.1.6/fs/ext4/ialloc.c
131 @@ -118,15 +118,15 @@ ext4_read_inode_bitmap(struct super_bloc
132 if (bh_uptodate_or_lock(bh))
135 - spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
136 + ext4_lock_group(sb, block_group);
137 if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
138 ext4_init_inode_bitmap(sb, bh, block_group, desc);
139 set_buffer_uptodate(bh);
141 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
142 + ext4_unlock_group(sb, block_group);
145 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
146 + ext4_unlock_group(sb, block_group);
147 if (bh_submit_read(bh) < 0) {
149 ext4_error(sb, __func__,
150 @@ -221,8 +221,8 @@ void ext4_free_inode (handle_t *handle,
153 /* Ok, now we can actually update the inode bitmaps.. */
154 - if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
155 - bit, bitmap_bh->b_data))
156 + if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
157 + bit, bitmap_bh->b_data))
158 ext4_error (sb, "ext4_free_inode",
159 "bit already cleared for inode %lu", ino);
161 @@ -233,22 +233,22 @@ void ext4_free_inode (handle_t *handle,
162 if (fatal) goto error_return;
165 - spin_lock(sb_bgl_lock(sbi, block_group));
166 + ext4_lock_group(sb, block_group);
167 le16_add_cpu(&gdp->bg_free_inodes_count, 1);
169 le16_add_cpu(&gdp->bg_used_dirs_count, -1);
170 gdp->bg_checksum = ext4_group_desc_csum(sbi,
172 - spin_unlock(sb_bgl_lock(sbi, block_group));
173 + ext4_unlock_group(sb, block_group);
174 percpu_counter_inc(&sbi->s_freeinodes_counter);
176 percpu_counter_dec(&sbi->s_dirs_counter);
178 if (sbi->s_log_groups_per_flex) {
179 flex_group = ext4_flex_group(sbi, block_group);
180 - spin_lock(sb_bgl_lock(sbi, flex_group));
181 + ext4_lock_group(sb, flex_group);
182 sbi->s_flex_groups[flex_group].free_inodes++;
183 - spin_unlock(sb_bgl_lock(sbi, flex_group));
184 + ext4_unlock_group(sb, flex_group);
187 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
188 @@ -630,7 +630,7 @@ struct inode *ext4_new_inode(handle_t *h
192 - if (ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
193 + if (ext4_set_bit_atomic(ext4_group_lock_ptr(sb, group),
194 ino, bitmap_bh->b_data)) {
195 printk(KERN_ERR "goal inode %lu unavailable\n", goal);
196 /* Oh well, we tried. */
197 @@ -691,7 +691,7 @@ repeat_in_this_group:
201 - if (!ext4_set_bit_atomic(sb_bgl_lock(sbi, group),
202 + if (!ext4_set_bit_atomic(ext4_group_lock_ptr(sb, group),
203 ino, bitmap_bh->b_data)) {
205 BUFFER_TRACE(bitmap_bh,
206 @@ -751,14 +751,14 @@ got:
210 - spin_lock(sb_bgl_lock(sbi, group));
211 + ext4_lock_group(sb, group);
212 /* recheck and clear flag under lock if we still need to */
213 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
214 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
215 free = ext4_free_blocks_after_init(sb, group, gdp);
216 gdp->bg_free_blocks_count = cpu_to_le16(free);
218 - spin_unlock(sb_bgl_lock(sbi, group));
219 + ext4_unlock_group(sb, group);
221 /* Don't need to dirty bitmap block if we didn't change it */
223 @@ -771,7 +771,7 @@ got:
227 - spin_lock(sb_bgl_lock(sbi, group));
228 + ext4_lock_group(sb, group);
229 /* If we didn't allocate from within the initialized part of the inode
230 * table then we need to initialize up to this inode. */
231 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
232 @@ -807,7 +807,7 @@ got:
233 le16_add_cpu(&gdp->bg_used_dirs_count, 1);
235 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
236 - spin_unlock(sb_bgl_lock(sbi, group));
237 + ext4_unlock_group(sb, group);
238 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
239 err = ext4_journal_dirty_metadata(handle, bh2);
241 @@ -819,9 +819,9 @@ got:
243 if (sbi->s_log_groups_per_flex) {
244 flex_group = ext4_flex_group(sbi, group);
245 - spin_lock(sb_bgl_lock(sbi, flex_group));
246 + ext4_lock_group(sb, flex_group);
247 sbi->s_flex_groups[flex_group].free_inodes--;
248 - spin_unlock(sb_bgl_lock(sbi, flex_group));
249 + ext4_unlock_group(sb, flex_group);
252 inode->i_uid = current->fsuid;
253 Index: linux-2.6.18-128.1.6/fs/ext4/mballoc.c
254 ===================================================================
255 --- linux-2.6.18-128.1.6.orig/fs/ext4/mballoc.c
256 +++ linux-2.6.18-128.1.6/fs/ext4/mballoc.c
257 @@ -361,24 +361,12 @@ static inline void mb_set_bit(int bit, v
258 ext4_set_bit(bit, addr);
261 -static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
263 - addr = mb_correct_addr_and_bit(&bit, addr);
264 - ext4_set_bit_atomic(lock, bit, addr);
267 static inline void mb_clear_bit(int bit, void *addr)
269 addr = mb_correct_addr_and_bit(&bit, addr);
270 ext4_clear_bit(bit, addr);
273 -static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
275 - addr = mb_correct_addr_and_bit(&bit, addr);
276 - ext4_clear_bit_atomic(lock, bit, addr);
279 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
281 int fix = 0, ret, tmpmax;
282 @@ -789,16 +777,16 @@ static int ext4_mb_init_cache(struct pag
283 if (bh_uptodate_or_lock(bh[i]))
286 - spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
287 + ext4_lock_group(sb, first_group + i);
288 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
289 ext4_init_block_bitmap(sb, bh[i],
290 first_group + i, desc);
291 set_buffer_uptodate(bh[i]);
292 unlock_buffer(bh[i]);
293 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
294 + ext4_unlock_group(sb, first_group + i);
297 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
298 + ext4_unlock_group(sb, first_group + i);
300 bh[i]->b_end_io = end_buffer_read_sync;
301 submit_bh(READ, bh[i]);
302 @@ -1021,7 +1009,7 @@ static int mb_find_order_for_block(struc
306 -static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
307 +static void mb_clear_bits(void *bm, int cur, int len)
311 @@ -1034,12 +1022,12 @@ static void mb_clear_bits(spinlock_t *lo
315 - mb_clear_bit_atomic(lock, cur, bm);
316 + mb_clear_bit(cur, bm);
321 -static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
322 +static void mb_set_bits(void *bm, int cur, int len)
326 @@ -1052,7 +1040,7 @@ static void mb_set_bits(spinlock_t *lock
330 - mb_set_bit_atomic(lock, cur, bm);
331 + mb_set_bit(cur, bm);
335 @@ -1268,8 +1256,7 @@ static int mb_mark_used(struct ext4_budd
336 e4b->bd_info->bb_counters[ord]++;
339 - mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
340 - EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
341 + mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
345 @@ -2651,7 +2638,7 @@ int ext4_mb_init(struct super_block *sb,
349 -/* need to called with ext4 group lock (ext4_lock_group) */
350 +/* needs to be called with the ext4 group lock held */
351 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
353 struct ext4_prealloc_space *pa;
354 @@ -3130,14 +3117,17 @@ ext4_mb_mark_diskspace_used(struct ext4_
355 * Fix the bitmap and repeat the block allocation
356 * We leak some of the blocks here.
358 - mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
359 - bitmap_bh->b_data, ac->ac_b_ex.fe_start,
360 - ac->ac_b_ex.fe_len);
361 + ext4_lock_group(sb, ac->ac_b_ex.fe_group);
362 + mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
363 + ac->ac_b_ex.fe_len);
364 + ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
365 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
371 + ext4_lock_group(sb, ac->ac_b_ex.fe_group);
372 #ifdef AGGRESSIVE_CHECK
375 @@ -3147,10 +3137,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
379 - mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
380 - ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
381 + mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
383 - spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
384 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
385 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
386 gdp->bg_free_blocks_count =
387 @@ -3160,15 +3148,16 @@ ext4_mb_mark_diskspace_used(struct ext4_
389 le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
390 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
391 - spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
393 + ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
394 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
396 if (sbi->s_log_groups_per_flex) {
397 ext4_group_t flex_group = ext4_flex_group(sbi,
398 ac->ac_b_ex.fe_group);
399 - spin_lock(sb_bgl_lock(sbi, flex_group));
400 + ext4_lock_group(sb, flex_group);
401 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
402 - spin_unlock(sb_bgl_lock(sbi, flex_group));
403 + ext4_unlock_group(sb, flex_group);
406 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
407 @@ -3600,7 +3589,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
409 * the function goes through all preallocation in this group and marks them
410 * used in in-core bitmap. buddy must be generated from this bitmap
411 - * Need to be called with ext4 group lock (ext4_lock_group)
412 + * Needs to be called with the ext4 group lock held.
414 static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
416 @@ -3646,8 +3635,7 @@ static int ext4_mb_generate_from_pa(stru
419 BUG_ON(groupnr != group);
420 - mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
421 - bitmap, start, len);
422 + mb_set_bits(bitmap, start, len);
426 @@ -4742,6 +4730,7 @@ static void ext4_mb_poll_new_transaction
427 ext4_mb_free_committed_blocks(sb);
430 +/* needs to be called with the ext4 group lock held */
431 static noinline_for_stack int
432 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
433 ext4_group_t group, ext4_grpblk_t block, int count)
434 @@ -4755,7 +4744,6 @@ ext4_mb_free_metadata(handle_t *handle,
435 BUG_ON(e4b->bd_bitmap_page == NULL);
436 BUG_ON(e4b->bd_buddy_page == NULL);
438 - ext4_lock_group(sb, group);
439 for (i = 0; i < count; i++) {
441 if (md && db->bb_tid != handle->h_transaction->t_tid) {
442 @@ -4766,8 +4754,10 @@ ext4_mb_free_metadata(handle_t *handle,
444 ext4_unlock_group(sb, group);
445 md = kmalloc(sizeof(*md), GFP_NOFS);
448 + ext4_lock_group(sb, group);
454 @@ -4800,7 +4790,6 @@ ext4_mb_free_metadata(handle_t *handle,
455 db->bb_md_cur = NULL;
458 - ext4_unlock_group(sb, group);
462 @@ -4901,6 +4890,13 @@ do_more:
467 + ac->ac_b_ex.fe_group = block_group;
468 + ac->ac_b_ex.fe_start = bit;
469 + ac->ac_b_ex.fe_len = count;
470 + ext4_mb_store_history(ac);
473 err = ext4_mb_load_buddy(sb, block_group, &e4b);
476 @@ -4912,42 +4908,31 @@ do_more:
477 BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
480 - mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
483 + ext4_lock_group(sb, block_group);
484 + mb_clear_bits(bitmap_bh->b_data, bit, count);
485 /* We dirtied the bitmap block */
486 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
487 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
490 - ac->ac_b_ex.fe_group = block_group;
491 - ac->ac_b_ex.fe_start = bit;
492 - ac->ac_b_ex.fe_len = count;
493 - ext4_mb_store_history(ac);
497 /* blocks being freed are metadata. these blocks shouldn't
498 * be used until this transaction is committed */
499 ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
501 - ext4_lock_group(sb, block_group);
502 mb_free_blocks(inode, &e4b, bit, count);
503 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
504 - ext4_unlock_group(sb, block_group);
507 - spin_lock(sb_bgl_lock(sbi, block_group));
508 le16_add_cpu(&gdp->bg_free_blocks_count, count);
509 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
510 - spin_unlock(sb_bgl_lock(sbi, block_group));
511 + ext4_unlock_group(sb, block_group);
512 percpu_counter_add(&sbi->s_freeblocks_counter, count);
514 if (sbi->s_log_groups_per_flex) {
515 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
516 - spin_lock(sb_bgl_lock(sbi, flex_group));
517 + ext4_lock_group(sb, flex_group);
518 sbi->s_flex_groups[flex_group].free_blocks += count;
519 - spin_unlock(sb_bgl_lock(sbi, flex_group));
520 + ext4_unlock_group(sb, flex_group);
523 ext4_mb_release_desc(&e4b);
524 Index: linux-2.6.18-128.1.6/fs/ext4/super.c
525 ===================================================================
526 --- linux-2.6.18-128.1.6.orig/fs/ext4/super.c
527 +++ linux-2.6.18-128.1.6/fs/ext4/super.c
528 @@ -1934,16 +1934,18 @@ static int ext4_check_descriptors(struct
529 "(block %llu)!", i, inode_table);
532 - spin_lock(sb_bgl_lock(sbi, i));
533 + ext4_lock_group(sb, i);
534 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
535 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
536 "Checksum for group %lu failed (%u!=%u)\n",
537 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
538 gdp)), le16_to_cpu(gdp->bg_checksum));
539 - if (!(sb->s_flags & MS_RDONLY))
540 + if (!(sb->s_flags & MS_RDONLY)) {
541 + ext4_unlock_group(sb, i);
545 - spin_unlock(sb_bgl_lock(sbi, i));
546 + ext4_unlock_group(sb, i);
548 first_block += EXT4_BLOCKS_PER_GROUP(sb);
550 Index: linux-2.6.18-128.1.6/fs/ext4/ext4.h
551 ===================================================================
552 --- linux-2.6.18-128.1.6.orig/fs/ext4/ext4.h
553 +++ linux-2.6.18-128.1.6/fs/ext4/ext4.h
554 @@ -1303,6 +1303,33 @@ extern int ext4_get_blocks_wrap(handle_t
555 sector_t block, unsigned long max_blocks,
556 struct buffer_head *bh, int create,
557 int extend_disksize);
559 +static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
560 + ext4_group_t group)
562 + struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
563 + return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
566 +static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
568 + spin_lock(ext4_group_lock_ptr(sb, group));
571 +static inline void ext4_unlock_group(struct super_block *sb,
572 + ext4_group_t group)
574 + spin_unlock(ext4_group_lock_ptr(sb, group));
577 +static inline int ext4_is_group_locked(struct super_block *sb,
578 + ext4_group_t group)
580 + return spin_is_locked(ext4_group_lock_ptr(sb, group));
585 #endif /* __KERNEL__ */
588 Index: linux-2.6.18-128.1.6/fs/ext4/mballoc.h
589 ===================================================================
590 --- linux-2.6.18-128.1.6.orig/fs/ext4/mballoc.h
591 +++ linux-2.6.18-128.1.6/fs/ext4/mballoc.h
592 @@ -127,7 +127,6 @@ struct ext4_group_info {
595 #define EXT4_GROUP_INFO_NEED_INIT_BIT 0
596 -#define EXT4_GROUP_INFO_LOCKED_BIT 1
598 #define EXT4_MB_GRP_NEED_INIT(grp) \
599 (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
600 @@ -272,31 +271,6 @@ static void ext4_mb_put_pa(struct ext4_a
601 static int ext4_mb_init_per_dev_proc(struct super_block *sb);
602 static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
605 -static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
607 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
609 - bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
612 -static inline void ext4_unlock_group(struct super_block *sb,
613 - ext4_group_t group)
615 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
617 - bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
620 -static inline int ext4_is_group_locked(struct super_block *sb,
621 - ext4_group_t group)
623 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
625 - return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
626 - &(grinfo->bb_state));
629 static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
630 struct ext4_free_extent *fex)