1 Index: linux-stage/fs/ext4/balloc.c
2 ===================================================================
3 --- linux-stage.orig/fs/ext4/balloc.c
4 +++ linux-stage/fs/ext4/balloc.c
5 @@ -329,16 +329,16 @@ ext4_read_block_bitmap(struct super_bloc
9 - spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
10 + ext4_lock_group(sb, block_group);
11 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
12 ext4_init_block_bitmap(sb, bh, block_group, desc);
13 set_bitmap_uptodate(bh);
14 set_buffer_uptodate(bh);
15 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
16 + ext4_unlock_group(sb, block_group);
20 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
21 + ext4_unlock_group(sb, block_group);
22 if (buffer_uptodate(bh)) {
24 * if not uninit if bh is uptodate,
25 @@ -454,7 +454,7 @@ void ext4_add_groupblocks(handle_t *hand
26 down_write(&grp->alloc_sem);
27 for (i = 0, blocks_freed = 0; i < count; i++) {
28 BUFFER_TRACE(bitmap_bh, "clear bit");
29 - if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
30 + if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
31 bit + i, bitmap_bh->b_data)) {
32 ext4_error(sb, __func__,
33 "bit already cleared for block %llu",
34 @@ -464,18 +464,18 @@ void ext4_add_groupblocks(handle_t *hand
38 - spin_lock(sb_bgl_lock(sbi, block_group));
39 + ext4_lock_group(sb, block_group);
40 blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
41 ext4_free_blks_set(sb, desc, blk_free_count);
42 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
43 - spin_unlock(sb_bgl_lock(sbi, block_group));
44 + ext4_unlock_group(sb, block_group);
45 percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
47 if (sbi->s_log_groups_per_flex) {
48 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
49 - spin_lock(sb_bgl_lock(sbi, flex_group));
50 + ext4_lock_group(sb, flex_group);
51 sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
52 - spin_unlock(sb_bgl_lock(sbi, flex_group));
53 + ext4_unlock_group(sb, flex_group);
56 * request to reload the buddy with the
57 Index: linux-stage/fs/ext4/ext4.h
58 ===================================================================
59 --- linux-stage.orig/fs/ext4/ext4.h
60 +++ linux-stage/fs/ext4/ext4.h
61 @@ -1342,33 +1342,32 @@ struct ext4_group_info {
64 #define EXT4_GROUP_INFO_NEED_INIT_BIT 0
65 -#define EXT4_GROUP_INFO_LOCKED_BIT 1
67 #define EXT4_MB_GRP_NEED_INIT(grp) \
68 (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
70 -static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
71 +static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
74 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
75 + struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
76 + return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
79 - bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
80 +static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
82 + spin_lock(ext4_group_lock_ptr(sb, group));
85 static inline void ext4_unlock_group(struct super_block *sb,
88 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
90 - bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
91 + spin_unlock(ext4_group_lock_ptr(sb, group));
94 static inline int ext4_is_group_locked(struct super_block *sb,
97 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
99 - return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
100 - &(grinfo->bb_state));
101 + return spin_is_locked(ext4_group_lock_ptr(sb, group));
105 Index: linux-stage/fs/ext4/ialloc.c
106 ===================================================================
107 --- linux-stage.orig/fs/ext4/ialloc.c
108 +++ linux-stage/fs/ext4/ialloc.c
109 @@ -123,16 +123,16 @@ ext4_read_inode_bitmap(struct super_bloc
113 - spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
114 + ext4_lock_group(sb, block_group);
115 if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
116 ext4_init_inode_bitmap(sb, bh, block_group, desc);
117 set_bitmap_uptodate(bh);
118 set_buffer_uptodate(bh);
119 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
120 + ext4_unlock_group(sb, block_group);
124 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
125 + ext4_unlock_group(sb, block_group);
126 if (buffer_uptodate(bh)) {
128 * if not uninit if bh is uptodate,
129 @@ -244,8 +244,8 @@ void ext4_free_inode(handle_t *handle, s
132 /* Ok, now we can actually update the inode bitmaps.. */
133 - if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
134 - bit, bitmap_bh->b_data))
135 + if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
136 + bit, bitmap_bh->b_data))
137 ext4_error(sb, "ext4_free_inode",
138 "bit already cleared for inode %lu", ino);
140 @@ -256,7 +256,7 @@ void ext4_free_inode(handle_t *handle, s
141 if (fatal) goto error_return;
144 - spin_lock(sb_bgl_lock(sbi, block_group));
145 + ext4_lock_group(sb, block_group);
146 count = ext4_free_inodes_count(sb, gdp) + 1;
147 ext4_free_inodes_set(sb, gdp, count);
149 @@ -265,16 +265,16 @@ void ext4_free_inode(handle_t *handle, s
151 gdp->bg_checksum = ext4_group_desc_csum(sbi,
153 - spin_unlock(sb_bgl_lock(sbi, block_group));
154 + ext4_unlock_group(sb, block_group);
155 percpu_counter_inc(&sbi->s_freeinodes_counter);
157 percpu_counter_dec(&sbi->s_dirs_counter);
159 if (sbi->s_log_groups_per_flex) {
160 flex_group = ext4_flex_group(sbi, block_group);
161 - spin_lock(sb_bgl_lock(sbi, flex_group));
162 + ext4_lock_group(sb, flex_group);
163 sbi->s_flex_groups[flex_group].free_inodes++;
164 - spin_unlock(sb_bgl_lock(sbi, flex_group));
165 + ext4_unlock_group(sb, flex_group);
168 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
169 @@ -595,10 +595,10 @@ static int find_group_other(struct super
172 * claim the inode from the inode bitmap. If the group
173 - * is uninit we need to take the groups's sb_bgl_lock
174 + * is uninit we need to take the group's ext4_group_lock
175 * and clear the uninit flag. The inode bitmap update
176 * and group desc uninit flag clear should be done
177 - * after holding sb_bgl_lock so that ext4_read_inode_bitmap
178 + * after holding ext4_group_lock so that ext4_read_inode_bitmap
179 * doesn't race with the ext4_claim_inode
181 static int ext4_claim_inode(struct super_block *sb,
182 @@ -609,7 +609,7 @@ static int ext4_claim_inode(struct super
183 struct ext4_sb_info *sbi = EXT4_SB(sb);
184 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
186 - spin_lock(sb_bgl_lock(sbi, group));
187 + ext4_lock_group(sb, group);
188 if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
189 /* not a free inode */
191 @@ -618,7 +618,7 @@ static int ext4_claim_inode(struct super
193 if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
194 ino > EXT4_INODES_PER_GROUP(sb)) {
195 - spin_unlock(sb_bgl_lock(sbi, group));
196 + ext4_unlock_group(sb, group);
197 ext4_error(sb, __func__,
198 "reserved inode or inode > inodes count - "
199 "block_group = %u, inode=%lu", group,
200 @@ -662,7 +662,7 @@ static int ext4_claim_inode(struct super
202 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
204 - spin_unlock(sb_bgl_lock(sbi, group));
205 + ext4_unlock_group(sb, group);
209 @@ -854,7 +854,7 @@ got:
213 - spin_lock(sb_bgl_lock(sbi, group));
214 + ext4_lock_group(sb, group);
215 /* recheck and clear flag under lock if we still need to */
216 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
217 free = ext4_free_blocks_after_init(sb, group, gdp);
218 @@ -863,7 +863,7 @@ got:
219 gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
222 - spin_unlock(sb_bgl_lock(sbi, group));
223 + ext4_unlock_group(sb, group);
225 /* Don't need to dirty bitmap block if we didn't change it */
227 @@ -888,9 +888,9 @@ got:
229 if (sbi->s_log_groups_per_flex) {
230 flex_group = ext4_flex_group(sbi, group);
231 - spin_lock(sb_bgl_lock(sbi, flex_group));
232 + ext4_lock_group(sb, flex_group);
233 sbi->s_flex_groups[flex_group].free_inodes--;
234 - spin_unlock(sb_bgl_lock(sbi, flex_group));
235 + ext4_unlock_group(sb, flex_group);
238 inode->i_uid = current_fsuid();
239 Index: linux-stage/fs/ext4/mballoc.c
240 ===================================================================
241 --- linux-stage.orig/fs/ext4/mballoc.c
242 +++ linux-stage/fs/ext4/mballoc.c
243 @@ -375,24 +375,12 @@ static inline void mb_set_bit(int bit, v
244 ext4_set_bit(bit, addr);
247 -static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
249 - addr = mb_correct_addr_and_bit(&bit, addr);
250 - ext4_set_bit_atomic(lock, bit, addr);
253 static inline void mb_clear_bit(int bit, void *addr)
255 addr = mb_correct_addr_and_bit(&bit, addr);
256 ext4_clear_bit(bit, addr);
259 -static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
261 - addr = mb_correct_addr_and_bit(&bit, addr);
262 - ext4_clear_bit_atomic(lock, bit, addr);
265 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
267 int fix = 0, ret, tmpmax;
268 @@ -805,17 +793,17 @@ static int ext4_mb_init_cache(struct pag
269 unlock_buffer(bh[i]);
272 - spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
273 + ext4_lock_group(sb, first_group + i);
274 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
275 ext4_init_block_bitmap(sb, bh[i],
276 first_group + i, desc);
277 set_bitmap_uptodate(bh[i]);
278 set_buffer_uptodate(bh[i]);
279 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
280 + ext4_unlock_group(sb, first_group + i);
281 unlock_buffer(bh[i]);
284 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
285 + ext4_unlock_group(sb, first_group + i);
286 if (buffer_uptodate(bh[i])) {
288 * if not uninit if bh is uptodate,
289 @@ -1087,7 +1075,7 @@ static int mb_find_order_for_block(struc
293 -static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
294 +static void mb_clear_bits(void *bm, int cur, int len)
298 @@ -1100,15 +1088,12 @@ static void mb_clear_bits(spinlock_t *lo
303 - mb_clear_bit_atomic(lock, cur, bm);
305 - mb_clear_bit(cur, bm);
306 + mb_clear_bit(cur, bm);
311 -static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
312 +static void mb_set_bits(void *bm, int cur, int len)
316 @@ -1121,10 +1106,7 @@ static void mb_set_bits(spinlock_t *lock
321 - mb_set_bit_atomic(lock, cur, bm);
323 - mb_set_bit(cur, bm);
324 + mb_set_bit(cur, bm);
328 @@ -1339,8 +1321,7 @@ static int mb_mark_used(struct ext4_budd
329 e4b->bd_info->bb_counters[ord]++;
332 - mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
333 - EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
334 + mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
338 @@ -2841,7 +2822,7 @@ int ext4_mb_init(struct super_block *sb,
342 -/* need to called with ext4 group lock (ext4_lock_group) */
343 +/* needs to be called with the ext4 group lock held */
344 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
346 struct ext4_prealloc_space *pa;
347 @@ -3240,14 +3221,17 @@ ext4_mb_mark_diskspace_used(struct ext4_
348 * Fix the bitmap and repeat the block allocation
349 * We leak some of the blocks here.
351 - mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
352 - bitmap_bh->b_data, ac->ac_b_ex.fe_start,
353 - ac->ac_b_ex.fe_len);
354 + ext4_lock_group(sb, ac->ac_b_ex.fe_group);
355 + mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
356 + ac->ac_b_ex.fe_len);
357 + ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
358 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
364 + ext4_lock_group(sb, ac->ac_b_ex.fe_group);
365 #ifdef AGGRESSIVE_CHECK
368 @@ -3257,9 +3241,7 @@ ext4_mb_mark_diskspace_used(struct ext4_
372 - spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
373 - mb_set_bits(NULL, bitmap_bh->b_data,
374 - ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
375 + mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
376 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
377 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
378 ext4_free_blks_set(sb, gdp,
379 @@ -3269,7 +3251,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
380 len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
381 ext4_free_blks_set(sb, gdp, len);
382 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
383 - spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
385 + ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
386 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
388 * Now reduce the dirty block count also. Should not go negative
389 @@ -3284,9 +3267,9 @@ ext4_mb_mark_diskspace_used(struct ext4_
390 if (sbi->s_log_groups_per_flex) {
391 ext4_group_t flex_group = ext4_flex_group(sbi,
392 ac->ac_b_ex.fe_group);
393 - spin_lock(sb_bgl_lock(sbi, flex_group));
394 + ext4_lock_group(sb, flex_group);
395 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
396 - spin_unlock(sb_bgl_lock(sbi, flex_group));
397 + ext4_unlock_group(sb, flex_group);
400 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
401 @@ -3686,7 +3669,7 @@ ext4_mb_use_preallocated(struct ext4_all
402 * the function goes through all block freed in the group
403 * but not yet committed and marks them used in in-core bitmap.
404 * buddy must be generated from this bitmap
405 - * Need to be called with ext4 group lock (ext4_lock_group)
406 + * Need to be called with the ext4 group lock held
408 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
410 @@ -3700,9 +3683,7 @@ static void ext4_mb_generate_from_freeli
413 entry = rb_entry(n, struct ext4_free_data, node);
414 - mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
415 - bitmap, entry->start_blk,
417 + mb_set_bits(bitmap, entry->start_blk, entry->count);
421 @@ -3744,7 +3725,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
423 * the function goes through all preallocation in this group and marks them
424 * used in in-core bitmap. buddy must be generated from this bitmap
425 - * Need to be called with ext4 group lock (ext4_lock_group)
426 + * Need to be called with the ext4 group lock held.
428 static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
430 @@ -3790,8 +3771,7 @@ static int ext4_mb_generate_from_pa(stru
433 BUG_ON(groupnr != group);
434 - mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
435 - bitmap, start, len);
436 + mb_set_bits(bitmap, start, len);
440 @@ -5124,36 +5104,32 @@ do_more:
441 new_entry->group = block_group;
442 new_entry->count = count;
443 new_entry->t_tid = handle->h_transaction->t_tid;
445 ext4_lock_group(sb, block_group);
446 - mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
448 + mb_clear_bits(bitmap_bh->b_data, bit, count);
449 ext4_mb_free_metadata(handle, &e4b, new_entry);
450 - ext4_unlock_group(sb, block_group);
452 - ext4_lock_group(sb, block_group);
453 /* need to update group_info->bb_free and bitmap
454 * with group lock held. generate_buddy look at
455 * them with group lock_held
457 - mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
459 + ext4_lock_group(sb, block_group);
460 + mb_clear_bits(bitmap_bh->b_data, bit, count);
461 mb_free_blocks(inode, &e4b, bit, count);
462 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
463 - ext4_unlock_group(sb, block_group);
466 - spin_lock(sb_bgl_lock(sbi, block_group));
467 ret = ext4_free_blks_count(sb, gdp) + count;
468 ext4_free_blks_set(sb, gdp, ret);
469 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
470 - spin_unlock(sb_bgl_lock(sbi, block_group));
471 + ext4_unlock_group(sb, block_group);
472 percpu_counter_add(&sbi->s_freeblocks_counter, count);
474 if (sbi->s_log_groups_per_flex) {
475 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
476 - spin_lock(sb_bgl_lock(sbi, flex_group));
477 + ext4_lock_group(sb, flex_group);
478 sbi->s_flex_groups[flex_group].free_blocks += count;
479 - spin_unlock(sb_bgl_lock(sbi, flex_group));
480 + ext4_unlock_group(sb, flex_group);
483 ext4_mb_release_desc(&e4b);
484 Index: linux-stage/fs/ext4/super.c
485 ===================================================================
486 --- linux-stage.orig/fs/ext4/super.c
487 +++ linux-stage/fs/ext4/super.c
488 @@ -2020,18 +2020,18 @@ static int ext4_check_descriptors(struct
489 "(block %llu)!\n", i, inode_table);
492 - spin_lock(sb_bgl_lock(sbi, i));
493 + ext4_lock_group(sb, i);
494 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
495 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
496 "Checksum for group %u failed (%u!=%u)\n",
497 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
498 gdp)), le16_to_cpu(gdp->bg_checksum));
499 if (!(sb->s_flags & MS_RDONLY)) {
500 - spin_unlock(sb_bgl_lock(sbi, i));
501 + ext4_unlock_group(sb, i);
505 - spin_unlock(sb_bgl_lock(sbi, i));
506 + ext4_unlock_group(sb, i);
508 first_block += EXT4_BLOCKS_PER_GROUP(sb);