1 Index: linux-2.6.18-128.1.6/fs/ext4/balloc.c
2 ===================================================================
3 --- linux-2.6.18-128.1.6.orig/fs/ext4/balloc.c
4 +++ linux-2.6.18-128.1.6/fs/ext4/balloc.c
5 @@ -321,16 +321,16 @@ ext4_read_block_bitmap(struct super_bloc
9 - spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
10 + ext4_lock_group(sb, block_group);
11 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
12 ext4_init_block_bitmap(sb, bh, block_group, desc);
13 set_bitmap_uptodate(bh);
14 set_buffer_uptodate(bh);
15 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
16 + ext4_unlock_group(sb, block_group);
20 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
21 + ext4_unlock_group(sb, block_group);
22 if (buffer_uptodate(bh)) {
24 * if not uninit if bh is uptodate,
25 @@ -787,7 +788,7 @@ do_more:
28 BUFFER_TRACE(bitmap_bh, "clear bit");
29 - if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
30 + if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
31 bit + i, bitmap_bh->b_data)) {
32 jbd_unlock_bh_state(bitmap_bh);
33 ext4_error(sb, __func__,
34 @@ -801,18 +802,18 @@ do_more:
38 - spin_lock(sb_bgl_lock(sbi, block_group));
39 + ext4_lock_group(sb, block_group);
40 blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
41 ext4_free_blks_set(sb, desc, blk_free_count);
42 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
43 - spin_unlock(sb_bgl_lock(sbi, block_group));
44 + ext4_unlock_group(sb, block_group);
45 percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
47 if (sbi->s_log_groups_per_flex) {
48 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
49 - spin_lock(sb_bgl_lock(sbi, flex_group));
50 + ext4_lock_group(sb, flex_group);
51 sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
52 - spin_unlock(sb_bgl_lock(sbi, flex_group));
53 + ext4_unlock_group(sb, flex_group);
56 /* We dirtied the bitmap block */
57 Index: linux-2.6.18-128.1.6/fs/ext4/ialloc.c
58 ===================================================================
59 --- linux-2.6.18-128.1.6.orig/fs/ext4/ialloc.c
60 +++ linux-2.6.18-128.1.6/fs/ext4/ialloc.c
61 @@ -118,16 +118,16 @@ ext4_read_inode_bitmap(struct super_bloc
65 - spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
66 + ext4_lock_group(sb, block_group);
67 if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
68 ext4_init_inode_bitmap(sb, bh, block_group, desc);
69 set_bitmap_uptodate(bh);
70 set_buffer_uptodate(bh);
71 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
72 + ext4_unlock_group(sb, block_group);
76 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
77 + ext4_unlock_group(sb, block_group);
78 if (buffer_uptodate(bh)) {
80 * if not uninit if bh is uptodate,
81 @@ -221,9 +221,9 @@ void ext4_free_inode (handle_t *handle,
84 /* Ok, now we can actually update the inode bitmaps.. */
85 - spin_lock(sb_bgl_lock(sbi, block_group));
86 + ext4_lock_group(sb, block_group);
87 cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
88 - spin_unlock(sb_bgl_lock(sbi, block_group));
89 + ext4_unlock_group(sb, block_group);
91 ext4_error(sb, "ext4_free_inode",
92 "bit already cleared for inode %lu", ino);
93 @@ -233,7 +233,7 @@ void ext4_free_inode (handle_t *handle,
94 if (fatal) goto error_return;
97 - spin_lock(sb_bgl_lock(sbi, block_group));
98 + ext4_lock_group(sb, block_group);
99 count = ext4_free_inodes_count(sb, gdp) + 1;
100 ext4_free_inodes_set(sb, gdp, count);
102 @@ -233,16 +233,16 @@ void ext4_free_inode (handle_t *handle,
104 gdp->bg_checksum = ext4_group_desc_csum(sbi,
106 - spin_unlock(sb_bgl_lock(sbi, block_group));
107 + ext4_unlock_group(sb, block_group);
108 percpu_counter_inc(&sbi->s_freeinodes_counter);
110 percpu_counter_dec(&sbi->s_dirs_counter);
112 if (sbi->s_log_groups_per_flex) {
113 flex_group = ext4_flex_group(sbi, block_group);
114 - spin_lock(sb_bgl_lock(sbi, flex_group));
115 + ext4_lock_group(sb, flex_group);
116 sbi->s_flex_groups[flex_group].free_inodes++;
117 - spin_unlock(sb_bgl_lock(sbi, flex_group));
118 + ext4_unlock_group(sb, flex_group);
121 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
123 struct ext4_sb_info *sbi = EXT4_SB(sb);
124 struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
126 - spin_lock(sb_bgl_lock(sbi, group));
127 + ext4_lock_group(sb, group);
128 if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
129 /* not a free inode */
133 if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
134 ino > EXT4_INODES_PER_GROUP(sb)) {
135 - spin_unlock(sb_bgl_lock(sbi, group));
136 + ext4_unlock_group(sb, group);
137 ext4_error(sb, __func__,
138 "reserved inode or inode > inodes count - "
139 "block_group = %u, inode=%lu", group,
142 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
144 - spin_unlock(sb_bgl_lock(sbi, group));
145 + ext4_unlock_group(sb, group);
149 @@ -751,16 +751,16 @@ got:
153 - spin_lock(sb_bgl_lock(sbi, group));
154 + ext4_lock_group(sb, group);
155 /* recheck and clear flag under lock if we still need to */
156 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
157 free = ext4_free_blocks_after_init(sb, group, gdp);
158 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
159 ext4_free_blks_set(sb, gdp, free);
160 gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
163 - spin_unlock(sb_bgl_lock(sbi, group));
164 + ext4_unlock_group(sb, group);
166 /* Don't need to dirty bitmap block if we didn't change it */
168 @@ -819,9 +819,9 @@ got:
170 if (sbi->s_log_groups_per_flex) {
171 flex_group = ext4_flex_group(sbi, group);
172 - spin_lock(sb_bgl_lock(sbi, flex_group));
173 + ext4_lock_group(sb, flex_group);
174 sbi->s_flex_groups[flex_group].free_inodes--;
175 - spin_unlock(sb_bgl_lock(sbi, flex_group));
176 + ext4_unlock_group(sb, flex_group);
179 inode->i_uid = current->fsuid;
180 Index: linux-2.6.18-128.1.6/fs/ext4/mballoc.c
181 ===================================================================
182 --- linux-2.6.18-128.1.6.orig/fs/ext4/mballoc.c
183 +++ linux-2.6.18-128.1.6/fs/ext4/mballoc.c
184 @@ -361,24 +361,12 @@ static inline void mb_set_bit(int bit, v
185 ext4_set_bit(bit, addr);
188 -static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
190 - addr = mb_correct_addr_and_bit(&bit, addr);
191 - ext4_set_bit_atomic(lock, bit, addr);
194 static inline void mb_clear_bit(int bit, void *addr)
196 addr = mb_correct_addr_and_bit(&bit, addr);
197 ext4_clear_bit(bit, addr);
200 -static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
202 - addr = mb_correct_addr_and_bit(&bit, addr);
203 - ext4_clear_bit_atomic(lock, bit, addr);
206 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
208 int fix = 0, ret, tmpmax;
209 @@ -789,17 +777,17 @@ static int ext4_mb_init_cache(struct pag
210 unlock_buffer(bh[i]);
213 - spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
214 + ext4_lock_group(sb, first_group + i);
215 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
216 ext4_init_block_bitmap(sb, bh[i],
217 first_group + i, desc);
218 set_bitmap_uptodate(bh[i]);
219 set_buffer_uptodate(bh[i]);
220 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
221 + ext4_unlock_group(sb, first_group + i);
222 unlock_buffer(bh[i]);
225 - spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
226 + ext4_unlock_group(sb, first_group + i);
227 if (buffer_uptodate(bh[i])) {
229 * if not uninit if bh is uptodate,
230 @@ -1021,7 +1009,7 @@ static int mb_find_order_for_block(struc
234 -static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
235 +static void mb_clear_bits(void *bm, int cur, int len)
239 @@ -1034,15 +1022,12 @@ static void mb_clear_bits(spinlock_t *lo
244 - mb_clear_bit_atomic(lock, cur, bm);
246 - mb_clear_bit(cur, bm);
247 + mb_clear_bit(cur, bm);
252 -static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
253 +static void mb_set_bits(void *bm, int cur, int len)
257 @@ -1052,10 +1040,7 @@ static void mb_set_bits(spinlock_t *lock
262 - mb_set_bit_atomic(lock, cur, bm);
264 - mb_set_bit(cur, bm);
265 + mb_set_bit(cur, bm);
269 @@ -1268,8 +1256,7 @@ static int mb_mark_used(struct ext4_budd
270 e4b->bd_info->bb_counters[ord]++;
273 - mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
274 - EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
275 + mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
279 @@ -2651,7 +2638,7 @@ int ext4_mb_init(struct super_block *sb,
283 -/* need to called with ext4 group lock (ext4_lock_group) */
284 +/* needs to be called with the ext4 group lock held */
285 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
287 struct ext4_prealloc_space *pa;
288 @@ -3130,14 +3117,17 @@ ext4_mb_mark_diskspace_used(struct ext4_
289 * Fix the bitmap and repeat the block allocation
290 * We leak some of the blocks here.
292 - mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
293 - bitmap_bh->b_data, ac->ac_b_ex.fe_start,
294 - ac->ac_b_ex.fe_len);
295 + ext4_lock_group(sb, ac->ac_b_ex.fe_group);
296 + mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
297 + ac->ac_b_ex.fe_len);
298 + ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
299 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
305 + ext4_lock_group(sb, ac->ac_b_ex.fe_group);
306 #ifdef AGGRESSIVE_CHECK
309 @@ -3147,9 +3137,7 @@ ext4_mb_mark_diskspace_used(struct ext4_
313 - spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
314 - mb_set_bits(NULL, bitmap_bh->b_data,
315 - ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
316 + mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
317 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
318 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
319 gdp->bg_free_blocks_count =
320 @@ -3160,7 +3148,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
321 len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
322 ext4_free_blks_set(sb, gdp, len);
323 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
324 - spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
326 + ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
327 percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
329 * Now reduce the dirty block count also. Should not go negative
330 @@ -3161,9 +3148,9 @@ ext4_mb_mark_diskspace_used(struct ext4_
331 if (sbi->s_log_groups_per_flex) {
332 ext4_group_t flex_group = ext4_flex_group(sbi,
333 ac->ac_b_ex.fe_group);
334 - spin_lock(sb_bgl_lock(sbi, flex_group));
335 + ext4_lock_group(sb, flex_group);
336 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
337 - spin_unlock(sb_bgl_lock(sbi, flex_group));
338 + ext4_unlock_group(sb, flex_group);
341 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
342 @@ -3500,9 +3589,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
345 entry = rb_entry(n, struct ext4_free_data, node);
346 - mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
347 - bitmap, entry->start_blk,
349 + mb_set_bits(bitmap, entry->start_blk, entry->count);
353 @@ -3600,7 +3589,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
355 * the function goes through all preallocation in this group and marks them
356 * used in in-core bitmap. buddy must be generated from this bitmap
357 - * Need to be called with ext4 group lock (ext4_lock_group)
358 + * Needs to be called with the ext4 group lock held.
360 static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
362 @@ -3646,8 +3635,7 @@ static int ext4_mb_generate_from_pa(stru
365 BUG_ON(groupnr != group);
366 - mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
367 - bitmap, start, len);
368 + mb_set_bits(bitmap, start, len);
372 @@ -4742,6 +4730,7 @@ static void ext4_mb_poll_new_transaction
373 ext4_mb_free_committed_blocks(sb);
376 +/* needs to be called with the ext4 group lock held */
377 static noinline_for_stack int
378 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
379 ext4_group_t group, ext4_grpblk_t block, int count)
380 @@ -4912,35 +4908,30 @@ do_more:
381 new_entry->count = count;
382 new_entry->t_tid = handle->h_transaction->t_tid;
383 ext4_lock_group(sb, block_group);
384 - mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
386 + mb_clear_bits(bitmap_bh->b_data, bit, count);
387 ext4_mb_free_metadata(handle, &e4b, new_entry);
388 - ext4_unlock_group(sb, block_group);
390 ext4_lock_group(sb, block_group);
391 /* need to update group_info->bb_free and bitmap
392 * with group lock held. generate_buddy look at
393 * them with group lock_held
395 - mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
397 + mb_clear_bits(bitmap_bh->b_data, bit, count);
398 mb_free_blocks(inode, &e4b, bit, count);
399 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
400 - ext4_unlock_group(sb, block_group);
403 - spin_lock(sb_bgl_lock(sbi, block_group));
404 ret = ext4_free_blks_count(sb, gdp) + count;
405 ext4_free_blks_set(sb, gdp, ret);
406 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
407 - spin_unlock(sb_bgl_lock(sbi, block_group));
408 + ext4_unlock_group(sb, block_group);
409 percpu_counter_add(&sbi->s_freeblocks_counter, count);
411 if (sbi->s_log_groups_per_flex) {
412 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
413 - spin_lock(sb_bgl_lock(sbi, flex_group));
414 + ext4_lock_group(sb, flex_group);
415 sbi->s_flex_groups[flex_group].free_blocks += count;
416 - spin_unlock(sb_bgl_lock(sbi, flex_group));
417 + ext4_unlock_group(sb, flex_group);
420 ext4_mb_release_desc(&e4b);
421 Index: linux-2.6.18-128.1.6/fs/ext4/super.c
422 ===================================================================
423 --- linux-2.6.18-128.1.6.orig/fs/ext4/super.c
424 +++ linux-2.6.18-128.1.6/fs/ext4/super.c
425 @@ -1934,18 +1934,18 @@ static int ext4_check_descriptors(struct
426 "(block %llu)!\n", i, inode_table);
429 - spin_lock(sb_bgl_lock(sbi, i));
430 + ext4_lock_group(sb, i);
431 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
432 printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
433 "Checksum for group %u failed (%u!=%u)\n",
434 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
435 gdp)), le16_to_cpu(gdp->bg_checksum));
436 if (!(sb->s_flags & MS_RDONLY)) {
437 - spin_unlock(sb_bgl_lock(sbi, i));
438 + ext4_unlock_group(sb, i);
442 - spin_unlock(sb_bgl_lock(sbi, i));
443 + ext4_unlock_group(sb, i);
445 first_block += EXT4_BLOCKS_PER_GROUP(sb);
447 Index: linux-2.6.18-128.1.6/fs/ext4/ext4.h
448 ===================================================================
449 --- linux-2.6.18-128.1.6.orig/fs/ext4/ext4.h
450 +++ linux-2.6.18-128.1.6/fs/ext4/ext4.h
451 @@ -127,35 +127,9 @@ struct ext4_group_info {
454 #define EXT4_GROUP_INFO_NEED_INIT_BIT 0
455 -#define EXT4_GROUP_INFO_LOCKED_BIT 1
457 #define EXT4_MB_GRP_NEED_INIT(grp) \
458 (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
460 -static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
462 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
464 - bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
467 -static inline void ext4_unlock_group(struct super_block *sb,
468 - ext4_group_t group)
470 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
472 - bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
475 -static inline int ext4_is_group_locked(struct super_block *sb,
476 - ext4_group_t group)
478 - struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
480 - return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
481 - &(grinfo->bb_state));
485 * Inodes and files operations
487 @@ -1303,6 +1303,32 @@ extern int ext4_get_blocks_wrap(handle_t
488 set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
491 +static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
492 + ext4_group_t group)
494 + struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
495 + return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
498 +static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
500 + spin_lock(ext4_group_lock_ptr(sb, group));
503 +static inline void ext4_unlock_group(struct super_block *sb,
504 + ext4_group_t group)
506 + spin_unlock(ext4_group_lock_ptr(sb, group));
509 +static inline int ext4_is_group_locked(struct super_block *sb,
510 + ext4_group_t group)
512 + return spin_is_locked(ext4_group_lock_ptr(sb, group));
517 #endif /* __KERNEL__ */