b=24214 Discard preallocation blocks after a failed allocation.
[fs/lustre-release.git] ldiskfs/kernel_patches/patches/ext4-convert-group-lock-sles11.patch
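This patch converts ldiskfs (ext4) block-group locking on SLES11 from the per-group bit-spinlock (EXT4_GROUP_INFO_LOCKED_BIT in ext4_group_info->bb_state) to the superblock-wide struct blockgroup_lock, so that ext4_lock_group() and the former sb_bgl_lock() callers serialize on the same spinlock. With one lock now covering both the bitmap/descriptor updates and mballoc's in-core state, the mb_set_bit_atomic()/mb_clear_bit_atomic() helpers become redundant and are removed. The sketch below is a minimal userspace model of the striped-lock scheme behind the new ext4_group_lock_ptr(); the NR_BG_LOCKS value, the pthread types, and all names here are illustrative stand-ins, not the kernel definitions.

/*
 * Minimal userspace model of striped block-group locking: a small,
 * fixed array of locks shared by all block groups, indexed by
 * hashing the group number.  NR_BG_LOCKS and all names are
 * illustrative only, not the kernel's definitions.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_BG_LOCKS 128		/* power of two, so & works as modulo */

static pthread_mutex_t bg_locks[NR_BG_LOCKS];

/* analogous to ext4_group_lock_ptr(): map a group to one shared lock */
static pthread_mutex_t *group_lock_ptr(unsigned int group)
{
	return &bg_locks[group & (NR_BG_LOCKS - 1)];
}

/* analogous to ext4_lock_group() / ext4_unlock_group() */
static void lock_group(unsigned int group)
{
	pthread_mutex_lock(group_lock_ptr(group));
}

static void unlock_group(unsigned int group)
{
	pthread_mutex_unlock(group_lock_ptr(group));
}

int main(void)
{
	for (int i = 0; i < NR_BG_LOCKS; i++)
		pthread_mutex_init(&bg_locks[i], NULL);

	/* groups whose numbers are equal mod NR_BG_LOCKS share a lock */
	printf("group 5   -> %p\n", (void *)group_lock_ptr(5));
	printf("group 133 -> %p\n", (void *)group_lock_ptr(5 + NR_BG_LOCKS));

	lock_group(5);
	/* ...update the group's bitmap and descriptor here... */
	unlock_group(5);
	return 0;
}

Hashing many groups onto a small fixed array bounds lock memory while keeping contention between distinct groups unlikely: two groups contend only when their numbers are equal modulo NR_BG_LOCKS.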
Index: linux-stage/fs/ext4/balloc.c
===================================================================
--- linux-stage.orig/fs/ext4/balloc.c
+++ linux-stage/fs/ext4/balloc.c
@@ -329,16 +329,16 @@ ext4_read_block_bitmap(struct super_bloc
                unlock_buffer(bh);
                return bh;
        }
-       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+       ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                ext4_init_block_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
-               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+               ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
-       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+       ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * if not uninit if bh is uptodate,
@@ -454,7 +454,7 @@ void ext4_add_groupblocks(handle_t *hand
        down_write(&grp->alloc_sem);
        for (i = 0, blocks_freed = 0; i < count; i++) {
                BUFFER_TRACE(bitmap_bh, "clear bit");
-               if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
+               if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        ext4_error(sb, __func__,
                                   "bit already cleared for block %llu",
@@ -464,18 +464,18 @@ void ext4_add_groupblocks(handle_t *hand
                        blocks_freed++;
                }
        }
-       spin_lock(sb_bgl_lock(sbi, block_group));
+       ext4_lock_group(sb, block_group);
        blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
        ext4_free_blks_set(sb, desc, blk_free_count);
        desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
-       spin_unlock(sb_bgl_lock(sbi, block_group));
+       ext4_unlock_group(sb, block_group);
        percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-               spin_lock(sb_bgl_lock(sbi, flex_group));
+               ext4_lock_group(sb, flex_group);
                sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
-               spin_unlock(sb_bgl_lock(sbi, flex_group));
+               ext4_unlock_group(sb, flex_group);
        }
        /*
         * request to reload the buddy with the
Index: linux-stage/fs/ext4/ext4.h
===================================================================
--- linux-stage.orig/fs/ext4/ext4.h
+++ linux-stage/fs/ext4/ext4.h
@@ -1342,33 +1342,32 @@ struct ext4_group_info {
 };
 
 #define EXT4_GROUP_INFO_NEED_INIT_BIT  0
-#define EXT4_GROUP_INFO_LOCKED_BIT     1
 
 #define EXT4_MB_GRP_NEED_INIT(grp)     \
        (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
 
-static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
+                                             ext4_group_t group)
 {
-       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+       struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
+       return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
+}
 
-       bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+{
+       spin_lock(ext4_group_lock_ptr(sb, group));
 }
 
 static inline void ext4_unlock_group(struct super_block *sb,
                                        ext4_group_t group)
 {
-       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-       bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+       spin_unlock(ext4_group_lock_ptr(sb, group));
 }
 
 static inline int ext4_is_group_locked(struct super_block *sb,
                                        ext4_group_t group)
 {
-       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-       return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
-                                               &(grinfo->bb_state));
+       return spin_is_locked(ext4_group_lock_ptr(sb, group));
 }
 
 /*
Index: linux-stage/fs/ext4/ialloc.c
===================================================================
--- linux-stage.orig/fs/ext4/ialloc.c
+++ linux-stage/fs/ext4/ialloc.c
@@ -123,16 +123,16 @@ ext4_read_inode_bitmap(struct super_bloc
                unlock_buffer(bh);
                return bh;
        }
-       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+       ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
-               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+               ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
-       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+       ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * if not uninit if bh is uptodate,
@@ -244,8 +244,8 @@ void ext4_free_inode(handle_t *handle, s
                goto error_return;
 
        /* Ok, now we can actually update the inode bitmaps.. */
-       if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
-                                       bit, bitmap_bh->b_data))
+       if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
+                                  bit, bitmap_bh->b_data))
                ext4_error(sb, "ext4_free_inode",
                           "bit already cleared for inode %lu", ino);
        else {
@@ -256,7 +256,7 @@ void ext4_free_inode(handle_t *handle, s
                if (fatal) goto error_return;
 
                if (gdp) {
-                       spin_lock(sb_bgl_lock(sbi, block_group));
+                       ext4_lock_group(sb, block_group);
                        count = ext4_free_inodes_count(sb, gdp) + 1;
                        ext4_free_inodes_set(sb, gdp, count);
                        if (is_directory) {
@@ -265,16 +265,16 @@ void ext4_free_inode(handle_t *handle, s
                        }
                        gdp->bg_checksum = ext4_group_desc_csum(sbi,
                                                        block_group, gdp);
-                       spin_unlock(sb_bgl_lock(sbi, block_group));
+                       ext4_unlock_group(sb, block_group);
                        percpu_counter_inc(&sbi->s_freeinodes_counter);
                        if (is_directory)
                                percpu_counter_dec(&sbi->s_dirs_counter);
 
                        if (sbi->s_log_groups_per_flex) {
                                flex_group = ext4_flex_group(sbi, block_group);
-                               spin_lock(sb_bgl_lock(sbi, flex_group));
+                               ext4_lock_group(sb, flex_group);
                                sbi->s_flex_groups[flex_group].free_inodes++;
-                               spin_unlock(sb_bgl_lock(sbi, flex_group));
+                               ext4_unlock_group(sb, flex_group);
                        }
                }
                BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
@@ -595,10 +595,10 @@ static int find_group_other(struct super
 
 /*
  * claim the inode from the inode bitmap. If the group
- * is uninit we need to take the groups's sb_bgl_lock
+ * is uninit we need to take the group's ext4_group_lock
  * and clear the uninit flag. The inode bitmap update
  * and group desc uninit flag clear should be done
- * after holding sb_bgl_lock so that ext4_read_inode_bitmap
+ * after holding ext4_group_lock so that ext4_read_inode_bitmap
  * doesn't race with the ext4_claim_inode
  */
 static int ext4_claim_inode(struct super_block *sb,
@@ -609,7 +609,7 @@ static int ext4_claim_inode(struct super
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
 
-       spin_lock(sb_bgl_lock(sbi, group));
+       ext4_lock_group(sb, group);
        if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */
                retval = 1;
@@ -618,7 +618,7 @@ static int ext4_claim_inode(struct super
        ino++;
        if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
                        ino > EXT4_INODES_PER_GROUP(sb)) {
-               spin_unlock(sb_bgl_lock(sbi, group));
+               ext4_unlock_group(sb, group);
                ext4_error(sb, __func__,
                           "reserved inode or inode > inodes count - "
                           "block_group = %u, inode=%lu", group,
@@ -662,7 +662,7 @@ static int ext4_claim_inode(struct super
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
 err_ret:
-       spin_unlock(sb_bgl_lock(sbi, group));
+       ext4_unlock_group(sb, group);
        return retval;
 }
 
@@ -854,7 +854,7 @@ got:
                }
 
                free = 0;
-               spin_lock(sb_bgl_lock(sbi, group));
+               ext4_lock_group(sb, group);
                /* recheck and clear flag under lock if we still need to */
                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        free = ext4_free_blocks_after_init(sb, group, gdp);
@@ -863,7 +863,7 @@ got:
                        gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
                                                                gdp);
                }
-               spin_unlock(sb_bgl_lock(sbi, group));
+               ext4_unlock_group(sb, group);
 
                /* Don't need to dirty bitmap block if we didn't change it */
                if (free) {
@@ -888,9 +888,9 @@ got:
 
        if (sbi->s_log_groups_per_flex) {
                flex_group = ext4_flex_group(sbi, group);
-               spin_lock(sb_bgl_lock(sbi, flex_group));
+               ext4_lock_group(sb, flex_group);
                sbi->s_flex_groups[flex_group].free_inodes--;
-               spin_unlock(sb_bgl_lock(sbi, flex_group));
+               ext4_unlock_group(sb, flex_group);
        }
 
        inode->i_uid = current_fsuid();
Index: linux-stage/fs/ext4/mballoc.c
===================================================================
--- linux-stage.orig/fs/ext4/mballoc.c
+++ linux-stage/fs/ext4/mballoc.c
@@ -375,24 +375,12 @@ static inline void mb_set_bit(int bit, v
        ext4_set_bit(bit, addr);
 }
 
-static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
-{
-       addr = mb_correct_addr_and_bit(&bit, addr);
-       ext4_set_bit_atomic(lock, bit, addr);
-}
-
 static inline void mb_clear_bit(int bit, void *addr)
 {
        addr = mb_correct_addr_and_bit(&bit, addr);
        ext4_clear_bit(bit, addr);
 }
 
-static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
-{
-       addr = mb_correct_addr_and_bit(&bit, addr);
-       ext4_clear_bit_atomic(lock, bit, addr);
-}
-
 static inline int mb_find_next_zero_bit(void *addr, int max, int start)
 {
        int fix = 0, ret, tmpmax;
@@ -805,17 +793,17 @@ static int ext4_mb_init_cache(struct pag
                        unlock_buffer(bh[i]);
                        continue;
                }
-               spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+               ext4_lock_group(sb, first_group + i);
                if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        ext4_init_block_bitmap(sb, bh[i],
                                                first_group + i, desc);
                        set_bitmap_uptodate(bh[i]);
                        set_buffer_uptodate(bh[i]);
-                       spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+                       ext4_unlock_group(sb, first_group + i);
                        unlock_buffer(bh[i]);
                        continue;
                }
-               spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
+               ext4_unlock_group(sb, first_group + i);
                if (buffer_uptodate(bh[i])) {
                        /*
                         * if not uninit if bh is uptodate,
@@ -1087,7 +1075,7 @@ static int mb_find_order_for_block(struc
        return 0;
 }
 
-static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
+static void mb_clear_bits(void *bm, int cur, int len)
 {
        __u32 *addr;
 
@@ -1100,15 +1088,12 @@ static void mb_clear_bits(spinlock_t *lo
                        cur += 32;
                        continue;
                }
-               if (lock)
-                       mb_clear_bit_atomic(lock, cur, bm);
-               else
-                       mb_clear_bit(cur, bm);
+               mb_clear_bit(cur, bm);
                cur++;
        }
 }
 
-static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
+static void mb_set_bits(void *bm, int cur, int len)
 {
        __u32 *addr;
 
@@ -1121,10 +1106,7 @@ static void mb_set_bits(spinlock_t *lock
                        cur += 32;
                        continue;
                }
-               if (lock)
-                       mb_set_bit_atomic(lock, cur, bm);
-               else
-                       mb_set_bit(cur, bm);
+               mb_set_bit(cur, bm);
                cur++;
        }
 }
@@ -1339,8 +1321,7 @@ static int mb_mark_used(struct ext4_budd
                e4b->bd_info->bb_counters[ord]++;
        }
 
-       mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
-                       EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
+       mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
        mb_check_buddy(e4b);
 
        return ret;
@@ -2841,7 +2822,7 @@ int ext4_mb_init(struct super_block *sb,
        return 0;
 }
 
-/* need to called with ext4 group lock (ext4_lock_group) */
+/* needs to be called with the ext4 group lock held */
 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
 {
        struct ext4_prealloc_space *pa;
@@ -3240,14 +3221,17 @@ ext4_mb_mark_diskspace_used(struct ext4_
                 * Fix the bitmap and repeat the block allocation
                 * We leak some of the blocks here.
                 */
-               mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
-                               bitmap_bh->b_data, ac->ac_b_ex.fe_start,
-                               ac->ac_b_ex.fe_len);
+               ext4_lock_group(sb, ac->ac_b_ex.fe_group);
+               mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
+                           ac->ac_b_ex.fe_len);
+               ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
                err = ext4_journal_dirty_metadata(handle, bitmap_bh);
                if (!err)
                        err = -EAGAIN;
                goto out_err;
        }
+
+       ext4_lock_group(sb, ac->ac_b_ex.fe_group);
 #ifdef AGGRESSIVE_CHECK
        {
                int i;
@@ -3257,9 +3241,7 @@ ext4_mb_mark_diskspace_used(struct ext4_
                }
        }
 #endif
-       spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
-       mb_set_bits(NULL, bitmap_bh->b_data,
-                               ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
+       mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
        if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
                ext4_free_blks_set(sb, gdp,
@@ -3269,7 +3251,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
        len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
        ext4_free_blks_set(sb, gdp, len);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
-       spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
+
+       ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
        percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
        /*
         * Now reduce the dirty block count also. Should not go negative
@@ -3284,9 +3267,9 @@ ext4_mb_mark_diskspace_used(struct ext4_
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi,
                                                          ac->ac_b_ex.fe_group);
-               spin_lock(sb_bgl_lock(sbi, flex_group));
+               ext4_lock_group(sb, flex_group);
                sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
-               spin_unlock(sb_bgl_lock(sbi, flex_group));
+               ext4_unlock_group(sb, flex_group);
        }
 
        err = ext4_journal_dirty_metadata(handle, bitmap_bh);
@@ -3686,7 +3669,7 @@ ext4_mb_use_preallocated(struct ext4_all
  * the function goes through all block freed in the group
  * but not yet committed and marks them used in in-core bitmap.
  * buddy must be generated from this bitmap
- * Need to be called with ext4 group lock (ext4_lock_group)
+ * Needs to be called with the ext4 group lock held
  */
 static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
                                                ext4_group_t group)
@@ -3700,9 +3683,7 @@ static void ext4_mb_generate_from_freeli
 
        while (n) {
                entry = rb_entry(n, struct ext4_free_data, node);
-               mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
-                               bitmap, entry->start_blk,
-                               entry->count);
+               mb_set_bits(bitmap, entry->start_blk, entry->count);
                n = rb_next(n);
        }
        return;
@@ -3744,7 +3725,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
 /*
  * the function goes through all preallocation in this group and marks them
  * used in in-core bitmap. buddy must be generated from this bitmap
- * Need to be called with ext4 group lock (ext4_lock_group)
+ * Needs to be called with the ext4 group lock held.
  */
 static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
                                        ext4_group_t group)
@@ -3790,8 +3771,7 @@ static int ext4_mb_generate_from_pa(stru
                        continue;
                }
                BUG_ON(groupnr != group);
-               mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
-                                               bitmap, start, len);
+               mb_set_bits(bitmap, start, len);
                preallocated += len;
                count++;
        }
@@ -5124,36 +5104,32 @@ do_more:
                new_entry->group  = block_group;
                new_entry->count = count;
                new_entry->t_tid = handle->h_transaction->t_tid;
+
                ext4_lock_group(sb, block_group);
-               mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-                               bit, count);
+               mb_clear_bits(bitmap_bh->b_data, bit, count);
                ext4_mb_free_metadata(handle, &e4b, new_entry);
-               ext4_unlock_group(sb, block_group);
        } else {
-               ext4_lock_group(sb, block_group);
                /* need to update group_info->bb_free and bitmap
                 * with group lock held. generate_buddy look at
                 * them with group lock_held
                 */
-               mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
-                               bit, count);
+               ext4_lock_group(sb, block_group);
+               mb_clear_bits(bitmap_bh->b_data, bit, count);
                mb_free_blocks(inode, &e4b, bit, count);
                ext4_mb_return_to_preallocation(inode, &e4b, block, count);
-               ext4_unlock_group(sb, block_group);
        }
 
-       spin_lock(sb_bgl_lock(sbi, block_group));
        ret = ext4_free_blks_count(sb, gdp) + count;
        ext4_free_blks_set(sb, gdp, ret);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
-       spin_unlock(sb_bgl_lock(sbi, block_group));
+       ext4_unlock_group(sb, block_group);
       percpu_counter_add(&sbi->s_freeblocks_counter, count);
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
-               spin_lock(sb_bgl_lock(sbi, flex_group));
+               ext4_lock_group(sb, flex_group);
                sbi->s_flex_groups[flex_group].free_blocks += count;
-               spin_unlock(sb_bgl_lock(sbi, flex_group));
+               ext4_unlock_group(sb, flex_group);
        }
 
        ext4_mb_release_desc(&e4b);
Index: linux-stage/fs/ext4/super.c
===================================================================
--- linux-stage.orig/fs/ext4/super.c
+++ linux-stage/fs/ext4/super.c
@@ -2020,18 +2020,18 @@ static int ext4_check_descriptors(struct
                               "(block %llu)!\n", i, inode_table);
                        return 0;
                }
-               spin_lock(sb_bgl_lock(sbi, i));
+               ext4_lock_group(sb, i);
                if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
                        printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
                               "Checksum for group %u failed (%u!=%u)\n",
                               i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
                               gdp)), le16_to_cpu(gdp->bg_checksum));
                        if (!(sb->s_flags & MS_RDONLY)) {
-                               spin_unlock(sb_bgl_lock(sbi, i));
+                               ext4_unlock_group(sb, i);
                                return 0;
                        }
                }
-               spin_unlock(sb_bgl_lock(sbi, i));
+               ext4_unlock_group(sb, i);
                if (!flexbg_flag)
                        first_block += EXT4_BLOCKS_PER_GROUP(sb);
        }