Whamcloud - gitweb
Branch b1_8
[fs/lustre-release.git] / ldiskfs / kernel_patches / patches / ext4-convert-group-lock-rhel5.patch
1 Index: linux-2.6.18-128.1.6/fs/ext4/balloc.c
2 ===================================================================
3 --- linux-2.6.18-128.1.6.orig/fs/ext4/balloc.c
4 +++ linux-2.6.18-128.1.6/fs/ext4/balloc.c
5 @@ -321,16 +321,16 @@ ext4_read_block_bitmap(struct super_bloc
6                 unlock_buffer(bh);
7                 return bh;
8         }
9 -       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
10 +       ext4_lock_group(sb, block_group);
11         if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
12                 ext4_init_block_bitmap(sb, bh, block_group, desc);
13                 set_bitmap_uptodate(bh);
14                 set_buffer_uptodate(bh);
15 -               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
16 +               ext4_unlock_group(sb, block_group);
17                 unlock_buffer(bh);
18                 return bh;
19         }
20 -       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
21 +       ext4_unlock_group(sb, block_group);
22         if (buffer_uptodate(bh)) {
23                 /*
24                  * if not uninit if bh is uptodate,
25 @@ -787,7 +788,7 @@ do_more:
26                  * the allocator uses.
27                  */
28                 BUFFER_TRACE(bitmap_bh, "clear bit");
29 -               if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
30 +               if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
31                                                 bit + i, bitmap_bh->b_data)) {
32                         jbd_unlock_bh_state(bitmap_bh);
33                         ext4_error(sb, __func__,
34 @@ -801,18 +802,18 @@ do_more:
35                         blocks_freed++;
36                 }
37         }
38 -       spin_lock(sb_bgl_lock(sbi, block_group));
39 +       ext4_lock_group(sb, block_group);
40         blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc);
41         ext4_free_blks_set(sb, desc, blk_free_count);
42         desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
43 -       spin_unlock(sb_bgl_lock(sbi, block_group));
44 +       ext4_unlock_group(sb, block_group);
45         percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed);
46  
47         if (sbi->s_log_groups_per_flex) {
48                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
49 -               spin_lock(sb_bgl_lock(sbi, flex_group));
50 +               ext4_lock_group(sb, flex_group);
51                 sbi->s_flex_groups[flex_group].free_blocks += blocks_freed;
52 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
53 +       	ext4_unlock_group(sb, flex_group);
54         }
55  
56         /* We dirtied the bitmap block */
57 Index: linux-2.6.18-128.1.6/fs/ext4/ialloc.c
58 ===================================================================
59 --- linux-2.6.18-128.1.6.orig/fs/ext4/ialloc.c
60 +++ linux-2.6.18-128.1.6/fs/ext4/ialloc.c
61 @@ -118,16 +118,16 @@ ext4_read_inode_bitmap(struct super_bloc
62                 unlock_buffer(bh);
63                 return bh;
64         }
65 -       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
66 +       ext4_lock_group(sb, block_group);
67         if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
68                 ext4_init_inode_bitmap(sb, bh, block_group, desc);
69                 set_bitmap_uptodate(bh);
70                 set_buffer_uptodate(bh);
71 -               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
72 +               ext4_unlock_group(sb, block_group);
73                 unlock_buffer(bh);
74                 return bh;
75         }
76 -       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
77 +       ext4_unlock_group(sb, block_group);
78         if (buffer_uptodate(bh)) {
79                 /*
80                  * if not uninit if bh is uptodate,
81 @@ -221,9 +221,9 @@ void ext4_free_inode (handle_t *handle, 
82                 goto error_return;
83  
84         /* Ok, now we can actually update the inode bitmaps.. */
85 -       spin_lock(sb_bgl_lock(sbi, block_group));
86 +       ext4_lock_group(sb, block_group);
87         cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
88 -       spin_unlock(sb_bgl_lock(sbi, block_group));
89 +       ext4_unlock_group(sb, block_group);
90         if (!cleared)
91                 ext4_error(sb, "ext4_free_inode",
92                            "bit already cleared for inode %lu", ino);
93 @@ -233,7 +233,7 @@ void ext4_free_inode (handle_t *handle, 
94                 if (fatal) goto error_return;
95  
96                 if (gdp) {
97 -                       spin_lock(sb_bgl_lock(sbi, block_group));
98 +                       ext4_lock_group(sb, block_group);
99                         count = ext4_free_inodes_count(sb, gdp) + 1;
100                         ext4_free_inodes_set(sb, gdp, count);
101                         if (is_directory) {
102 @@ -233,16 +233,16 @@ void ext4_free_inode (handle_t *handle, 
103                         }
104                         gdp->bg_checksum = ext4_group_desc_csum(sbi,
105                                                         block_group, gdp);
106 -                       spin_unlock(sb_bgl_lock(sbi, block_group));
107 +                       ext4_unlock_group(sb, block_group);
108                         percpu_counter_inc(&sbi->s_freeinodes_counter);
109                         if (is_directory)
110                                 percpu_counter_dec(&sbi->s_dirs_counter);
111  
112                         if (sbi->s_log_groups_per_flex) {
113                                 flex_group = ext4_flex_group(sbi, block_group);
114 -                               spin_lock(sb_bgl_lock(sbi, flex_group));
115 +                               ext4_lock_group(sb, flex_group);
116                                 sbi->s_flex_groups[flex_group].free_inodes++;
117 -                               spin_unlock(sb_bgl_lock(sbi, flex_group));
118 +                               ext4_unlock_group(sb, flex_group);
119                         }
120                 }
121                 BUFFER_TRACE(bh2, "call ext4_journal_dirty_metadata");
122 @@ -630,7 +630,7 @@
123         struct ext4_sb_info *sbi = EXT4_SB(sb);
124         struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
125
126 -       spin_lock(sb_bgl_lock(sbi, group));
127 +       ext4_lock_group(sb, group);
128         if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
129                 /* not a free inode */
130                 retval = 1;
131 @@ -691,7 +691,7 @@
132         ino++;
133         if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
134                         ino > EXT4_INODES_PER_GROUP(sb)) {
135 -               spin_unlock(sb_bgl_lock(sbi, group));
136 +               ext4_unlock_group(sb, group);
137                 ext4_error(sb, __func__,
138                            "reserved inode or inode > inodes count - "
139                            "block_group = %u, inode=%lu", group,
140 @@ -692,7 +692,7 @@
141         }
142         gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
143  err_ret:
144 -       spin_unlock(sb_bgl_lock(sbi, group));
145 +       ext4_unlock_group(sb, group);
146         return retval;
147  }
148  
149 @@ -751,16 +751,16 @@ got:
150                 }
151  
152                 free = 0;
153 -               spin_lock(sb_bgl_lock(sbi, group));
154 +               ext4_lock_group(sb, group);
155                 /* recheck and clear flag under lock if we still need to */
156                 if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
157                         free = ext4_free_blocks_after_init(sb, group, gdp);
158                         gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
159                         ext4_free_blks_set(sb, gdp, free);
160                         gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
161                                                                 gdp);
162                 }
163 -               spin_unlock(sb_bgl_lock(sbi, group));
164 +               ext4_unlock_group(sb, group);
165  
166                 /* Don't need to dirty bitmap block if we didn't change it */
167                 if (free) {
168 @@ -819,9 +819,9 @@ got:
169  
170         if (sbi->s_log_groups_per_flex) {
171                 flex_group = ext4_flex_group(sbi, group);
172 -               spin_lock(sb_bgl_lock(sbi, flex_group));
173 +               ext4_lock_group(sb, flex_group);
174                 sbi->s_flex_groups[flex_group].free_inodes--;
175 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
176 +               ext4_unlock_group(sb, flex_group);
177         }
178  
179         inode->i_uid = current->fsuid;
180 Index: linux-2.6.18-128.1.6/fs/ext4/mballoc.c
181 ===================================================================
182 --- linux-2.6.18-128.1.6.orig/fs/ext4/mballoc.c
183 +++ linux-2.6.18-128.1.6/fs/ext4/mballoc.c
184 @@ -361,24 +361,12 @@ static inline void mb_set_bit(int bit, v
185         ext4_set_bit(bit, addr);
186  }
187  
188 -static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
189 -{
190 -       addr = mb_correct_addr_and_bit(&bit, addr);
191 -       ext4_set_bit_atomic(lock, bit, addr);
192 -}
193 -
194  static inline void mb_clear_bit(int bit, void *addr)
195  {
196         addr = mb_correct_addr_and_bit(&bit, addr);
197         ext4_clear_bit(bit, addr);
198  }
199  
200 -static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
201 -{
202 -       addr = mb_correct_addr_and_bit(&bit, addr);
203 -       ext4_clear_bit_atomic(lock, bit, addr);
204 -}
205 -
206  static inline int mb_find_next_zero_bit(void *addr, int max, int start)
207  {
208         int fix = 0, ret, tmpmax;
209 @@ -789,17 +777,17 @@ static int ext4_mb_init_cache(struct pag
210                         unlock_buffer(bh[i]);
211                         continue;
212                 }
213 -               spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
214 +               ext4_lock_group(sb, first_group + i);
215                 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
216                         ext4_init_block_bitmap(sb, bh[i],
217                                                 first_group + i, desc);
218                         set_bitmap_uptodate(bh[i]);
219                         set_buffer_uptodate(bh[i]);
220 -                       spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
221 +                       ext4_unlock_group(sb, first_group + i);
222                         unlock_buffer(bh[i]);
223                         continue;
224                 }
225 -               spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
226 +               ext4_unlock_group(sb, first_group + i);
227                 if (buffer_uptodate(bh[i])) {
228                         /*
229                          * if not uninit if bh is uptodate,
230 @@ -1021,7 +1009,7 @@ static int mb_find_order_for_block(struc
231         return 0;
232  }
233  
234 -static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
235 +static void mb_clear_bits(void *bm, int cur, int len)
236  {
237         __u32 *addr;
238  
239 @@ -1034,15 +1022,12 @@ static void mb_clear_bits(spinlock_t *lo
240                         cur += 32;
241                         continue;
242                 }
243 -               if (lock)
244 -                       mb_clear_bit_atomic(lock, cur, bm);
245 -               else
246 -                       mb_clear_bit(cur, bm);
247 +               mb_clear_bit(cur, bm);
248                 cur++;
249         }
250  }
251  
252 -static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
253 +static void mb_set_bits(void *bm, int cur, int len)
254  {
255         __u32 *addr;
256  
257 @@ -1052,10 +1040,7 @@ static void mb_set_bits(spinlock_t *lock
258                         cur += 32;
259                         continue;
260                 }
261 -               if (lock)
262 -                       mb_set_bit_atomic(lock, cur, bm);
263 -               else
264 -                       mb_set_bit(cur, bm);
265 +               mb_set_bit(cur, bm);
266                 cur++;
267         }
268  }
269 @@ -1268,8 +1256,7 @@ static int mb_mark_used(struct ext4_budd
270                 e4b->bd_info->bb_counters[ord]++;
271         }
272  
273 -       mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
274 -                       EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
275 +       mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
276         mb_check_buddy(e4b);
277  
278         return ret;
279 @@ -2651,7 +2638,7 @@ int ext4_mb_init(struct super_block *sb,
280         return 0;
281  }
282  
283 -/* need to called with ext4 group lock (ext4_lock_group) */
284 +/* needs to be called with the ext4 group lock held */
285  static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
286  {
287         struct ext4_prealloc_space *pa;
288 @@ -3130,14 +3117,17 @@ ext4_mb_mark_diskspace_used(struct ext4_
289                  * Fix the bitmap and repeat the block allocation
290                  * We leak some of the blocks here.
291                  */
292 -               mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
293 -                               bitmap_bh->b_data, ac->ac_b_ex.fe_start,
294 -                               ac->ac_b_ex.fe_len);
295 +               ext4_lock_group(sb, ac->ac_b_ex.fe_group);
296 +               mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
297 +                           ac->ac_b_ex.fe_len);
298 +               ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
299                 err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
300                 if (!err)
301                         err = -EAGAIN;
302                 goto out_err;
303         }
304 +
305 +       ext4_lock_group(sb, ac->ac_b_ex.fe_group);
306  #ifdef AGGRESSIVE_CHECK
307         {
308                 int i;
309 @@ -3147,9 +3137,7 @@ ext4_mb_mark_diskspace_used(struct ext4_
310                 }
311         }
312  #endif
313 -       spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
314 -       mb_set_bits(NULL, bitmap_bh->b_data,
315 -                               ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
316 +       mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,ac->ac_b_ex.fe_len);
317         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
318                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
319                 gdp->bg_free_blocks_count =
320 @@ -3160,7 +3148,8 @@ ext4_mb_mark_diskspace_used(struct ext4_
321         len = ext4_free_blks_count(sb, gdp) - ac->ac_b_ex.fe_len;
322         ext4_free_blks_set(sb, gdp, len);
323         gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
324 -       spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
325 +
326 +       ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
327         percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
328         /*
329          * Now reduce the dirty block count also. Should not go negative
330 @@ -3161,9 +3148,9 @@ ext4_mb_mark_diskspace_used(struct ext4_
331         if (sbi->s_log_groups_per_flex) {
332                 ext4_group_t flex_group = ext4_flex_group(sbi,
333                                                           ac->ac_b_ex.fe_group);
334 -               spin_lock(sb_bgl_lock(sbi, flex_group));
335 +               ext4_lock_group(sb, flex_group);
336                 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
337 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
338 +               ext4_unlock_group(sb, flex_group);
339         }
340  
341         err = ext4_journal_dirty_metadata(handle, bitmap_bh);
342 @@ -3500,9 +3589,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
343  
344         while (n) {
345                 entry = rb_entry(n, struct ext4_free_data, node);
346 -               mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
347 -                               bitmap, entry->start_blk,
348 -                               entry->count);
349 +               mb_set_bits(bitmap, entry->start_blk, entry->count);
350                 n = rb_next(n);
351         }
352         return;
353 @@ -3600,7 +3589,7 @@ int ext4_mb_check_ondisk_bitmap(struct s
354  /*
355   * the function goes through all preallocation in this group and marks them
356   * used in in-core bitmap. buddy must be generated from this bitmap
357 - * Need to be called with ext4 group lock (ext4_lock_group)
358 + * Need to be called with ext4 group lock held.
359   */
360  static int ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
361                                         ext4_group_t group)
362 @@ -3646,8 +3635,7 @@ static int ext4_mb_generate_from_pa(stru
363                         continue;
364                 }
365                 BUG_ON(groupnr != group);
366 -               mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
367 -                                               bitmap, start, len);
368 +               mb_set_bits(bitmap, start, len);
369                 preallocated += len;
370                 count++;
371         }
372 @@ -4742,6 +4730,7 @@ static void ext4_mb_poll_new_transaction
373         ext4_mb_free_committed_blocks(sb);
374  }
375  
376 +/* needs to be called with the ext4 group lock held */
377  static noinline_for_stack int
378  ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
379                           ext4_group_t group, ext4_grpblk_t block, int count)
380 @@ -4912,35 +4908,30 @@ do_more:
381                 new_entry->count = count;
382                 new_entry->t_tid = handle->h_transaction->t_tid;
383                 ext4_lock_group(sb, block_group);
384 -               mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
385 -                               bit, count);
386 +               mb_clear_bits(bitmap_bh->b_data, bit, count);
387                 ext4_mb_free_metadata(handle, &e4b, new_entry);
388 -               ext4_unlock_group(sb, block_group);
389         } else {
390                 ext4_lock_group(sb, block_group);
391                 /* need to update group_info->bb_free and bitmap
392                  * with group lock held. generate_buddy look at
393                  * them with group lock_held
394                  */
395 -               mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
396 -                               bit, count);
397 +               mb_clear_bits(bitmap_bh->b_data, bit, count);
398                 mb_free_blocks(inode, &e4b, bit, count);
399                 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
400 -               ext4_unlock_group(sb, block_group);
401         }
402  
403 -       spin_lock(sb_bgl_lock(sbi, block_group));
404         ret = ext4_free_blks_count(sb, gdp) + count;
405         ext4_free_blks_set(sb, gdp, ret);
406         gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
407 -       spin_unlock(sb_bgl_lock(sbi, block_group));
408 +       ext4_unlock_group(sb, block_group);
409         percpu_counter_add(&sbi->s_freeblocks_counter, count);
410  
411         if (sbi->s_log_groups_per_flex) {
412                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
413 -               spin_lock(sb_bgl_lock(sbi, flex_group));
414 +               ext4_lock_group(sb, flex_group);
415                 sbi->s_flex_groups[flex_group].free_blocks += count;
416 -               spin_unlock(sb_bgl_lock(sbi, flex_group));
417 +               ext4_unlock_group(sb, flex_group);
418         }
419  
420         ext4_mb_release_desc(&e4b);
421 Index: linux-2.6.18-128.1.6/fs/ext4/super.c
422 ===================================================================
423 --- linux-2.6.18-128.1.6.orig/fs/ext4/super.c
424 +++ linux-2.6.18-128.1.6/fs/ext4/super.c
425 @@ -1934,18 +1934,18 @@ static int ext4_check_descriptors(struct
426                                "(block %llu)!\n", i, inode_table);
427                         return 0;
428                 }
429 -               spin_lock(sb_bgl_lock(sbi, i));
430 +               ext4_lock_group(sb, i);
431                 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
432                         printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
433                                "Checksum for group %u failed (%u!=%u)\n",
434                                i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
435                                gdp)), le16_to_cpu(gdp->bg_checksum));
436                         if (!(sb->s_flags & MS_RDONLY)) {
437 -                               spin_unlock(sb_bgl_lock(sbi, i));
438 +                               ext4_unlock_group(sb, i);
439                                 return 0;
440                         }
441                 }
442 -               spin_unlock(sb_bgl_lock(sbi, i));
443 +               ext4_unlock_group(sb, i);
444                 if (!flexbg_flag)
445                         first_block += EXT4_BLOCKS_PER_GROUP(sb);
446         }
447 Index: linux-2.6.18-128.1.6/fs/ext4/ext4.h
448 ===================================================================
449 --- linux-2.6.18-128.1.6.orig/fs/ext4/ext4.h
450 +++ linux-2.6.18-128.1.6/fs/ext4/ext4.h
451 @@ -127,35 +127,9 @@ struct ext4_group_info {
452  };
453  
454  #define EXT4_GROUP_INFO_NEED_INIT_BIT  0
455 -#define EXT4_GROUP_INFO_LOCKED_BIT     1
456  
457  #define EXT4_MB_GRP_NEED_INIT(grp)     \
458         (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
459 -
460 -static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
461 -{
462 -       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
463 -
464 -       bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
465 -}
466 -
467 -static inline void ext4_unlock_group(struct super_block *sb,
468 -                                       ext4_group_t group)
469 -{
470 -       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
471 -
472 -       bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
473 -}
474 -
475 -static inline int ext4_is_group_locked(struct super_block *sb,
476 -                                       ext4_group_t group)
477 -{
478 -       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
479 -
480 -       return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
481 -                                               &(grinfo->bb_state));
482 -}
483 -
484  /*
485   * Inodes and files operations
486   */
487 @@ -1303,6 +1303,32 @@ extern int ext4_get_blocks_wrap(handle_t
488         set_bit(BH_BITMAP_UPTODATE, &(bh)->b_state);
489  }
490  
491 +static inline spinlock_t *ext4_group_lock_ptr(struct super_block *sb,
492 +                                             ext4_group_t group)
493 +{
494 +       struct blockgroup_lock *bgl = &EXT4_SB(sb)->s_blockgroup_lock;
495 +       return &bgl->locks[group & (NR_BG_LOCKS-1)].lock;
496 +}
497 +
498 +static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
499 +{
500 +       spin_lock(ext4_group_lock_ptr(sb, group));
501 +}
502 +
503 +static inline void ext4_unlock_group(struct super_block *sb,
504 +                                       ext4_group_t group)
505 +{
506 +       spin_unlock(ext4_group_lock_ptr(sb, group));
507 +}
508 +
509 +static inline int ext4_is_group_locked(struct super_block *sb,
510 +                                       ext4_group_t group)
511 +{
512 +       return spin_is_locked(ext4_group_lock_ptr(sb, group));
513 +}
514 +
515 +
516 +
517  #endif /* __KERNEL__ */
518  
519  #endif /* _EXT4_H */